/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/log2.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/pstate.h>
#include <asm/oplib.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))

struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
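
/* Mappings are inserted into the hardware IOTSB through the hypervisor
 * in batches: iommu_batch_start() primes the per-cpu batch, each
 * iommu_batch_add() appends one physical page, and iommu_batch_end()
 * flushes whatever remains.  The per-cpu state is only safe to touch
 * with interrupts disabled, which is why every caller runs under
 * local_irq_save() or a spin_lock_irqsave() of the IOMMU lock.
 */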

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}
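
/* pci_sun4v_iommu_map() is permitted to map fewer pages than it was
 * asked for; it returns the number of entries actually installed, or a
 * negative value on error.  The flush therefore loops, advancing the
 * TSB entry index and the page list by the per-call count until the
 * whole batch has been accepted.
 */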

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	u64 *pglist = p->pglist;
	unsigned long npages = p->npages;

	while (npages != 0) {
		long num;

		num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
					  npages, prot, __pa(pglist));
		if (unlikely(num < 0)) {
			if (printk_ratelimit())
				printk("iommu_batch_flush: IOMMU map of "
				       "[%08lx:%08lx:%lx:%lx:%lx] failed with "
				       "status %ld\n",
				       devhandle, HV_PCI_TSBID(0, entry),
				       npages, prot, __pa(pglist), num);
			return -1;
		}

		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}
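
/* Used by the scatterlist path: if the next mapping does not start at
 * the entry immediately following the current batch, flush what has
 * accumulated so far and restart the batch at the new entry.  An entry
 * of ~0UL (what iommu_batch_start() is given in dma_4v_map_sg()) marks
 * a batch that has not mapped anything yet and has nothing to flush.
 */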
static inline void iommu_batch_new_entry(unsigned long entry)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(void)
{
	struct iommu_batch *p = &__get_cpu_var(iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p);
}
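
/* Coherent allocations: grab page-aligned memory on the device's NUMA
 * node, reserve a contiguous range of IOTSB entries under the IOMMU
 * lock, then batch-map every page with both read and write attributes.
 * On a mapping failure the reserved range is released and the pages
 * freed, so the caller only ever sees NULL or a fully mapped buffer.
 */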
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page, npages, n;
	struct iommu *iommu;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (iommu->page_table_map_base +
		      (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, *dma_addrp, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* first_page was converted to a physical address above, so
	 * free the pages through the still-virtual 'ret' here.
	 */
	free_pages((unsigned long) ret, order);
	return NULL;

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
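
/* Tear-down mirrors the allocation: give the IOTSB range back to the
 * allocator, then ask the hypervisor to invalidate the entries.  Like
 * the map call, pci_sun4v_iommu_demap() may process fewer entries than
 * requested, hence the loop.
 */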
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, order, npages, entry;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;
	entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
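
/* Streaming single-buffer map: the buffer need not be page aligned, so
 * the low bits of the original address are carried over into the
 * returned bus address while whole IO pages are mapped underneath it.
 * Write permission is granted unless the mapping is DMA_TO_DEVICE.
 */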
static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	u32 bus_addr, ret;
	unsigned long prot;
	long entry;

	iommu = dev->archdata.iommu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end() < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	/* Interrupts are disabled.  */
	spin_lock(&iommu->lock);
	iommu_range_free(iommu, bus_addr, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}
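
/* Unmap is the reverse: round the bus address down to an IO page, give
 * the range back to the allocator, then demap the IOTSB entries in a
 * loop for the same partial-completion reason as above.
 */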
static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	unsigned long flags, npages;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, bus_addr, npages);

	entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	do {
		unsigned long num;

		num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
					    npages);
		entry += num;
		npages -= num;
	} while (npages != 0);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
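
/* Scatterlist mapping walks the list, allocates an IOTSB range per
 * input segment, and greedily coalesces adjacent output segments when
 * the bus addresses are contiguous, the merged length stays within
 * dma_get_max_seg_size(), and no dma_get_seg_boundary() crossing would
 * result.  The whole walk runs under the IOMMU lock with one
 * hypervisor batch; iommu_batch_new_entry() flushes whenever a segment
 * lands in a non-adjacent part of the table.
 */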
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry);

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - the merged segment would exceed max_seg_size
			 * - the segment would span a dma boundary
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end();

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
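
/* SG unmap: a segment with dma_length of zero marks the end of the
 * mapped output (see the outcount < incount termination above), so the
 * walk stops early rather than visiting all nelems entries.
 */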
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	unsigned long flags;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	devhandle = pbm->devhandle;

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		while (npages) {
			unsigned long num;

			num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
						    npages);
			entry += num;
			npages -= num;
		}

		sg = sg_next(sg);
	}

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4v_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static void dma_4v_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	/* Nothing to do... */
}

static const struct dma_ops sun4v_dma_ops = {
	.alloc_coherent		= dma_4v_alloc_coherent,
	.free_coherent		= dma_4v_free_coherent,
	.map_single		= dma_4v_map_single,
	.unmap_single		= dma_4v_unmap_single,
	.map_sg			= dma_4v_map_sg,
	.unmap_sg		= dma_4v_unmap_sg,
	.sync_single_for_cpu	= dma_4v_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4v_sync_sg_for_cpu,
};
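
/* These ops back the generic DMA API (dma_map_single() and friends)
 * once sun4v_pci_init() installs them in the global dma_ops pointer.
 * DMA on sun4v is cache-coherent from the CPU's point of view, which
 * is why the two sync_*_for_cpu hooks above have nothing to do.
 */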

static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->prom_node;

	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}
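
/* The firmware (OBP) may have left live IOMMU mappings behind, such as
 * those for the console.  Walk the whole TSB via the hypervisor:
 * entries whose target page the kernel owns are demapped, anything
 * else is marked busy in the arena bitmap so the allocator never hands
 * it out.
 */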
static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
						   struct iommu *iommu)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (i = 0; i < arena->limit; i++) {
		unsigned long ret, io_attrs, ra;

		ret = pci_sun4v_iommu_getmap(devhandle,
					     HV_PCI_TSBID(0, i),
					     &io_attrs, &ra);
		if (ret == HV_EOK) {
			if (page_in_phys_avail(ra)) {
				pci_sun4v_iommu_demap(devhandle,
						      HV_PCI_TSBID(0, i), 1);
			} else {
				cnt++;
				__set_bit(i, arena->map);
			}
		}
	}

	return cnt;
}
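
/* Size the IOMMU from the "virtual-dma" OBP property, which holds the
 * DVMA base and size as two 32-bit cells.  The arena bitmap needs one
 * bit per TSB entry; its byte count is rounded up to a multiple of
 * eight so the bitmap operations can work on whole longwords.
 */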
static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	struct property *prop;
	unsigned long num_tsb_entries, sz, tsbsize;
	u32 vdma[2], dma_mask, dma_offset;

	prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
	if (prop) {
		u32 *val = prop->value;

		vdma[0] = val[0];
		vdma[1] = val[1];
	} else {
		/* No property, use default values. */
		vdma[0] = 0x80000000;
		vdma[1] = 0x80000000;
	}

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
			    vdma[0], vdma[1]);
		prom_halt();
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	tsbsize = num_tsb_entries * sizeof(iopte_t);

	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	sz = probe_existing_entries(pbm, iommu);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);
}

#ifdef CONFIG_PCI_MSI
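/* MSI support.  The hypervisor delivers MSI/MSI-X (and emulated INTx)
 * events as 64-byte records appended to per-PBM event queues (MSIQs);
 * the layout below follows the sun4v Hypervisor API's MSI event queue
 * entry format.  The generic sparc64 MSI layer drives these queues
 * through the sparc64_msiq_ops at the end of this section.
 */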
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 are the bus/device/fn of the msg target-id
	 *	bits 18:16 are the message routing code
	 *	bits 7:0 are the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};

static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}
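
/* Pull one MSI off the queue whose head offset was fetched above.  The
 * head is a byte offset into the queue, so the entry address is
 * computed with void-pointer arithmetic; after the payload is read,
 * the entry's type field is cleared to hand it back to the hardware
 * and the head wraps at the end of the ring.  Returns 1 if an MSI was
 * dequeued, 0 if the queue is empty, negative on error.
 */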
static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}
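
/* Allocate all MSI queues for this PBM as one physically contiguous
 * block (hence __GFP_COMP and get_order()), register each queue's base
 * address and entry count with the hypervisor, then read the
 * configuration back to verify it took hold.  Any failure unwinds the
 * whole allocation.
 */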
static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!virt_irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;
	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	=	pci_sun4v_get_head,
	.dequeue_msi	=	pci_sun4v_dequeue_msi,
	.set_head	=	pci_sun4v_set_head,
	.msi_setup	=	pci_sun4v_msi_setup,
	.msi_teardown	=	pci_sun4v_msi_teardown,
	.msiq_alloc	=	pci_sun4v_msiq_alloc,
	.msiq_free	=	pci_sun4v_msiq_free,
	.msiq_build_irq	=	pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
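
/* Each pci_controller_info carries two PCI Bus Modules (PBMs); bit
 * 0x40 of the hypervisor device handle distinguishes leaf B from
 * leaf A of the pair.
 */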
static void __init pci_sun4v_pbm_init(struct pci_controller_info *p,
				      struct device_node *dp, u32 devhandle)
{
	struct pci_pbm_info *pbm;

	if (devhandle & 0x40)
		pbm = &p->pbm_B;
	else
		pbm = &p->pbm_A;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->scan_bus = pci_sun4v_scan_bus;
	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->parent = p;
	pbm->prom_node = dp;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_sun4v_iommu_init(pbm);
	pci_sun4v_msi_init(pbm);
}
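
/* Top-level probe, called once per "pci" OBP node.  On the first call
 * it negotiates the vPCI hypervisor API group and installs the sun4v
 * dma_ops.  The device handle lives in the upper half of the first
 * "reg" property cell; if this node's sibling leaf (devhandle ^ 0x40)
 * has already been probed, the new PBM joins the existing controller,
 * otherwise a fresh pci_controller_info and the per-cpu page lists for
 * the IOMMU batching code are allocated.
 */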
void __init sun4v_pci_init(struct device_node *dp, char *model_name)
{
	static int hvapi_negotiated = 0;
	struct pci_controller_info *p;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct property *prop;
	struct linux_prom64_registers *regs;
	u32 devhandle;
	int i;

	if (!hvapi_negotiated++) {
		int err = sun4v_hvapi_register(HV_GRP_PCI,
					       vpci_major,
					       &vpci_minor);

		if (err) {
			prom_printf("SUN4V_PCI: Could not register hvapi, "
				    "err=%d\n", err);
			prom_halt();
		}
		printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
		       vpci_major, vpci_minor);

		dma_ops = &sun4v_dma_ops;
	}

	prop = of_find_property(dp, "reg", NULL);
	if (!prop) {
		prom_printf("SUN4V_PCI: Could not find config registers\n");
		prom_halt();
	}
	regs = prop->value;

	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (pbm->devhandle == (devhandle ^ 0x40)) {
			pci_sun4v_pbm_init(pbm->parent, dp, devhandle);
			return;
		}
	}

	for_each_possible_cpu(i) {
		unsigned long page = get_zeroed_page(GFP_ATOMIC);

		if (!page)
			goto fatal_memory_error;

		per_cpu(iommu_batch, i).pglist = (u64 *) page;
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	pci_sun4v_pbm_init(p, dp, devhandle);

	return;

fatal_memory_error:
	prom_printf("SUN4V_PCI: Fatal memory allocation error.\n");
	prom_halt();
}