/*
 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"
/* Max number of TVTs for one table. Only 32-bit tables can use
 * multiple TVTs, so the max currently supported is 8 since only
 * 2G of DMA space is supported.
 */
#define MAX_TABLE_TVT_COUNT	8

struct wsp_dma_table {
	struct list_head	link;
	struct iommu_table	table;
	struct wsp_phb		*phb;
	struct page		*tces[MAX_TABLE_TVT_COUNT];
};
/* We support DMA regions from 0..2G in 32-bit space (no support for
 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
 * entry) with validation enabled (though not supported by SimiCS
 * just yet).
 *
 * To simplify things, we divide this 2G space into N regions based
 * on the constant below, which could be turned into a tunable
 * eventually.
 *
 * We then dynamically assign those regions to devices as they show up.
 *
 * We use a bitmap as an allocator for these.
 *
 * Tables are allocated/created dynamically as devices are discovered;
 * multiple TVT entries are used if needed.
 *
 * When 64-bit DMA support is added we should simply use a separate set
 * of larger regions (the HW supports 64 TVT entries). We could
 * additionally create a bypass region in 64-bit space for performance,
 * though that would have a cost in terms of security.
 *
 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
 * for all devices and bus/dev/fn validation is disabled.
 *
 * Note that a DMA32 region cannot be smaller than 256M so the max
 * supported here for now is 8. We don't yet support sharing regions
 * between multiple devices so the max number of devices supported
 * is MAX_TABLE_TVT_COUNT.
 */
#define NUM_DMA32_REGIONS	1
struct wsp_phb {
	struct pci_controller	*hose;

	/* Lock controlling access to the list of dma tables.
	 * It does -not- protect against dma_* operations on
	 * those tables; those should be stopped before an entry
	 * is removed from the list.
	 *
	 * The lock is also used for error handling operations.
	 */
	spinlock_t		lock;
	struct list_head	dma_tables;
	unsigned long		dma32_map;
	unsigned long		dma32_base;
	unsigned int		dma32_num_regions;
	unsigned long		dma32_region_size;

	/* Debugfs stuff */
	struct dentry		*ddir;

	struct list_head	all;
};
static LIST_HEAD(wsp_phbs);

//#define cfg_debug(fmt...)	pr_debug(fmt)
#define cfg_debug(fmt...)
static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;
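
	/* The byte enables derived below select which bytes within the
	 * aligned 32-bit word are accessed. Worked example: a 2-byte read
	 * at offset 0x3e gives suboff = 2, byte enables 0xc >> 2 = 0x3,
	 * and the value is extracted by shifting the data right by
	 * suboff << 3 = 16 bits.
	 */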
	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xff;
		cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xffff;
		cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
		cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};
#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2	/* write from PCI allowed */
#define TCE_PCI_READ		0x1	/* read from PCI allowed */
#define TCE_RPN_MASK		0x3fffffffffful	/* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT		12

//#define dma_debug(fmt...)	pr_debug(fmt)
#define dma_debug(fmt...)
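
/*
 * A TCE entry is thus the real page number shifted into bits 12 and up,
 * with the low two bits granting PCI read/write access. Worked example:
 * mapping a page at physical 0x12345000 for bidirectional DMA gives
 * rpn = 0x12345 and a TCE of
 * (0x12345 << 12) | TCE_PCI_READ | TCE_PCI_WRITE == 0x12345003.
 */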
static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	proto_tce |= TCE_PCI_WRITE;
#else
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;
#endif

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table
	 */
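
	/* Each TCE chunk allocated in wsp_pci_create_dma32_table() is 512K,
	 * i.e. 0x10000 (64K) 8-byte entries, hence index >> 16 selects the
	 * chunk and index & 0xffff the entry within it.
	 */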
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		/* can't move this out since we might cross LMB boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		index++;
	}
	return 0;
}
static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table. Also use line-kill option to kill multiple
	 * TCEs at once
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);
		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Don't write there since it would pollute other MMIO accesses */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}
static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Calculate how many TVTs are needed */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;
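
	/* Each TVT covers at most 256M (0x10000000), so e.g. a single 2G
	 * region (NUM_DMA32_REGIONS == 1) needs 8 TVTs, while 8 regions
	 * of 256M each need one TVT apiece.
	 */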
	/* Calculate the base TVT index. We know all tables have the same
	 * size so we just do a simple multiply here
	 */
	tvt = region * tvts_per_table;

	pr_debug(" Region          : %d\n", region);
	pr_debug(" DMA range       : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs  : %d\n", tvts_per_table);
	pr_debug(" Base TVT        : %d\n", tvt);
	pr_debug(" Node            : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate table. We always use a 4K TCE size for now, so
		 * one table is always 8 * (256M / 4K) == 512K
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* Table size. We currently set it to be the whole 256M region */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* IO page size set to 4K */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
		/* Shift in the address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* Validation stuff. We only fully validate bus/dev/fn for now;
		 * one day maybe we can group devices, but that isn't the case
		 * at the moment
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* XX PE number is always 0 for now */

		/* Program the values using the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Init bits and pieces */
	tbl->table.it_blocksize = 16;
	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

	/*
	 * It's already blank but we clear it anyway.
	 * Consider an additional interface that makes clearing optional
	 */
	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

 fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}
static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to a bridge */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

	spin_lock_irqsave(&phb->lock, flags);
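
	/* Note: the lock is dropped before wsp_pci_create_dma32_table()
	 * is called below, since table creation performs sleeping
	 * GFP_KERNEL allocations.
	 */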
	/* If only one region, check if it already exists */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT,
			(table->table.it_offset << IOMMU_PAGE_SHIFT)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}
static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

	/*
	 * Some WSP variants have a bogus class code by default in the PCI-E
	 * root complex's built-in P2P bridge
	 */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* XXX Disable TCE caching, it doesn't work on DD1 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure M32A and IO. IO is hard wired to be 1M for now */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
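
	/* The mask encodes the window size; e.g. the 1M IO window
	 * (end - start == 0xfffff) programs a mask of 0x3fffff00000.
	 */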
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

	/* Clear all TVT entries
	 *
	 * XX Might get TVT count from device-tree
	 */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}
	/* Kill the TCE cache */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and M32A */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
	      PCIE_REG_PHBC_IO_EN |
	      PCIE_REG_PHBC_64BIT_MSI_EN |
	      PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Enable error reporting */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask an error that's generated when doing config space probe
	 *
	 * XXX Maybe we should only mask it around config space cycles... that or
	 * ignore it when we know we had a config space cycle recently ?
	 */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Enable UTL errors; for now, all of them go to UTL irq 1
	 *
	 * We similarly mask one UTL error apparently caused during normal
	 * probing. We also mask the link up error
	 */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}
static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
	u64 val;
	int i;

	for (i = 0; i < 10000; i++) {
		val = in_be64(phb->hose->cfg_data + 0xe08);
		if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
			return;
		udelay(1);
	}
	pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
		   phb->hose->global_number, port);
}
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}

#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
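
/*
 * Expanding asm/io-defs.h through the macros above generates one locked
 * wrapper per PIO accessor; e.g. (assuming the usual io-defs.h entry
 * "DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port)")
 * this produces a wsp_pci_inb() that waits for the PHB to go idle and
 * then performs the access under the PHB lock. The _mem variants expand
 * to nothing, so MMIO accessors are not wrapped.
 */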
static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};
static int __init wsp_setup_one_phb(struct device_node *np)
{
	struct pci_controller *hose;
	struct wsp_phb *phb;

	pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);

	phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
	if (!phb)
		return -ENOMEM;
	hose = pcibios_alloc_controller(np);
	if (!hose) {
		/* Can't really free the phb */
		return -ENOMEM;
	}
	hose->private_data = phb;
	phb->hose = hose;

	INIT_LIST_HEAD(&phb->dma_tables);
	spin_lock_init(&phb->lock);

	/* XXX Use bus-range property ? */
	hose->first_busno = 0;
	hose->last_busno = 0xff;

	/* We use cfg_data as the address for the whole bridge MMIO space */
	hose->cfg_data = of_iomap(hose->dn, 0);
	pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

	/* Get the ranges of the device-tree */
	pci_process_bridge_OF_ranges(hose, np, 0);

	/* XXX Force re-assigning of everything for now */
	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
		      PCI_ENABLE_PROC_DOMAINS);

	/* Calculate how the TCE space is divided */
	phb->dma32_base = 0;
	phb->dma32_num_regions = NUM_DMA32_REGIONS;
	if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
		pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
			   MAX_TABLE_TVT_COUNT);
		phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
	}
	phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;
	BUG_ON(!is_power_of_2(phb->dma32_region_size));
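
	/* With the default NUM_DMA32_REGIONS of 1 this yields a single
	 * 2G region shared by all devices; with 8 regions, each device
	 * gets its own 256M region.
	 */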
	/* Setup config ops */
	hose->ops = &wsp_pcie_pci_ops;

	/* Configure the HW */
	wsp_pcie_configure_hw(hose);

	/* Instantiate IO workarounds */
	iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
	wsp_setup_phb_msi(hose);
#endif

	/* Add to global list */
	list_add(&phb->all, &wsp_phbs);

	return 0;
}
void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find host bridges */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish device-tree linkage */
	pci_devs_phb_init();

	/* Set DMA ops to use TCEs */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}

#define err_debug(fmt...)	pr_debug(fmt)
//#define err_debug(fmt...)
static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
{
	const u32 *prop;
	int hw_irq;

	/* Ok, no interrupts property, let's try to find our child P2P */
	np = of_get_next_child(np, NULL);
	if (np == NULL)
		return 0;

	/* Grab its interrupt map */
	prop = of_get_property(np, "interrupt-map", NULL);
	if (prop == NULL)
		return 0;

	/* Grab one of the interrupts in there, keep the low 4 bits */
	hw_irq = prop[5] & 0xf;

	/* 0..4 for PHB 0 and 5..9 for PHB 1 */
	if (hw_irq < 5)
		hw_irq = 4;
	else
		hw_irq = 9;
	hw_irq |= prop[5] & ~0xf;
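
	/* That is, the low nibble is replaced with the last interrupt of
	 * the owning PHB's 0..4 (or 5..9) range, which is taken to be the
	 * error interrupt; e.g. a map entry with low nibble 0x3 on PHB 0
	 * is redirected to hw_irq 0x4.
	 */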
	err_debug("PCI: Using 0x%x as error IRQ for %s\n",
		  hw_irq, np->parent->full_name);
	return irq_create_mapping(NULL, hw_irq);
}
static const struct {
	u32 offset;
	const char *name;
} wsp_pci_regs[] = {
#define DREG(x) { PCIE_REG_##x, #x }
#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
	/* Architected registers except CONFIG_ and IODA
	 * to avoid side effects
	 */
	DREG(DMA_CHAN_STATUS),
	DREG(CPU_LOADSTORE_STATUS),
	DREG(LOCK0),
	DREG(LOCK1),
	DREG(PHB_CONFIG),
	DREG(IO_BASE_ADDR),
	DREG(IO_BASE_MASK),
	DREG(IO_START_ADDR),
	DREG(M32A_BASE_ADDR),
	DREG(M32A_BASE_MASK),
	DREG(M32A_START_ADDR),
	DREG(M32B_BASE_ADDR),
	DREG(M32B_BASE_MASK),
	DREG(M32B_START_ADDR),
	DREG(M64_BASE_ADDR),
	DREG(M64_BASE_MASK),
	DREG(M64_START_ADDR),
	DREG(TCE_KILL),
	DREG(LOCK2),
	DREG(PHB_GEN_CAP),
	DREG(PHB_TCE_CAP),
	DREG(PHB_IRQ_CAP),
	DREG(PHB_EEH_CAP),
	DREG(PAPR_ERR_INJ_CONTROL),
	DREG(PAPR_ERR_INJ_ADDR),
	DREG(PAPR_ERR_INJ_MASK),

	/* UTL core regs */
	DUTL(SYS_BUS_CONTROL),
	DUTL(STATUS),
	DUTL(SYS_BUS_AGENT_STATUS),
	DUTL(SYS_BUS_AGENT_ERR_SEV),
	DUTL(SYS_BUS_AGENT_IRQ_EN),
	DUTL(SYS_BUS_BURST_SZ_CONF),
	DUTL(REVISION_ID),
	DUTL(OUT_POST_HDR_BUF_ALLOC),
	DUTL(OUT_POST_DAT_BUF_ALLOC),
	DUTL(IN_POST_HDR_BUF_ALLOC),
	DUTL(IN_POST_DAT_BUF_ALLOC),
	DUTL(OUT_NP_BUF_ALLOC),
	DUTL(IN_NP_BUF_ALLOC),
	DUTL(PCIE_TAGS_ALLOC),
	DUTL(GBIF_READ_TAGS_ALLOC),
	DUTL(PCIE_PORT_CONTROL),
	DUTL(PCIE_PORT_STATUS),
	DUTL(PCIE_PORT_ERROR_SEV),
	DUTL(PCIE_PORT_IRQ_EN),
	DUTL(RC_STATUS),
	DUTL(RC_ERR_SEVERITY),
	DUTL(RC_IRQ_EN),
	DUTL(EP_STATUS),
	DUTL(EP_ERR_SEVERITY),
	DUTL(EP_ERR_IRQ_EN),
	DUTL(PCI_PM_CTRL1),
	DUTL(PCI_PM_CTRL2),

	/* PCIe stack regs */
	DREG(SYSTEM_CONFIG1),
	DREG(SYSTEM_CONFIG2),
	DREG(EP_SYSTEM_CONFIG),
	DREG(EP_FLR),
	DREG(EP_BAR_CONFIG),
	DREG(LINK_CONFIG),
	DREG(PM_CONFIG),
	DREG(DLP_CONTROL),
	DREG(DLP_STATUS),
	DREG(ERR_REPORT_CONTROL),
	DREG(SLOT_CONTROL1),
	DREG(SLOT_CONTROL2),
	DREG(UTL_CONFIG),
	DREG(BUFFERS_CONFIG),
	DREG(ERROR_INJECT),
	DREG(SRIOV_CONFIG),
	DREG(PF0_SRIOV_STATUS),
	DREG(PF1_SRIOV_STATUS),
	DREG(PORT_NUMBER),
	DREG(POR_SYSTEM_CONFIG),

	/* Internal logic regs */
	DREG(PHB_VERSION),
	DREG(RESET),
	DREG(PHB_CONTROL),
	DREG(PHB_TIMEOUT_CONTROL1),
	DREG(PHB_QUIESCE_DMA),
	DREG(PHB_DMA_READ_TAG_ACTV),
	DREG(PHB_TCE_READ_TAG_ACTV),

	/* FIR registers */
	DREG(LEM_FIR_ACCUM),
	DREG(LEM_FIR_AND_MASK),
	DREG(LEM_FIR_OR_MASK),
	DREG(LEM_ACTION0),
	DREG(LEM_ACTION1),
	DREG(LEM_ERROR_MASK),
	DREG(LEM_ERROR_AND_MASK),
	DREG(LEM_ERROR_OR_MASK),

	/* Error traps registers */
	DREG(PHB_ERR_STATUS),
	DREG(PHB_ERR1_STATUS),
	DREG(PHB_ERR_INJECT),
	DREG(PHB_ERR_LEM_ENABLE),
	DREG(PHB_ERR_IRQ_ENABLE),
	DREG(PHB_ERR_FREEZE_ENABLE),
	DREG(PHB_ERR_SIDE_ENABLE),
	DREG(PHB_ERR_LOG_0),
	DREG(PHB_ERR_LOG_1),
	DREG(PHB_ERR_STATUS_MASK),
	DREG(PHB_ERR1_STATUS_MASK),
	DREG(MMIO_ERR_STATUS),
	DREG(MMIO_ERR1_STATUS),
	DREG(MMIO_ERR_INJECT),
	DREG(MMIO_ERR_LEM_ENABLE),
	DREG(MMIO_ERR_IRQ_ENABLE),
	DREG(MMIO_ERR_FREEZE_ENABLE),
	DREG(MMIO_ERR_SIDE_ENABLE),
	DREG(MMIO_ERR_LOG_0),
	DREG(MMIO_ERR_LOG_1),
	DREG(MMIO_ERR_STATUS_MASK),
	DREG(MMIO_ERR1_STATUS_MASK),
	DREG(DMA_ERR_STATUS),
	DREG(DMA_ERR1_STATUS),
	DREG(DMA_ERR_INJECT),
	DREG(DMA_ERR_LEM_ENABLE),
	DREG(DMA_ERR_IRQ_ENABLE),
	DREG(DMA_ERR_FREEZE_ENABLE),
	DREG(DMA_ERR_SIDE_ENABLE),
	DREG(DMA_ERR_LOG_0),
	DREG(DMA_ERR_LOG_1),
	DREG(DMA_ERR_STATUS_MASK),
	DREG(DMA_ERR1_STATUS_MASK),

	/* Debug and Trace registers */
	DREG(PHB_DEBUG_CONTROL0),
	DREG(PHB_DEBUG_STATUS0),
	DREG(PHB_DEBUG_CONTROL1),
	DREG(PHB_DEBUG_STATUS1),
	DREG(PHB_DEBUG_CONTROL2),
	DREG(PHB_DEBUG_STATUS2),
	DREG(PHB_DEBUG_CONTROL3),
	DREG(PHB_DEBUG_STATUS3),
	DREG(PHB_DEBUG_CONTROL4),
	DREG(PHB_DEBUG_STATUS4),
	DREG(PHB_DEBUG_CONTROL5),
	DREG(PHB_DEBUG_STATUS5),

	/* Don't seem to exist ...
	DREG(PHB_DEBUG_CONTROL6),
	DREG(PHB_DEBUG_STATUS6),
	*/
};
static int wsp_pci_regs_show(struct seq_file *m, void *private)
{
	struct wsp_phb *phb = m->private;
	struct pci_controller *hose = phb->hose;
	int i;

	for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
		/* Skip write-only regs */
		if (wsp_pci_regs[i].offset == 0xc08 ||
		    wsp_pci_regs[i].offset == 0xc10 ||
		    wsp_pci_regs[i].offset == 0xc38 ||
		    wsp_pci_regs[i].offset == 0xc40)
			continue;
		seq_printf(m, "0x%03x: 0x%016llx %s\n",
			   wsp_pci_regs[i].offset,
			   in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
			   wsp_pci_regs[i].name);
	}
	return 0;
}
static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wsp_pci_regs_show, inode->i_private);
}

static const struct file_operations wsp_pci_regs_fops = {
	.open = wsp_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int wsp_pci_reg_set(void *data, u64 val)
{
	out_be64((void __iomem *)data, val);
	return 0;
}

static int wsp_pci_reg_get(void *data, u64 *val)
{
	*val = in_be64((void __iomem *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
	struct wsp_phb *phb = dev_id;
	struct pci_controller *hose = phb->hose;
	irqreturn_t handled = IRQ_NONE;
	struct wsp_pcie_err_log_data ed;

	pr_err("PCI: Error interrupt on %s (PHB %d)\n",
	       hose->dn->full_name, hose->global_number);
 again:
	memset(&ed, 0, sizeof(ed));

	/* Read and clear UTL errors */
	ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
	if (ed.utl_sys_err)
		out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
	ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
	if (ed.utl_port_err)
		out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
	ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
	if (ed.utl_rc_err)
		out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

	/* Read and clear main trap errors */
	ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
	if (ed.phb_err) {
		ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
		ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
		ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
	}
	ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
	if (ed.mmio_err) {
		ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
		ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
		ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
	}
	ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
	if (ed.dma_err) {
		ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
		ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
		ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
	}

	/* Now print things out */
	if (ed.phb_err) {
		pr_err("   PHB Error Status      : 0x%016llx\n", ed.phb_err);
		pr_err("   PHB First Error Status: 0x%016llx\n", ed.phb_err1);
		pr_err("   PHB Error Log 0       : 0x%016llx\n", ed.phb_log0);
		pr_err("   PHB Error Log 1       : 0x%016llx\n", ed.phb_log1);
	}
	if (ed.mmio_err) {
		pr_err("  MMIO Error Status      : 0x%016llx\n", ed.mmio_err);
		pr_err("  MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
		pr_err("  MMIO Error Log 0       : 0x%016llx\n", ed.mmio_log0);
		pr_err("  MMIO Error Log 1       : 0x%016llx\n", ed.mmio_log1);
	}
	if (ed.dma_err) {
		pr_err("   DMA Error Status      : 0x%016llx\n", ed.dma_err);
		pr_err("   DMA First Error Status: 0x%016llx\n", ed.dma_err1);
		pr_err("   DMA Error Log 0       : 0x%016llx\n", ed.dma_log0);
		pr_err("   DMA Error Log 1       : 0x%016llx\n", ed.dma_log1);
	}
	if (ed.utl_sys_err)
		pr_err("   UTL Sys Error Status  : 0x%016llx\n", ed.utl_sys_err);
	if (ed.utl_port_err)
		pr_err("   UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
	if (ed.utl_rc_err)
		pr_err("   UTL RC Error Status   : 0x%016llx\n", ed.utl_rc_err);

	/* Interrupts are caused by the error traps. If we had any error there
	 * we loop again in case the UTL buffered some new stuff between
	 * going there and going to the traps
	 */
	if (ed.dma_err || ed.mmio_err || ed.phb_err) {
		handled = IRQ_HANDLED;
		goto again;
	}
	return handled;
}
static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	int err_irq, i, rc;
	char fname[16];

	/* Create a debugfs file for that PHB */
	sprintf(fname, "phb%d", phb->hose->global_number);
	phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);
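
	/* The per-register files created below typically end up under
	 * /sys/kernel/debug/powerpc/phbN/regs/ (assuming debugfs is
	 * mounted at /sys/kernel/debug).
	 */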
	/* Some useful debug output */
	if (phb->ddir) {
		struct dentry *d = debugfs_create_dir("regs", phb->ddir);
		char tmp[64];

		for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
			sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
				wsp_pci_regs[i].name);
			debugfs_create_file(tmp, 0600, d,
					    hose->cfg_data + wsp_pci_regs[i].offset,
					    &wsp_pci_reg_fops);
		}
		debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
	}

	/* Find the IRQ number for that PHB */
	err_irq = irq_of_parse_and_map(hose->dn, 0);
	if (err_irq == 0)
		/* XXX Error IRQ lacking from device-tree */
		err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
	if (err_irq == 0) {
		pr_err("PCI: Failed to fetch error interrupt for %s\n",
		       hose->dn->full_name);
		return;
	}

	/* Request it */
	rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
	if (rc) {
		pr_err("PCI: Failed to request interrupt for %s\n",
		       hose->dn->full_name);
	}

	/* Enable interrupts for all errors for now */
	out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
}
/*
 * This is called later to hook up with the error interrupt
 */
static int __init wsp_setup_pci_late(void)
{
	struct wsp_phb *phb;

	list_for_each_entry(phb, &wsp_phbs, all)
		wsp_setup_pci_err_reporting(phb);

	return 0;
}
arch_initcall(wsp_setup_pci_late);