wsp_pci.c

/*
 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"

/* Max number of TVTs for one table. Only 32-bit tables can use
 * multiple TVTs and so the max currently supported is thus 8
 * since only 2G of DMA space is supported
 */
#define MAX_TABLE_TVT_COUNT 8
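
/* A DMA window: one iommu_table backed by up to MAX_TABLE_TVT_COUNT
 * separately allocated blocks of TCEs, one block per 256M TVT entry
 * covering the window.
 */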
struct wsp_dma_table {
	struct list_head link;
	struct iommu_table table;
	struct wsp_phb *phb;
	struct page *tces[MAX_TABLE_TVT_COUNT];
};

/* We support DMA regions from 0...2G in 32bit space (no support for
 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
 * entry) with validation enabled (though not supported by SimiCS
 * just yet).
 *
 * To simplify things, we divide this 2G space into N regions based
 * on the constant below, which could be turned into a tunable eventually.
 *
 * We then dynamically assign those regions to devices as they show up.
 *
 * We use a bitmap as an allocator for these.
 *
 * Tables are allocated/created dynamically as devices are discovered;
 * multiple TVT entries are used if needed.
 *
 * When 64-bit DMA support is added we should simply use a separate set
 * of larger regions (the HW supports 64 TVT entries). We can
 * additionally create a bypass region in 64-bit space for performance
 * though that would have a cost in terms of security.
 *
 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
 * for all devices and bus/dev/fn validation is disabled.
 *
 * Note that a DMA32 region cannot be smaller than 256M so the max
 * supported here for now is 8. We don't yet support sharing regions
 * between multiple devices so the max number of devices supported
 * is MAX_TABLE_TVT_COUNT.
 */
#define NUM_DMA32_REGIONS	1

struct wsp_phb {
	struct pci_controller *hose;

	/* Lock controlling access to the list of dma tables.
	 * It does -not- protect against dma_* operations on
	 * those tables, those should be stopped before an entry
	 * is removed from the list.
	 *
	 * The lock is also used for error handling operations
	 */
	spinlock_t lock;
	struct list_head dma_tables;
	unsigned long dma32_map;
	unsigned long dma32_base;
	unsigned int dma32_num_regions;
	unsigned long dma32_region_size;

	/* Debugfs stuff */
	struct dentry *ddir;

	struct list_head all;
};

static LIST_HEAD(wsp_phbs);

//#define cfg_debug(fmt...)	pr_debug(fmt)
#define cfg_debug(fmt...)

static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
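
	/* The BE field holds per-byte enables with bit 0x8 corresponding
	 * to byte 0 of the dword: a 1-byte access thus uses 0x8 >> suboff,
	 * a 2-byte access 0xc >> suboff, and a 4-byte access 0xf.
	 */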
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xff;
		cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xffff;
		cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
		cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};

#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2		/* write from PCI allowed */
#define TCE_PCI_READ		0x1		/* read from PCI allowed */
#define TCE_RPN_MASK		0x3fffffffffful	/* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT		12

//#define dma_debug(fmt...)	pr_debug(fmt)
#define dma_debug(fmt...)

static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	proto_tce |= TCE_PCI_WRITE;
#else
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;
#endif

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table
	 */
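	/* Each entry of tces[] is a 512K block holding 65536 8-byte TCEs,
	 * so the upper bits of the index select the block and the low 16
	 * bits select the TCE within it.
	 */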
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		/* can't move this out since we might cross LMB boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		index++;
	}
	return 0;
}

static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table. Also use line-kill option to kill multiple
	 * TCEs at once
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Don't write there since it would pollute other MMIO accesses */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}

static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Calculate how many TVTs are needed */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;

	/* Calculate the base TVT index. We know all tables have the same
	 * size so we just do a simple multiply here
	 */
	tvt = region * tvts_per_table;

	pr_debug(" Region : %d\n", region);
	pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs : %d\n", tvts_per_table);
	pr_debug(" Base TVT : %d\n", tvt);
	pr_debug(" Node : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate table. We always use a 4K TCE size for now, so
		 * one table is always 8 * (256M / 4K) == 512K
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* Table size. We currently set it to be the whole 256M region */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* IO page size set to 4K */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
		/* Shift in the address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* Validation stuff. We only validate fully bus/dev/fn for now;
		 * one day maybe we can group devices but that isn't the case
		 * at the moment
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* XX PE number is always 0 for now */

		/* Program the values using the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Init bits and pieces */
	tbl->table.it_blocksize = 16;
	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

	/*
	 * It's already blank but we clear it anyway.
	 * Consider an additional interface that makes clearing optional
	 */
	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

 fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}

static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to a bridge */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));
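
	/* The PHB lock only protects the region bitmap and the table list;
	 * it is dropped before wsp_pci_create_dma32_table(), which sleeps
	 * (GFP_KERNEL allocations) and takes the lock itself to program
	 * the TVT entries.
	 */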
	spin_lock_irqsave(&phb->lock, flags);

	/* If only one region, check if it already exists */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT,
			(table->table.it_offset << IOMMU_PAGE_SHIFT)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}

static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
	/* WSP DD1 has a bogus class code by default in the PCI-E
	 * root complex's built-in P2P bridge */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* XXX Disable TCE caching, it doesn't work on DD1 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));

	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure M32A and IO. IO is hard wired to be 1M for now */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

	/* Clear all TVT entries
	 *
	 * XX Might get TVT count from device-tree
	 */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}

	/* Kill the TCE cache */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and M32A */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
	      PCIE_REG_PHBC_IO_EN |
	      PCIE_REG_PHBC_64BIT_MSI_EN |
	      PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Enable error reporting */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask an error that's generated when doing a config space probe
	 *
	 * XXX Maybe we should only mask it around config space cycles... that or
	 * ignore it when we know we had a config space cycle recently ?
	 */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Enable UTL errors, for now, all of them go to UTL irq 1
	 *
	 * We similarly mask one UTL error caused apparently during normal
	 * probing. We also mask the link up error
	 */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}

static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
	u64 val;
	int i;

	for (i = 0; i < 10000; i++) {
		val = in_be64(phb->hose->cfg_data + 0xe08);
		if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
			return;
		udelay(1);
	}
	pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
		   phb->hose->global_number, port);
}

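/* PIO accessor wrappers: including <asm/io-defs.h> below expands the
 * DEF_PCI_AC_* macros once per accessor, generating wsp_pci_inb(),
 * wsp_pci_outb(), etc. Each wrapper looks up the owning PHB, takes its
 * lock and waits for the bridge to go idle before doing the real access.
 */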
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}
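
/* The *_mem variants expand to nothing: only the port I/O accessors get
 * the idle-wait treatment here, memory-space accessors keep the default
 * implementations.
 */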
#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)	\

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};

static int __init wsp_setup_one_phb(struct device_node *np)
{
	struct pci_controller *hose;
	struct wsp_phb *phb;

	pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);

	phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
	if (!phb)
		return -ENOMEM;
	hose = pcibios_alloc_controller(np);
	if (!hose) {
		/* Can't really free the phb */
		return -ENOMEM;
	}
	hose->private_data = phb;
	phb->hose = hose;

	INIT_LIST_HEAD(&phb->dma_tables);
	spin_lock_init(&phb->lock);

	/* XXX Use bus-range property ? */
	hose->first_busno = 0;
	hose->last_busno = 0xff;

	/* We use cfg_data as the address for the whole bridge MMIO space
	 */
	hose->cfg_data = of_iomap(hose->dn, 0);

	pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

	/* Get the ranges of the device-tree */
	pci_process_bridge_OF_ranges(hose, np, 0);

	/* XXX Force re-assigning of everything for now */
	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
		      PCI_ENABLE_PROC_DOMAINS);
	pci_probe_only = 0;

	/* Calculate how the TCE space is divided */
	phb->dma32_base = 0;
	phb->dma32_num_regions = NUM_DMA32_REGIONS;
	if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
		pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
			   MAX_TABLE_TVT_COUNT);
		phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
	}
	phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;

	BUG_ON(!is_power_of_2(phb->dma32_region_size));

	/* Setup config ops */
	hose->ops = &wsp_pcie_pci_ops;

	/* Configure the HW */
	wsp_pcie_configure_hw(hose);

	/* Instantiate IO workarounds */
	iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
	wsp_setup_phb_msi(hose);
#endif

	/* Add to global list */
	list_add(&phb->all, &wsp_phbs);

	return 0;
}

void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find host bridges */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish device-tree linkage */
	pci_devs_phb_init();

	/* Set DMA ops to use TCEs */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}
#define err_debug(fmt...)	pr_debug(fmt)
//#define err_debug(fmt...)

static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
{
	const u32 *prop;
	int hw_irq;

	/* Ok, no interrupts property, let's try to find our child P2P */
	np = of_get_next_child(np, NULL);
	if (np == NULL)
		return 0;

	/* Grab its interrupt map */
	prop = of_get_property(np, "interrupt-map", NULL);
	if (prop == NULL)
		return 0;

	/* Grab one of the interrupts in there, keep the low 4 bits */
	hw_irq = prop[5] & 0xf;

	/* 0..4 for PHB 0 and 5..9 for PHB 1 */
	if (hw_irq < 5)
		hw_irq = 4;
	else
		hw_irq = 9;
	hw_irq |= prop[5] & ~0xf;

	err_debug("PCI: Using 0x%x as error IRQ for %s\n",
		  hw_irq, np->parent->full_name);
	return irq_create_mapping(NULL, hw_irq);
}

static const struct {
	u32 offset;
	const char *name;
} wsp_pci_regs[] = {
#define DREG(x) { PCIE_REG_##x, #x }
#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
	/* Architected registers except CONFIG_ and IODA
	 * to avoid side effects
	 */
	DREG(DMA_CHAN_STATUS),
	DREG(CPU_LOADSTORE_STATUS),
	DREG(LOCK0),
	DREG(LOCK1),
	DREG(PHB_CONFIG),
	DREG(IO_BASE_ADDR),
	DREG(IO_BASE_MASK),
	DREG(IO_START_ADDR),
	DREG(M32A_BASE_ADDR),
	DREG(M32A_BASE_MASK),
	DREG(M32A_START_ADDR),
	DREG(M32B_BASE_ADDR),
	DREG(M32B_BASE_MASK),
	DREG(M32B_START_ADDR),
	DREG(M64_BASE_ADDR),
	DREG(M64_BASE_MASK),
	DREG(M64_START_ADDR),
	DREG(TCE_KILL),
	DREG(LOCK2),
	DREG(PHB_GEN_CAP),
	DREG(PHB_TCE_CAP),
	DREG(PHB_IRQ_CAP),
	DREG(PHB_EEH_CAP),
	DREG(PAPR_ERR_INJ_CONTROL),
	DREG(PAPR_ERR_INJ_ADDR),
	DREG(PAPR_ERR_INJ_MASK),

	/* UTL core regs */
	DUTL(SYS_BUS_CONTROL),
	DUTL(STATUS),
	DUTL(SYS_BUS_AGENT_STATUS),
	DUTL(SYS_BUS_AGENT_ERR_SEV),
	DUTL(SYS_BUS_AGENT_IRQ_EN),
	DUTL(SYS_BUS_BURST_SZ_CONF),
	DUTL(REVISION_ID),
	DUTL(OUT_POST_HDR_BUF_ALLOC),
	DUTL(OUT_POST_DAT_BUF_ALLOC),
	DUTL(IN_POST_HDR_BUF_ALLOC),
	DUTL(IN_POST_DAT_BUF_ALLOC),
	DUTL(OUT_NP_BUF_ALLOC),
	DUTL(IN_NP_BUF_ALLOC),
	DUTL(PCIE_TAGS_ALLOC),
	DUTL(GBIF_READ_TAGS_ALLOC),
	DUTL(PCIE_PORT_CONTROL),
	DUTL(PCIE_PORT_STATUS),
	DUTL(PCIE_PORT_ERROR_SEV),
	DUTL(PCIE_PORT_IRQ_EN),
	DUTL(RC_STATUS),
	DUTL(RC_ERR_SEVERITY),
	DUTL(RC_IRQ_EN),
	DUTL(EP_STATUS),
	DUTL(EP_ERR_SEVERITY),
	DUTL(EP_ERR_IRQ_EN),
	DUTL(PCI_PM_CTRL1),
	DUTL(PCI_PM_CTRL2),

	/* PCIe stack regs */
	DREG(SYSTEM_CONFIG1),
	DREG(SYSTEM_CONFIG2),
	DREG(EP_SYSTEM_CONFIG),
	DREG(EP_FLR),
	DREG(EP_BAR_CONFIG),
	DREG(LINK_CONFIG),
	DREG(PM_CONFIG),
	DREG(DLP_CONTROL),
	DREG(DLP_STATUS),
	DREG(ERR_REPORT_CONTROL),
	DREG(SLOT_CONTROL1),
	DREG(SLOT_CONTROL2),
	DREG(UTL_CONFIG),
	DREG(BUFFERS_CONFIG),
	DREG(ERROR_INJECT),
	DREG(SRIOV_CONFIG),
	DREG(PF0_SRIOV_STATUS),
	DREG(PF1_SRIOV_STATUS),
	DREG(PORT_NUMBER),
	DREG(POR_SYSTEM_CONFIG),

	/* Internal logic regs */
	DREG(PHB_VERSION),
	DREG(RESET),
	DREG(PHB_CONTROL),
	DREG(PHB_TIMEOUT_CONTROL1),
	DREG(PHB_QUIESCE_DMA),
	DREG(PHB_DMA_READ_TAG_ACTV),
	DREG(PHB_TCE_READ_TAG_ACTV),

	/* FIR registers */
	DREG(LEM_FIR_ACCUM),
	DREG(LEM_FIR_AND_MASK),
	DREG(LEM_FIR_OR_MASK),
	DREG(LEM_ACTION0),
	DREG(LEM_ACTION1),
	DREG(LEM_ERROR_MASK),
	DREG(LEM_ERROR_AND_MASK),
	DREG(LEM_ERROR_OR_MASK),

	/* Error traps registers */
	DREG(PHB_ERR_STATUS),
	DREG(PHB_ERR1_STATUS),
	DREG(PHB_ERR_INJECT),
	DREG(PHB_ERR_LEM_ENABLE),
	DREG(PHB_ERR_IRQ_ENABLE),
	DREG(PHB_ERR_FREEZE_ENABLE),
	DREG(PHB_ERR_SIDE_ENABLE),
	DREG(PHB_ERR_LOG_0),
	DREG(PHB_ERR_LOG_1),
	DREG(PHB_ERR_STATUS_MASK),
	DREG(PHB_ERR1_STATUS_MASK),
	DREG(MMIO_ERR_STATUS),
	DREG(MMIO_ERR1_STATUS),
	DREG(MMIO_ERR_INJECT),
	DREG(MMIO_ERR_LEM_ENABLE),
	DREG(MMIO_ERR_IRQ_ENABLE),
	DREG(MMIO_ERR_FREEZE_ENABLE),
	DREG(MMIO_ERR_SIDE_ENABLE),
	DREG(MMIO_ERR_LOG_0),
	DREG(MMIO_ERR_LOG_1),
	DREG(MMIO_ERR_STATUS_MASK),
	DREG(MMIO_ERR1_STATUS_MASK),
	DREG(DMA_ERR_STATUS),
	DREG(DMA_ERR1_STATUS),
	DREG(DMA_ERR_INJECT),
	DREG(DMA_ERR_LEM_ENABLE),
	DREG(DMA_ERR_IRQ_ENABLE),
	DREG(DMA_ERR_FREEZE_ENABLE),
	DREG(DMA_ERR_SIDE_ENABLE),
	DREG(DMA_ERR_LOG_0),
	DREG(DMA_ERR_LOG_1),
	DREG(DMA_ERR_STATUS_MASK),
	DREG(DMA_ERR1_STATUS_MASK),

	/* Debug and Trace registers */
	DREG(PHB_DEBUG_CONTROL0),
	DREG(PHB_DEBUG_STATUS0),
	DREG(PHB_DEBUG_CONTROL1),
	DREG(PHB_DEBUG_STATUS1),
	DREG(PHB_DEBUG_CONTROL2),
	DREG(PHB_DEBUG_STATUS2),
	DREG(PHB_DEBUG_CONTROL3),
	DREG(PHB_DEBUG_STATUS3),
	DREG(PHB_DEBUG_CONTROL4),
	DREG(PHB_DEBUG_STATUS4),
	DREG(PHB_DEBUG_CONTROL5),
	DREG(PHB_DEBUG_STATUS5),

	/* Don't seem to exist ...
	DREG(PHB_DEBUG_CONTROL6),
	DREG(PHB_DEBUG_STATUS6),
	*/
};
static int wsp_pci_regs_show(struct seq_file *m, void *private)
{
	struct wsp_phb *phb = m->private;
	struct pci_controller *hose = phb->hose;
	int i;

	for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
		/* Skip write-only regs */
		if (wsp_pci_regs[i].offset == 0xc08 ||
		    wsp_pci_regs[i].offset == 0xc10 ||
		    wsp_pci_regs[i].offset == 0xc38 ||
		    wsp_pci_regs[i].offset == 0xc40)
			continue;
		seq_printf(m, "0x%03x: 0x%016llx %s\n",
			   wsp_pci_regs[i].offset,
			   in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
			   wsp_pci_regs[i].name);
	}
	return 0;
}

static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wsp_pci_regs_show, inode->i_private);
}

static const struct file_operations wsp_pci_regs_fops = {
	.open = wsp_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int wsp_pci_reg_set(void *data, u64 val)
{
	out_be64((void __iomem *)data, val);
	return 0;
}

static int wsp_pci_reg_get(void *data, u64 *val)
{
	*val = in_be64((void __iomem *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");

static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
	struct wsp_phb *phb = dev_id;
	struct pci_controller *hose = phb->hose;
	irqreturn_t handled = IRQ_NONE;
	struct wsp_pcie_err_log_data ed;

	pr_err("PCI: Error interrupt on %s (PHB %d)\n",
	       hose->dn->full_name, hose->global_number);
 again:
	memset(&ed, 0, sizeof(ed));

	/* Read and clear UTL errors */
	ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
	if (ed.utl_sys_err)
		out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
	ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
	if (ed.utl_port_err)
		out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
	ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
	if (ed.utl_rc_err)
		out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

	/* Read and clear main trap errors */
	ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
	if (ed.phb_err) {
		ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
		ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
		ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
	}
	ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
	if (ed.mmio_err) {
		ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
		ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
		ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
	}
	ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
	if (ed.dma_err) {
		ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
		ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
		ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
	}

	/* Now print things out */
	if (ed.phb_err) {
		pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
		pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
		pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
		pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
	}
	if (ed.mmio_err) {
		pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
		pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
		pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
		pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
	}
	if (ed.dma_err) {
		pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
		pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
		pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
		pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
	}
	if (ed.utl_sys_err)
		pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
	if (ed.utl_port_err)
		pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
	if (ed.utl_rc_err)
		pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);

	/* Interrupts are caused by the error traps. If we had any error there
	 * we loop again in case the UTL buffered some new stuff between
	 * going there and going to the traps
	 */
	if (ed.dma_err || ed.mmio_err || ed.phb_err) {
		handled = IRQ_HANDLED;
		goto again;
	}
	return handled;
}

static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	int err_irq, i, rc;
	char fname[16];

	/* Create a debugfs directory for that PHB */
	sprintf(fname, "phb%d", phb->hose->global_number);
	phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);

	/* Some useful debug output */
	if (phb->ddir) {
		struct dentry *d = debugfs_create_dir("regs", phb->ddir);
		char tmp[64];

		for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
			sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
				wsp_pci_regs[i].name);
			debugfs_create_file(tmp, 0600, d,
					    hose->cfg_data + wsp_pci_regs[i].offset,
					    &wsp_pci_reg_fops);
		}
		debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
	}

	/* Find the IRQ number for that PHB */
	err_irq = irq_of_parse_and_map(hose->dn, 0);
	if (err_irq == 0)
		/* XXX Error IRQ lacking from device-tree */
		err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
	if (err_irq == 0) {
		pr_err("PCI: Failed to fetch error interrupt for %s\n",
		       hose->dn->full_name);
		return;
	}

	/* Request it */
	rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
	if (rc) {
		pr_err("PCI: Failed to request interrupt for %s\n",
		       hose->dn->full_name);
	}

	/* Enable interrupts for all errors for now */
	out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
}

/*
 * This is called later to hook up with the error interrupt
 */
static int __init wsp_setup_pci_late(void)
{
	struct wsp_phb *phb;

	list_for_each_entry(phb, &wsp_phbs, all)
		wsp_setup_pci_err_reporting(phb);

	return 0;
}
arch_initcall(wsp_setup_pci_late);