ppc4xx_pci.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727
  1. /*
  2. * PCI / PCI-X / PCI-Express support for 4xx parts
  3. *
  4. * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
  5. *
  6. * Most PCI Express code is coming from Stefan Roese implementation for
  7. * arch/ppc in the Denx tree, slightly reworked by me.
  8. *
  9. * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
  10. *
  11. * Some of that comes itself from a previous implementation for 440SPE only
  12. * by Roland Dreier:
  13. *
  14. * Copyright (c) 2005 Cisco Systems. All rights reserved.
  15. * Roland Dreier <rolandd@cisco.com>
  16. *
  17. */
  18. #undef DEBUG
  19. #include <linux/kernel.h>
  20. #include <linux/pci.h>
  21. #include <linux/init.h>
  22. #include <linux/of.h>
  23. #include <linux/bootmem.h>
  24. #include <linux/delay.h>
  25. #include <asm/io.h>
  26. #include <asm/pci-bridge.h>
  27. #include <asm/machdep.h>
  28. #include <asm/dcr.h>
  29. #include <asm/dcr-regs.h>
  30. #include <mm/mmu_decl.h>
  31. #include "ppc4xx_pci.h"
  32. static int dma_offset_set;
  33. #define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
  34. #define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
  35. #define RES_TO_U32_LOW(val) \
  36. ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
  37. #define RES_TO_U32_HIGH(val) \
  38. ((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
  39. static inline int ppc440spe_revA(void)
  40. {
  41. /* Catch both 440SPe variants, with and without RAID6 support */
  42. if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
  43. return 1;
  44. else
  45. return 0;
  46. }
/* PCI header fixup, run for every device at probe time.
 *
 * We only act on the 4xx host bridge itself (devfn 0 on a root bus of
 * one of our PCI / PCI-X / PCI-E controllers): we flag controllers with
 * a broken memory-read-multiple implementation and clear out the host
 * bridge BARs so the resource code leaves them alone.
 */
static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
{
	struct pci_controller *hose;
	int i;

	/* Only the host bridge: devfn 0 on a bus with no upstream bridge */
	if (dev->devfn != 0 || dev->bus->self != NULL)
		return;

	hose = pci_bus_to_host(dev->bus);
	if (hose == NULL)
		return;

	/* Only bridges handled by this driver */
	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
		return;

	/* 440EPx/440GRx PCI bridges need the broken-MRM workaround in the
	 * indirect config access code
	 */
	if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
	    of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
	}

	/* Hide the PCI host BARs from the kernel as their content doesn't
	 * fit well in the resource management
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dev->resource[i].start = dev->resource[i].end = 0;
		dev->resource[i].flags = 0;
	}

	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
	       pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
/* Parse the device-tree "dma-ranges" property of a 4xx host bridge and
 * derive the single inbound memory window (returned in @res) through
 * which the bus reaches system RAM.
 *
 * Constraints enforced (we don't support DMA bounce buffers):
 *  - one global DMA offset shared by all hosts (pci_dram_offset),
 *  - CPU side at 0 and PCI side within 32-bit space,
 *  - window is a power of 2, size-aligned, and covers all of memory.
 *
 * With no "dma-ranges" property, defaults to a 2G window at 0.
 * Returns 0 on success, -ENXIO on any violation.
 */
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
					  void __iomem *reg,
					  struct resource *res)
{
	u64 size;
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(hose->dn);
	int np = pna + 5;	/* cells per entry: 3 PCI addr + pna parent + 2 size */

	/* Default */
	res->start = 0;
	size = 0x80000000;
	res->end = size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Get dma-ranges property */
	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
	if (ranges == NULL)
		goto out;

	/* Walk it */
	while ((rlen -= np * 4) >= 0) {
		u32 pci_space = ranges[0];
		u64 pci_addr = of_read_number(ranges + 1, 2);
		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;

		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* We only care about memory */
		if ((pci_space & 0x03000000) != 0x02000000)
			continue;

		/* We currently only support memory at 0, and pci_addr
		 * within 32 bits space
		 */
		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
			printk(KERN_WARNING "%s: Ignored unsupported dma range"
			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
			       hose->dn->full_name,
			       pci_addr, pci_addr + size - 1, cpu_addr);
			continue;
		}

		/* Check if not prefetchable */
		if (!(pci_space & 0x40000000))
			res->flags &= ~IORESOURCE_PREFETCH;

		/* Use that */
		res->start = pci_addr;

		/* Beware of 32 bits resources: clamp instead of wrapping */
		if (sizeof(resource_size_t) == sizeof(u32) &&
		    (pci_addr + size) > 0x100000000ull)
			res->end = 0xffffffff;
		else
			res->end = res->start + size - 1;
		break;	/* only the first usable memory range is used */
	}

	/* We only support one global DMA offset */
	if (dma_offset_set && pci_dram_offset != res->start) {
		printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that we can fit all of memory as we don't support
	 * DMA bounce buffers
	 */
	if (size < total_memory) {
		printk(KERN_ERR "%s: dma-ranges too small "
		       "(size=%llx total_memory=%llx)\n",
		       hose->dn->full_name, size, (u64)total_memory);
		return -ENXIO;
	}

	/* Check we are a power of 2 size and that base is a multiple of size*/
	if ((size & (size - 1)) != 0 ||
	    (res->start & (size - 1)) != 0) {
		printk(KERN_ERR "%s: dma-ranges unaligned\n",
		       hose->dn->full_name);
		return -ENXIO;
	}

	/* Check that we are fully contained within 32 bits space */
	if (res->end > 0xffffffff) {
		printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
		       hose->dn->full_name);
		return -ENXIO;
	}
 out:
	/* Record the global DMA offset (also reached on the default path) */
	dma_offset_set = 1;
	pci_dram_offset = res->start;

	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
	       pci_dram_offset);
	return 0;
}
  163. /*
  164. * 4xx PCI 2.x part
  165. */
  166. static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
  167. void __iomem *reg)
  168. {
  169. u32 la, ma, pcila, pciha;
  170. int i, j;
  171. /* Setup outbound memory windows */
  172. for (i = j = 0; i < 3; i++) {
  173. struct resource *res = &hose->mem_resources[i];
  174. /* we only care about memory windows */
  175. if (!(res->flags & IORESOURCE_MEM))
  176. continue;
  177. if (j > 2) {
  178. printk(KERN_WARNING "%s: Too many ranges\n",
  179. hose->dn->full_name);
  180. break;
  181. }
  182. /* Calculate register values */
  183. la = res->start;
  184. pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
  185. pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
  186. ma = res->end + 1 - res->start;
  187. if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) {
  188. printk(KERN_WARNING "%s: Resource out of range\n",
  189. hose->dn->full_name);
  190. continue;
  191. }
  192. ma = (0xffffffffu << ilog2(ma)) | 0x1;
  193. if (res->flags & IORESOURCE_PREFETCH)
  194. ma |= 0x2;
  195. /* Program register values */
  196. writel(la, reg + PCIL0_PMM0LA + (0x10 * j));
  197. writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j));
  198. writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j));
  199. writel(ma, reg + PCIL0_PMM0MA + (0x10 * j));
  200. j++;
  201. }
  202. }
  203. static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
  204. void __iomem *reg,
  205. const struct resource *res)
  206. {
  207. resource_size_t size = res->end - res->start + 1;
  208. u32 sa;
  209. /* Calculate window size */
  210. sa = (0xffffffffu << ilog2(size)) | 1;
  211. sa |= 0x1;
  212. /* RAM is always at 0 local for now */
  213. writel(0, reg + PCIL0_PTM1LA);
  214. writel(sa, reg + PCIL0_PTM1MS);
  215. /* Map on PCI side */
  216. early_write_config_dword(hose, hose->first_busno, 0,
  217. PCI_BASE_ADDRESS_1, res->start);
  218. early_write_config_dword(hose, hose->first_busno, 0,
  219. PCI_BASE_ADDRESS_2, 0x00000000);
  220. early_write_config_word(hose, hose->first_busno, 0,
  221. PCI_COMMAND, 0x0006);
  222. }
/* Probe and set up one PCI 2.x host bridge described by device node @np:
 * map its internal registers, allocate the pci_controller, disable all
 * windows, then program outbound (PMM) and inbound (PTM) mappings from
 * the device tree.  On any failure the controller is torn down again.
 */
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
	/* NYI */
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int primary = 0;

	/* Check if device is enabled */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "%s: Port disabled via device-tree\n",
		       np->full_name);
		return;
	}

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s: Can't get PCI config register base !",
		       np->full_name);
		return;
	}

	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%s: Can't get PCI internal register base !",
		       np->full_name);
		return;
	}

	/* Check if primary bridge */
	if (of_get_property(np, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
	if (reg == NULL) {
		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space: config address at +0, data at +4 */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows */
	writel(0, reg + PCIL0_PMM0MA);
	writel(0, reg + PCIL0_PMM1MA);
	writel(0, reg + PCIL0_PMM2MA);
	writel(0, reg + PCIL0_PTM1MS);
	writel(0, reg + PCIL0_PTM2MS);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pci_PMMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}
  294. /*
  295. * 4xx PCI-X part
  296. */
  297. static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
  298. void __iomem *reg)
  299. {
  300. u32 lah, lal, pciah, pcial, sa;
  301. int i, j;
  302. /* Setup outbound memory windows */
  303. for (i = j = 0; i < 3; i++) {
  304. struct resource *res = &hose->mem_resources[i];
  305. /* we only care about memory windows */
  306. if (!(res->flags & IORESOURCE_MEM))
  307. continue;
  308. if (j > 1) {
  309. printk(KERN_WARNING "%s: Too many ranges\n",
  310. hose->dn->full_name);
  311. break;
  312. }
  313. /* Calculate register values */
  314. lah = RES_TO_U32_HIGH(res->start);
  315. lal = RES_TO_U32_LOW(res->start);
  316. pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
  317. pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
  318. sa = res->end + 1 - res->start;
  319. if (!is_power_of_2(sa) || sa < 0x100000 ||
  320. sa > 0xffffffffu) {
  321. printk(KERN_WARNING "%s: Resource out of range\n",
  322. hose->dn->full_name);
  323. continue;
  324. }
  325. sa = (0xffffffffu << ilog2(sa)) | 0x1;
  326. /* Program register values */
  327. if (j == 0) {
  328. writel(lah, reg + PCIX0_POM0LAH);
  329. writel(lal, reg + PCIX0_POM0LAL);
  330. writel(pciah, reg + PCIX0_POM0PCIAH);
  331. writel(pcial, reg + PCIX0_POM0PCIAL);
  332. writel(sa, reg + PCIX0_POM0SA);
  333. } else {
  334. writel(lah, reg + PCIX0_POM1LAH);
  335. writel(lal, reg + PCIX0_POM1LAL);
  336. writel(pciah, reg + PCIX0_POM1PCIAH);
  337. writel(pcial, reg + PCIX0_POM1PCIAL);
  338. writel(sa, reg + PCIX0_POM1SA);
  339. }
  340. j++;
  341. }
  342. }
  343. static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
  344. void __iomem *reg,
  345. const struct resource *res,
  346. int big_pim,
  347. int enable_msi_hole)
  348. {
  349. resource_size_t size = res->end - res->start + 1;
  350. u32 sa;
  351. /* RAM is always at 0 */
  352. writel(0x00000000, reg + PCIX0_PIM0LAH);
  353. writel(0x00000000, reg + PCIX0_PIM0LAL);
  354. /* Calculate window size */
  355. sa = (0xffffffffu << ilog2(size)) | 1;
  356. sa |= 0x1;
  357. if (res->flags & IORESOURCE_PREFETCH)
  358. sa |= 0x2;
  359. if (enable_msi_hole)
  360. sa |= 0x4;
  361. writel(sa, reg + PCIX0_PIM0SA);
  362. if (big_pim)
  363. writel(0xffffffff, reg + PCIX0_PIM0SAH);
  364. /* Map on PCI side */
  365. writel(0x00000000, reg + PCIX0_BAR0H);
  366. writel(res->start, reg + PCIX0_BAR0L);
  367. writew(0x0006, reg + PCIX0_COMMAND);
  368. }
/* Probe and set up one PCI-X host bridge described by device node @np:
 * map its internal registers, allocate the pci_controller, disable all
 * windows, then program outbound (POM) and inbound (PIM) mappings from
 * the device tree.  On any failure the controller is torn down again.
 */
static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int big_pim = 0, msi = 0, primary = 0;

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%s:Can't get PCI-X config register base !",
		       np->full_name);
		return;
	}

	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
		       np->full_name);
		return;
	}

	/* Check if it supports large PIMs (440GX) */
	if (of_get_property(np, "large-inbound-windows", NULL))
		big_pim = 1;

	/* Check if we should enable MSIs inbound hole */
	if (of_get_property(np, "enable-msi-hole", NULL))
		msi = 1;

	/* Check if primary bridge */
	if (of_get_property(np, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, rsrc_reg.end + 1 - rsrc_reg.start);
	if (reg == NULL) {
		printk(KERN_ERR "%s: Can't map registers !", np->full_name);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space: config address at +0, data at +4 */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows */
	writel(0, reg + PCIX0_POM0SA);
	writel(0, reg + PCIX0_POM1SA);
	writel(0, reg + PCIX0_POM2SA);
	writel(0, reg + PCIX0_PIM0SA);
	writel(0, reg + PCIX0_PIM1SA);
	writel(0, reg + PCIX0_PIM2SA);
	if (big_pim) {
		writel(0, reg + PCIX0_PIM0SAH);
		writel(0, reg + PCIX0_PIM2SAH);
	}

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pcix_POMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}
  444. #ifdef CONFIG_PPC4xx_PCI_EXPRESS
  445. /*
  446. * 4xx PCI-Express part
  447. *
  448. * We support 3 parts currently based on the compatible property:
  449. *
  450. * ibm,plb-pciex-440spe
  451. * ibm,plb-pciex-405ex
  452. * ibm,plb-pciex-460ex
  453. *
  454. * Anything else will be rejected for now as they are all subtly
  455. * different unfortunately.
  456. *
  457. */
/* Maximum number of PCI buses a PCIe port's config space mapping covers */
#define MAX_PCIE_BUS_MAPPED	0x40

/* Per-port state for one 4xx PCI Express root/endpoint port */
struct ppc4xx_pciex_port
{
	struct pci_controller	*hose;		/* associated host controller */
	struct device_node	*node;		/* device-tree node of the port */
	unsigned int		index;		/* port number on the core */
	int			endpoint;	/* 1 = endpoint, 0 = root port */
	int			link;		/* link state */
	int			has_ibpre;	/* port revision has "ibpre" feature */
	unsigned int		sdr_base;	/* base of this port's SDR registers */
	dcr_host_t		dcrs;		/* mapped DCR range */
	struct resource		cfg_space;	/* config space resource */
	struct resource		utl_regs;	/* UTL register resource */
	void __iomem		*utl_base;	/* mapped UTL registers */
};

/* Array of ports, sized by the core_init() return value */
static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;

/* Per-SoC hooks: global core init, per-port hw init, UTL setup */
struct ppc4xx_pciex_hwops
{
	int (*core_init)(struct device_node *np);
	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
	int (*setup_utl)(struct ppc4xx_pciex_port *port);
};

/* Selected at probe time based on the compatible property */
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
  482. #ifdef CONFIG_44x
/* Check various reset bits of the 440SPe PCIe core.
 *
 * Reads the RCSSET registers of all three port SDR blocks and verifies
 * that each reset/status bit is in the expected state after (re-)reset.
 * Returns 0 when everything looks sane, -1 if any check fails (all
 * checks are run so every problem gets logged).
 */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
	u32 valPE0, valPE1, valPE2;
	int err = 0;

	/* SDR0_PEGPLLLCT1 reset */
	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
		/*
		 * the PCIe core was probably already initialised
		 * by firmware - let's re-reset RCSSET regs
		 *
		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
		 */
		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
	}

	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);

	/* SDR0_PExRCSSET rstgu — must be asserted on all ports */
	if (!(valPE0 & 0x01000000) ||
	    !(valPE1 & 0x01000000) ||
	    !(valPE2 & 0x01000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstdl — must be asserted on all ports */
	if (!(valPE0 & 0x00010000) ||
	    !(valPE1 & 0x00010000) ||
	    !(valPE2 & 0x00010000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstpyn — must be clear on all ports */
	if ((valPE0 & 0x00001000) ||
	    (valPE1 & 0x00001000) ||
	    (valPE2 & 0x00001000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET hldplb — must be clear on all ports */
	if ((valPE0 & 0x10000000) ||
	    (valPE1 & 0x10000000) ||
	    (valPE2 & 0x10000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rdy — must be clear on all ports */
	if ((valPE0 & 0x00100000) ||
	    (valPE1 & 0x00100000) ||
	    (valPE2 & 0x00100000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET shutdown — must be clear on all ports */
	if ((valPE0 & 0x00000100) ||
	    (valPE1 & 0x00000100) ||
	    (valPE2 & 0x00000100)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
		err = -1;
	}

	return err;
}
  548. /* Global PCIe core initializations for 440SPe core */
  549. static int __init ppc440spe_pciex_core_init(struct device_node *np)
  550. {
  551. int time_out = 20;
  552. /* Set PLL clock receiver to LVPECL */
  553. dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
  554. /* Shouldn't we do all the calibration stuff etc... here ? */
  555. if (ppc440spe_pciex_check_reset(np))
  556. return -ENXIO;
  557. if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
  558. printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
  559. "failed (0x%08x)\n",
  560. mfdcri(SDR0, PESDR0_PLLLCT2));
  561. return -1;
  562. }
  563. /* De-assert reset of PCIe PLL, wait for lock */
  564. dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
  565. udelay(3);
  566. while (time_out) {
  567. if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
  568. time_out--;
  569. udelay(1);
  570. } else
  571. break;
  572. }
  573. if (!time_out) {
  574. printk(KERN_INFO "PCIE: VCO output not locked\n");
  575. return -1;
  576. }
  577. pr_debug("PCIE initialization OK\n");
  578. return 3;
  579. }
  580. static int ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
  581. {
  582. u32 val = 1 << 24;
  583. if (port->endpoint)
  584. val = PTYPE_LEGACY_ENDPOINT << 20;
  585. else
  586. val = PTYPE_ROOT_PORT << 20;
  587. if (port->index == 0)
  588. val |= LNKW_X8 << 12;
  589. else
  590. val |= LNKW_X4 << 12;
  591. mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
  592. mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
  593. if (ppc440spe_revA())
  594. mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
  595. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
  596. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
  597. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
  598. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
  599. if (port->index == 0) {
  600. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
  601. 0x35000000);
  602. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
  603. 0x35000000);
  604. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
  605. 0x35000000);
  606. mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
  607. 0x35000000);
  608. }
  609. dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
  610. (1 << 24) | (1 << 16), 1 << 12);
  611. return 0;
  612. }
/* Rev A 440SPe port init: just the common init, no has_ibpre */
static int ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	return ppc440spe_pciex_init_port_hw(port);
}
/* Rev B 440SPe port init: common init plus the has_ibpre flag, which
 * this silicon revision supports (flag consumed elsewhere in the driver)
 */
static int ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	int rc = ppc440spe_pciex_init_port_hw(port);

	port->has_ibpre = 1;

	return rc;
}
/* UTL (transaction layer) setup for rev A 440SPe ports */
static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* XXX Check what that value means... I hate magic */
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	return 0;
}
/* UTL setup for rev B 440SPe ports — only the PBCTL tweak is needed */
static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Report CRS to the operating system */
	out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);

	return 0;
}
/* Hook tables for the two 440SPe silicon revisions */
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speA_pciex_init_port_hw,
	.setup_utl	= ppc440speA_pciex_init_utl,
};

static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speB_pciex_init_port_hw,
	.setup_utl	= ppc440speB_pciex_init_utl,
};
  658. static int __init ppc460ex_pciex_core_init(struct device_node *np)
  659. {
  660. /* Nothing to do, return 2 ports */
  661. return 2;
  662. }
/* Per-port hardware init for the 460EX PCIe core: program port type,
 * link width and UTL settings, per-lane PHY registers, then sequence
 * the port through reset via RCSSET and wait for the PHY.  Always
 * returns 0 (the PHY poll below has no timeout yet).
 */
static int ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;
	u32 utlset1;

	/* Port type: endpoint vs root port */
	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	/* Port 0 is x1, port 1 is x4; UTLSET1 differs accordingly */
	if (port->index == 0) {
		val |= LNKW_X1 << 12;
		utlset1 = 0x20000000;
	} else {
		val |= LNKW_X4 << 12;
		utlset1 = 0x20101101;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);

	/* Per-lane PHY settings (CDR control, drive strength, clock) —
	 * magic values, presumably from the 460EX manual; TODO confirm
	 */
	switch (port->index) {
	case 0:
		mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
		break;

	case 1:
		mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);

		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
		break;
	}

	/* Assert RSTGU + RSTPYN to start the reset sequence */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	/* XXX FIXME add timeout */
	switch (port->index) {
	case 0:
		while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	case 1:
		while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	}

	/* Release RSTGU/RSTDL, keep RSTPYN asserted */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
		~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
	       PESDRx_RCSSET_RSTPYN);

	port->has_ibpre = 1;

	return 0;
}
/* UTL (transaction layer) setup for 460EX ports */
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_PBCTL,  0x0800000c);
	out_be32(port->utl_base + PEUTL_OUTTR,  0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,   0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,  0x00000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,   0x80800066);

	return 0;
}
/* Hook table selected for the 460EX flavour of the PCIe core */
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
	.core_init	= ppc460ex_pciex_core_init,
	.port_init_hw	= ppc460ex_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
};
  749. #endif /* CONFIG_44x */
  750. #ifdef CONFIG_40x
/*
 * 405EX core-level init: no global setup is required, just report the
 * number of PCIe ports on this SoC to the caller.
 */
static int __init ppc405ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}
  756. static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
  757. {
  758. /* Assert the PE0_PHY reset */
  759. mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
  760. msleep(1);
  761. /* deassert the PE0_hotreset */
  762. if (port->endpoint)
  763. mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
  764. else
  765. mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
  766. /* poll for phy !reset */
  767. /* XXX FIXME add timeout */
  768. while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
  769. ;
  770. /* deassert the PE0_gpl_utl_reset */
  771. mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
  772. }
/*
 * Per-port HW init for the 405EX: program the SDR link/UTL/PHY setup
 * registers, optionally reset the PHY, and enable guarded accesses.
 * Returns 0 (this path has no failure mode).
 */
static int ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	/* Port personality: legacy endpoint or root port */
	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT;
	else
		val = PTYPE_ROOT_PORT;

	/* DLPSET: enable (bit 24), port type, x1 link width */
	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
	       1 << 24 | val << 20 | LNKW_X1 << 12);

	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);

	/*
	 * Only reset the PHY when no link is currently established.
	 * This is for the Atheros PCIe board which has problems to establish
	 * the link (again) after this PHY reset. All other currently tested
	 * PCIe boards don't show this problem.
	 * This has to be re-tested and fixed in a later release!
	 */
	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
	if (!(val & 0x00001000))
		ppc405ex_pcie_phy_reset(port);

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */

	/* 405EX has in-band presence detect */
	port->has_ibpre = 1;

	return 0;
}
/*
 * Program the 405EX UTL (Upper Transaction Layer) registers for a port.
 * The values and the write order follow the hardware bring-up sequence;
 * do not reorder.
 */
static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
	out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
	out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);

	return 0;
}
/* Hook table selected for the 405EX flavour of the PCIe core */
static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
	.core_init	= ppc405ex_pciex_core_init,
	.port_init_hw	= ppc405ex_pciex_init_port_hw,
	.setup_utl	= ppc405ex_pciex_init_utl,
};
  823. #endif /* CONFIG_40x */
  824. /* Check that the core has been initied and if not, do it */
  825. static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
  826. {
  827. static int core_init;
  828. int count = -ENODEV;
  829. if (core_init++)
  830. return 0;
  831. #ifdef CONFIG_44x
  832. if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
  833. if (ppc440spe_revA())
  834. ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
  835. else
  836. ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
  837. }
  838. if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
  839. ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
  840. #endif /* CONFIG_44x */
  841. #ifdef CONFIG_40x
  842. if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
  843. ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
  844. #endif
  845. if (ppc4xx_pciex_hwops == NULL) {
  846. printk(KERN_WARNING "PCIE: unknown host type %s\n",
  847. np->full_name);
  848. return -ENODEV;
  849. }
  850. count = ppc4xx_pciex_hwops->core_init(np);
  851. if (count > 0) {
  852. ppc4xx_pciex_ports =
  853. kzalloc(count * sizeof(struct ppc4xx_pciex_port),
  854. GFP_KERNEL);
  855. if (ppc4xx_pciex_ports) {
  856. ppc4xx_pciex_port_count = count;
  857. return 0;
  858. }
  859. printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
  860. return -ENOMEM;
  861. }
  862. return -ENODEV;
  863. }
/*
 * Program the port's address decoding: CFG and REG windows from the
 * device-tree resources, and disable every other outbound window.
 */
static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
	/* We map PCI Express configuration based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
		  RES_TO_U32_HIGH(port->cfg_space.start));
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
		  RES_TO_U32_LOW(port->cfg_space.start));

	/* XXX FIXME: Use size from reg property. For now, map 512M */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);

	/* We map UTL registers based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
		  RES_TO_U32_HIGH(port->utl_regs.start));
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
		  RES_TO_U32_LOW(port->utl_regs.start));

	/* XXX FIXME: Use size from reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);

	/* Disable all other outbound windows */
	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
/*
 * Poll the SDR register at port->sdr_base + sdr_offset until
 * (reg & mask) == value, sleeping 1 ms between reads, for at most
 * timeout_ms iterations.  Returns 0 on success, -1 on timeout.
 */
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
					   unsigned int sdr_offset,
					   unsigned int mask,
					   unsigned int value,
					   int timeout_ms)
{
	u32 val;

	while(timeout_ms--) {
		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
		if ((val & mask) == value) {
			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
				 port->index, sdr_offset, timeout_ms, val);
			return 0;
		}
		msleep(1);
	}
	return -1;
}
/*
 * Bring up a single PCIe port: run the SoC-specific HW init, wait for
 * port reset to complete, detect a device and train the link, program
 * the address decoding, map and set up the UTL registers, then wait
 * for VC0 before asserting RDY.
 *
 * Returns 0 even when no link comes up (config accesses are filtered
 * via port->link instead); returns non-zero only on hard failure.
 */
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
	int rc = 0;

	/* Init HW */
	if (ppc4xx_pciex_hwops->port_init_hw)
		rc = ppc4xx_pciex_hwops->port_init_hw(port);
	if (rc != 0)
		return rc;

	printk(KERN_INFO "PCIE%d: Checking link...\n",
	       port->index);

	/* Wait for reset to complete */
	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
		       port->index);
		return -1;
	}

	/* Check for card presence detect if supported, if not, just wait for
	 * link unconditionally.
	 *
	 * note that we don't fail if there is no link, we just filter out
	 * config space accesses. That way, it will be easier to implement
	 * hotplug later on.
	 */
	if (!port->has_ibpre ||
	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
				      1 << 28, 1 << 28, 100)) {
		printk(KERN_INFO
		       "PCIE%d: Device detected, waiting for link...\n",
		       port->index);
		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
					     0x1000, 0x1000, 2000))
			printk(KERN_WARNING
			       "PCIE%d: Link up failed\n", port->index);
		else {
			printk(KERN_INFO
			       "PCIE%d: link is up !\n", port->index);
			port->link = 1;
		}
	} else
		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);

	/*
	 * Initialize mapping: disable all regions and configure
	 * CFG and REG regions based on resources in the device tree
	 */
	ppc4xx_pciex_port_init_mapping(port);

	/*
	 * Map UTL
	 */
	port->utl_base = ioremap(port->utl_regs.start, 0x100);
	BUG_ON(port->utl_base == NULL);

	/*
	 * Setup UTL registers --BenH.
	 */
	if (ppc4xx_pciex_hwops->setup_utl)
		ppc4xx_pciex_hwops->setup_utl(port);

	/*
	 * Check for VC0 active and assert RDY.
	 */
	if (port->link &&
	    ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
				     1 << 16, 1 << 16, 5000)) {
		printk(KERN_INFO "PCIE%d: VC0 not active\n", port->index);
		port->link = 0;
	}

	/* Assert RDY (RCSSET bit 20), then let things settle */
	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
	msleep(100);

	return 0;
}
/*
 * Validate a config-space access to bus/devfn through this port.
 * Returns 0 when the access may proceed, PCIBIOS_DEVICE_NOT_FOUND when
 * it must be filtered out (out of range, no link, or a device number
 * that cannot exist on a point-to-point PCIe segment).
 */
static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
				     struct pci_bus *bus,
				     unsigned int devfn)
{
	static int message;	/* warn only once about out-of-range probes */

	/* Endpoint can not generate upstream(remote) config cycles */
	if (port->endpoint && bus->number != port->hose->first_busno)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check we are within the mapped range */
	if (bus->number > port->hose->last_busno) {
		if (!message) {
			printk(KERN_WARNING "Warning! Probing bus %u"
			       " out of range !\n", bus->number);
			message++;
		}
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* The root complex has only one device / function */
	if (bus->number == port->hose->first_busno && devfn != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* The other side of the RC has only one device as well */
	if (bus->number == (port->hose->first_busno + 1) &&
	    PCI_SLOT(devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check if we have a link */
	if ((bus->number != port->hose->first_busno) && !port->link)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}
  1001. static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
  1002. struct pci_bus *bus,
  1003. unsigned int devfn)
  1004. {
  1005. int relbus;
  1006. /* Remove the casts when we finally remove the stupid volatile
  1007. * in struct pci_controller
  1008. */
  1009. if (bus->number == port->hose->first_busno)
  1010. return (void __iomem *)port->hose->cfg_addr;
  1011. relbus = bus->number - (port->hose->first_busno + 1);
  1012. return (void __iomem *)port->hose->cfg_data +
  1013. ((relbus << 20) | (devfn << 12));
  1014. }
  1015. static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
  1016. int offset, int len, u32 *val)
  1017. {
  1018. struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
  1019. struct ppc4xx_pciex_port *port =
  1020. &ppc4xx_pciex_ports[hose->indirect_type];
  1021. void __iomem *addr;
  1022. u32 gpl_cfg;
  1023. BUG_ON(hose != port->hose);
  1024. if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
  1025. return PCIBIOS_DEVICE_NOT_FOUND;
  1026. addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
  1027. /*
  1028. * Reading from configuration space of non-existing device can
  1029. * generate transaction errors. For the read duration we suppress
  1030. * assertion of machine check exceptions to avoid those.
  1031. */
  1032. gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
  1033. dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
  1034. /* Make sure no CRS is recorded */
  1035. out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
  1036. switch (len) {
  1037. case 1:
  1038. *val = in_8((u8 *)(addr + offset));
  1039. break;
  1040. case 2:
  1041. *val = in_le16((u16 *)(addr + offset));
  1042. break;
  1043. default:
  1044. *val = in_le32((u32 *)(addr + offset));
  1045. break;
  1046. }
  1047. pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
  1048. " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
  1049. bus->number, hose->first_busno, hose->last_busno,
  1050. devfn, offset, len, addr + offset, *val);
  1051. /* Check for CRS (440SPe rev B does that for us but heh ..) */
  1052. if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
  1053. pr_debug("Got CRS !\n");
  1054. if (len != 4 || offset != 0)
  1055. return PCIBIOS_DEVICE_NOT_FOUND;
  1056. *val = 0xffff0001;
  1057. }
  1058. dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
  1059. return PCIBIOS_SUCCESSFUL;
  1060. }
/*
 * pci_ops .write hook: write 1, 2 or 4 bytes of config space with
 * machine-check assertion suppressed around the access.
 */
static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Accessing configuration space of non-existing device can
	 * generate transaction errors. For the access duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, val);

	switch (len) {
	case 1:
		out_8((u8 *)(addr + offset), val);
		break;
	case 2:
		out_le16((u16 *)(addr + offset), val);
		break;
	default:
		out_le32((u32 *)(addr + offset), val);
		break;
	}

	/* Restore previous machine-check configuration */
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors installed on every PCIe hose */
static struct pci_ops ppc4xx_pciex_pci_ops =
{
	.read  = ppc4xx_pciex_read_config,
	.write = ppc4xx_pciex_write_config,
};
/*
 * Configure the outbound windows (POMs) of a PCIe port from the hose's
 * memory resources (at most two memory windows are supported by the
 * hardware), plus a fixed 64K I/O window starting at PCI address 0.
 */
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase)
{
	u32 lah, lal, pciah, pcial, sa;
	int i, j;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%s: Too many ranges\n",
			       port->node->full_name);
			break;
		}

		/* Calculate register values */
		lah = RES_TO_U32_HIGH(res->start);
		lal = RES_TO_U32_LOW(res->start);
		pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
		pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
		sa = res->end + 1 - res->start;
		/* HW needs a power-of-2 size between 1M and 4G */
		if (!is_power_of_2(sa) || sa < 0x100000 ||
		    sa > 0xffffffffu) {
			printk(KERN_WARNING "%s: Resource out of range\n",
			       port->node->full_name);
			continue;
		}
		/* Turn the size into the window mask, enable bit in bit 0 */
		sa = (0xffffffffu << ilog2(sa)) | 0x1;

		/* Program register values */
		switch (j) {
		case 0:
			out_le32(mbase + PECFG_POM0LAH, pciah);
			out_le32(mbase + PECFG_POM0LAL, pcial);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
			break;
		case 1:
			out_le32(mbase + PECFG_POM1LAH, pciah);
			out_le32(mbase + PECFG_POM1LAL, pcial);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
			dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
			break;
		}
		j++;
	}

	/* Configure IO, always 64K starting at 0 */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		lah = RES_TO_U32_HIGH(hose->io_base_phys);
		lal = RES_TO_U32_LOW(hose->io_base_phys);
		out_le32(mbase + PECFG_POM2LAH, 0);
		out_le32(mbase + PECFG_POM2LAL, 0);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
	}
}
  1165. static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
  1166. struct pci_controller *hose,
  1167. void __iomem *mbase,
  1168. struct resource *res)
  1169. {
  1170. resource_size_t size = res->end - res->start + 1;
  1171. u64 sa;
  1172. if (port->endpoint) {
  1173. resource_size_t ep_addr = 0;
  1174. resource_size_t ep_size = 32 << 20;
  1175. /* Currently we map a fixed 64MByte window to PLB address
  1176. * 0 (SDRAM). This should probably be configurable via a dts
  1177. * property.
  1178. */
  1179. /* Calculate window size */
  1180. sa = (0xffffffffffffffffull << ilog2(ep_size));;
  1181. /* Setup BAR0 */
  1182. out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
  1183. out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
  1184. PCI_BASE_ADDRESS_MEM_TYPE_64);
  1185. /* Disable BAR1 & BAR2 */
  1186. out_le32(mbase + PECFG_BAR1MPA, 0);
  1187. out_le32(mbase + PECFG_BAR2HMPA, 0);
  1188. out_le32(mbase + PECFG_BAR2LMPA, 0);
  1189. out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
  1190. out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
  1191. out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
  1192. out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
  1193. } else {
  1194. /* Calculate window size */
  1195. sa = (0xffffffffffffffffull << ilog2(size));;
  1196. if (res->flags & IORESOURCE_PREFETCH)
  1197. sa |= 0x8;
  1198. out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
  1199. out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
  1200. /* The setup of the split looks weird to me ... let's see
  1201. * if it works
  1202. */
  1203. out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
  1204. out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
  1205. out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
  1206. out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
  1207. out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
  1208. out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
  1209. out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
  1210. out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
  1211. }
  1212. /* Enable inbound mapping */
  1213. out_le32(mbase + PECFG_PIMEN, 0x1);
  1214. /* Enable I/O, Mem, and Busmaster cycles */
  1215. out_le16(mbase + PCI_COMMAND,
  1216. in_le16(mbase + PCI_COMMAND) |
  1217. PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
  1218. }
/*
 * Build the Linux pci_controller ("hose") for an initialized port: map
 * the internal and (for root complexes) external config spaces, wire up
 * the config ops, parse the DT ranges, program POMs/PIMs, and give the
 * root complex / endpoint its IDs and class code.  On any failure the
 * hose and mappings created so far are torn down.
 */
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	const int *bus_range;
	int primary = 0, busses;
	void __iomem *mbase = NULL, *cfg_data = NULL;
	const u32 *pval;
	u32 val;

	/* Check if primary bridge */
	if (of_get_property(port->node, "primary", NULL))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(port->node, "bus-range", NULL);

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(port->node);
	if (!hose)
		goto fail;

	/* We stick the port number in "indirect_type" so the config space
	 * ops can retrieve the port data structure easily
	 */
	hose->indirect_type = port->index;

	/* Get bus range */
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Because of how big mapping the config space is (1M per bus), we
	 * limit how many busses we support. In the long run, we could replace
	 * that with something akin to kmap_atomic instead. We set aside 1 bus
	 * for the host itself too.
	 */
	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
	if (busses > MAX_PCIE_BUS_MAPPED) {
		busses = MAX_PCIE_BUS_MAPPED;
		hose->last_busno = hose->first_busno + busses;
	}

	if (!port->endpoint) {
		/* Only map the external config space in cfg_data for
		 * PCIe root-complexes. External space is 1M per bus
		 */
		cfg_data = ioremap(port->cfg_space.start +
				   (hose->first_busno + 1) * 0x100000,
				   busses * 0x100000);
		if (cfg_data == NULL) {
			printk(KERN_ERR "%s: Can't map external config space !",
			       port->node->full_name);
			goto fail;
		}
		hose->cfg_data = cfg_data;
	}

	/* Always map the host config space in cfg_addr.
	 * Internal space is 4K
	 */
	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
	if (mbase == NULL) {
		printk(KERN_ERR "%s: Can't map internal config space !",
		       port->node->full_name);
		goto fail;
	}
	hose->cfg_addr = mbase;

	pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
		 hose->first_busno, hose->last_busno);
	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
		 hose->cfg_addr, hose->cfg_data);

	/* Setup config space */
	hose->ops = &ppc4xx_pciex_pci_ops;
	port->hose = hose;
	mbase = (void __iomem *)hose->cfg_addr;

	if (!port->endpoint) {
		/*
		 * Set bus numbers on our root port
		 */
		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
	}

	/*
	 * OMRs are already reset, also disable PIMs
	 */
	out_le32(mbase + PECFG_PIMEN, 0);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, port->node, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pciex_POMs(port, hose, mbase);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);

	/* The root complex doesn't show up if we don't set some vendor
	 * and device IDs into it. The defaults below are the same bogus
	 * one that the initial code in arch/ppc had. This can be
	 * overwritten by setting the "vendor-id/device-id" properties
	 * in the pciex node.
	 */

	/* Get the (optional) vendor-/device-id from the device-tree */
	pval = of_get_property(port->node, "vendor-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xaaa0 + port->index;
		else
			val = 0xeee0 + port->index;
	}
	out_le16(mbase + 0x200, val);

	pval = of_get_property(port->node, "device-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xbed0 + port->index;
		else
			val = 0xfed0 + port->index;
	}
	out_le16(mbase + 0x202, val);

	if (!port->endpoint) {
		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
		out_le32(mbase + 0x208, 0x06040001);

		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
		       port->index);
	} else {
		/* Set Class Code to Processor/PPC */
		out_le32(mbase + 0x208, 0x0b200001);

		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
		       port->index);
	}

	return;
 fail:
	/* Shared teardown for all failure points above */
	if (hose)
		pcibios_free_controller(hose);
	if (cfg_data)
		iounmap(cfg_data);
	if (mbase)
		iounmap(mbase);
}
  1354. static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
  1355. {
  1356. struct ppc4xx_pciex_port *port;
  1357. const u32 *pval;
  1358. int portno;
  1359. unsigned int dcrs;
  1360. const char *val;
  1361. /* First, proceed to core initialization as we assume there's
  1362. * only one PCIe core in the system
  1363. */
  1364. if (ppc4xx_pciex_check_core_init(np))
  1365. return;
  1366. /* Get the port number from the device-tree */
  1367. pval = of_get_property(np, "port", NULL);
  1368. if (pval == NULL) {
  1369. printk(KERN_ERR "PCIE: Can't find port number for %s\n",
  1370. np->full_name);
  1371. return;
  1372. }
  1373. portno = *pval;
  1374. if (portno >= ppc4xx_pciex_port_count) {
  1375. printk(KERN_ERR "PCIE: port number out of range for %s\n",
  1376. np->full_name);
  1377. return;
  1378. }
  1379. port = &ppc4xx_pciex_ports[portno];
  1380. port->index = portno;
  1381. /*
  1382. * Check if device is enabled
  1383. */
  1384. if (!of_device_is_available(np)) {
  1385. printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
  1386. return;
  1387. }
  1388. port->node = of_node_get(np);
  1389. pval = of_get_property(np, "sdr-base", NULL);
  1390. if (pval == NULL) {
  1391. printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
  1392. np->full_name);
  1393. return;
  1394. }
  1395. port->sdr_base = *pval;
  1396. /* Check if device_type property is set to "pci" or "pci-endpoint".
  1397. * Resulting from this setup this PCIe port will be configured
  1398. * as root-complex or as endpoint.
  1399. */
  1400. val = of_get_property(port->node, "device_type", NULL);
  1401. if (!strcmp(val, "pci-endpoint")) {
  1402. port->endpoint = 1;
  1403. } else if (!strcmp(val, "pci")) {
  1404. port->endpoint = 0;
  1405. } else {
  1406. printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
  1407. np->full_name);
  1408. return;
  1409. }
  1410. /* Fetch config space registers address */
  1411. if (of_address_to_resource(np, 0, &port->cfg_space)) {
  1412. printk(KERN_ERR "%s: Can't get PCI-E config space !",
  1413. np->full_name);
  1414. return;
  1415. }
  1416. /* Fetch host bridge internal registers address */
  1417. if (of_address_to_resource(np, 1, &port->utl_regs)) {
  1418. printk(KERN_ERR "%s: Can't get UTL register base !",
  1419. np->full_name);
  1420. return;
  1421. }
  1422. /* Map DCRs */
  1423. dcrs = dcr_resource_start(np, 0);
  1424. if (dcrs == 0) {
  1425. printk(KERN_ERR "%s: Can't get DCR register base !",
  1426. np->full_name);
  1427. return;
  1428. }
  1429. port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
  1430. /* Initialize the port specific registers */
  1431. if (ppc4xx_pciex_port_init(port)) {
  1432. printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
  1433. return;
  1434. }
  1435. /* Setup the linux hose data structure */
  1436. ppc4xx_pciex_port_setup_hose(port);
  1437. }
  1438. #endif /* CONFIG_PPC4xx_PCI_EXPRESS */
/*
 * Scan the device tree for every supported 4xx host bridge flavour
 * (PCIe, PCI-X, plain PCI) and probe each matching node.  Registered
 * as an arch_initcall below.
 */
static int __init ppc4xx_pci_find_bridges(void)
{
	struct device_node *np;

#ifdef CONFIG_PPC4xx_PCI_EXPRESS
	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
		ppc4xx_probe_pciex_bridge(np);
#endif
	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
		ppc4xx_probe_pcix_bridge(np);
	for_each_compatible_node(np, NULL, "ibm,plb-pci")
		ppc4xx_probe_pci_bridge(np);

	return 0;
}
  1452. arch_initcall(ppc4xx_pci_find_bridges);