/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"
#define PCI_RETRY_MAX	3
static int limit_pci_retries = 1;	/* Set Retry Error on. */

/*
 * Table defines
 * Each entry size is 4 MB * 1024 entries = 4 GB I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;

/*
 * Lookup Tables.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);
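/*
 * Each IOMM_TABLE_ENTRY_SIZE (4 MB) slot of the virtual window that
 * starts at BASE_IO_MEMORY maps to one (device node, BAR) pair;
 * iomm_table[] and iobar_table[] record that pair so that
 * xlate_iomm_address() can recover it from an ioremapped address.
 */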
/*
 * iomm_table_allocate_entry
 *
 * Adds pci_dev entry in address translation table
 *
 * - Allocates the number of entries required in the table based on
 *   BAR size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of entry size.
 * - current_iomm_table_entry is incremented to keep track of the last
 *   entry.
 * - Builds the resource entry for allocated BARs.
 */
static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip Allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}
/*
 * allocate_device_bars
 *
 * - Allocates ALL pci_dev BARs and updates the resources with the
 *   BAR values.  BARs with zero length are skipped.
 * - The BAR size comes from pci_resource_len();
 *   iomm_table_allocate_entry() allocates the table entries for
 *   each BAR.
 * - Loops through the BAR resources (0 - 5) including the ROM,
 *   which is resource 6 (PCI_ROM_RESOURCE).
 */
static void __init allocate_device_bars(struct pci_dev *dev)
{
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
		iomm_table_allocate_entry(dev, bar_num);
}
/*
 * Log error information to system console.
 * Filter out the device not there errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_log_error(char *error, int bus, int subbus,
		int agent, int hv_res)
{
	if (hv_res == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X  Rc: 0x%04X\n",
	       error, bus, subbus, agent, hv_res);
}
/*
 * Look down the chain to find the matching device node.
 */
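/*
 * Note: this is a linear scan of every device-tree node; it is only
 * called at boot time and from the config-space accessors.
 */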
static struct device_node *find_device_node(int bus, int devfn)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);

		if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
			return node;
	}
	return NULL;
}
/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int num_dev = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		struct pci_dn *pdn;
		const u32 *agent;

		node = find_device_node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);
		if (!node) {
			printk("PCI: Device Tree not found for 0x%016lX\n",
					(unsigned long)pdev);
			continue;
		}
		pdn = PCI_DN(node);
		agent = of_get_property(node, "linux,agent-id", NULL);
		if (pdn && agent) {
			u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
					pdn->bussubno);
			int err;

			err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
					*agent, irq);
			if (err)
				pci_log_error("Connect Bus Unit",
					pdn->busno, pdn->bussubno, *agent, err);
			else {
				err = HvCallPci_configStore8(pdn->busno,
					pdn->bussubno, *agent,
					PCI_INTERRUPT_LINE, irq);
				if (err)
					pci_log_error("PciCfgStore Irq Failed!",
						pdn->busno, pdn->bussubno,
						*agent, err);
				else
					pdev->irq = irq;
			}
		}

		num_dev++;
		pdev->sysdata = node;
		PCI_DN(node)->pcidev = pdev;
		allocate_device_bars(pdev);
		iSeries_Device_Information(pdev, num_dev, pdn->busno,
				pdn->bussubno);
		iommu_devnode_init_iSeries(pdev, node);
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}
/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
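/*
 * The (size - 1) & 3 index used below maps access sizes 1, 2 and 4 to
 * entries 0, 1 and 3, so the last two slots both point at the 32-bit
 * call.
 */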
/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_device_node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}
/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_device_node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}
static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};
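/*
 * These ops are installed on every host bridge created in
 * iSeries_pcibios_init() below.
 */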
/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment Retry Count, if exceeds max, panic partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int check_return_code(char *type, struct device_node *dn,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(dn);

		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X  I/O Error(%2d): 0x%04X\n",
				type, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If exceeded, panic the system.
		 */
		if (((*retry) > PCI_RETRY_MAX) &&
				(limit_pci_retries > 0)) {
			mf_display_src(0xB6000103);
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}
/*
 * Translate the I/O address into a device node, bar, and bar offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *addr,
		u64 *dsaptr, u64 *bar_offset, const char *func)
{
	unsigned long orig_addr;
	unsigned long base_addr;
	unsigned long ind;
	struct device_node *dn;

	orig_addr = (unsigned long __force)addr;
	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR
				"iSeries_%s: invalid access at IO address %p\n",
				func, addr);
		return NULL;
	}
	base_addr = orig_addr - BASE_IO_MEMORY;
	ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
	dn = iomm_table[ind];

	if (dn != NULL) {
		int barnum = iobar_table[ind];
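		/*
		 * The BAR number is folded into bits 24-31 of the DSA;
		 * the HV BAR load/store calls take this combined handle
		 * plus the offset within the BAR.
		 */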
		*dsaptr = iseries_ds_addr(dn) | (barnum << 24);
		*bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IO address detected!\n");
	return dn;
}
/*
 * Read MM I/O Instructions for the iSeries
 * On an invalid I/O address, all ones are returned; HV call errors are
 * retried via check_return_code().  Otherwise, data is returned in
 * big-endian format.
 */
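/*
 * Each accessor below retries its HV call until check_return_code()
 * reports success; after PCI_RETRY_MAX failed attempts (with
 * limit_pci_retries set) check_return_code() panics the partition.
 */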
static u8 iseries_readb(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");

	if (dn == NULL)
		return 0xff;
	do {
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
	} while (check_return_code("RDB", dn, &retry, ret.rc) != 0);

	return ret.value;
}

static u16 iseries_readw_be(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");

	if (dn == NULL)
		return 0xffff;
	do {
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				bar_offset, 0);
	} while (check_return_code("RDW", dn, &retry, ret.rc) != 0);

	return ret.value;
}

static u32 iseries_readl_be(const volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");

	if (dn == NULL)
		return 0xffffffff;
	do {
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				bar_offset, 0);
	} while (check_return_code("RDL", dn, &retry, ret.rc) != 0);

	return ret.value;
}

/*
 * Write MM I/O Instructions for the iSeries
 */
static void iseries_writeb(u8 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");

	if (dn == NULL)
		return;
	do {
		rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
	} while (check_return_code("WWB", dn, &retry, rc) != 0);
}

static void iseries_writew_be(u16 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");

	if (dn == NULL)
		return;
	do {
		rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
	} while (check_return_code("WWW", dn, &retry, rc) != 0);
}

static void iseries_writel_be(u32 data, volatile void __iomem *addr)
{
	u64 bar_offset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *dn =
		xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");

	if (dn == NULL)
		return;
	do {
		rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
	} while (check_return_code("WWL", dn, &retry, rc) != 0);
}
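/*
 * The HV BAR accesses above operate on big-endian data; the non-_be
 * variants below byte-swap so that the usual little-endian readw()/
 * readl()/writew()/writel() semantics are provided.
 */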
static u16 iseries_readw(const volatile void __iomem *addr)
{
	return le16_to_cpu(iseries_readw_be(addr));
}

static u32 iseries_readl(const volatile void __iomem *addr)
{
	return le32_to_cpu(iseries_readl_be(addr));
}

static void iseries_writew(u16 data, volatile void __iomem *addr)
{
	iseries_writew_be(cpu_to_le16(data), addr);
}

static void iseries_writel(u32 data, volatile void __iomem *addr)
{
	iseries_writel_be(cpu_to_le32(data), addr);
}
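/*
 * Repeated (string) MMIO helpers: transfer 'count' items to or from
 * the same I/O address, one HV call per item.
 */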
static void iseries_readsb(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u8 *dst = buf;

	while (count-- > 0)
		*(dst++) = iseries_readb(addr);
}

static void iseries_readsw(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u16 *dst = buf;

	while (count-- > 0)
		*(dst++) = iseries_readw_be(addr);
}

static void iseries_readsl(const volatile void __iomem *addr, void *buf,
			   unsigned long count)
{
	u32 *dst = buf;

	while (count-- > 0)
		*(dst++) = iseries_readl_be(addr);
}

static void iseries_writesb(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u8 *src = buf;

	while (count-- > 0)
		iseries_writeb(*(src++), addr);
}

static void iseries_writesw(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u16 *src = buf;

	while (count-- > 0)
		iseries_writew_be(*(src++), addr);
}

static void iseries_writesl(volatile void __iomem *addr, const void *buf,
			    unsigned long count)
{
	const u32 *src = buf;

	while (count-- > 0)
		iseries_writel_be(*(src++), addr);
}
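/*
 * Block MMIO helpers, built on the single-byte accessors above.
 */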
static void iseries_memset_io(volatile void __iomem *addr, int c,
			      unsigned long n)
{
	volatile char __iomem *d = addr;

	while (n-- > 0)
		iseries_writeb(c, d++);
}

static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
				  unsigned long n)
{
	char *d = dest;
	const volatile char __iomem *s = src;

	while (n-- > 0)
		*d++ = iseries_readb(s++);
}

static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
				unsigned long n)
{
	const char *s = src;
	volatile char __iomem *d = dest;

	while (n-- > 0)
		iseries_writeb(*s++, d++);
}
/* We only set MMIO ops.  The default PIO ops will default
 * to the MMIO ops + pci_io_base, which is 0 on iSeries as
 * expected, so both should work.
 *
 * Note that we don't implement the readq/writeq versions as
 * I don't know of an HV call for doing so.  Thus, the default
 * operation will be used instead, which will fault as the value
 * returned by iSeries for MMIO addresses always hits a non-mapped
 * area.  This is as good as the BUG() we used to have there.
 */
static struct ppc_pci_io __initdata iseries_pci_io = {
	.readb = iseries_readb,
	.readw = iseries_readw,
	.readl = iseries_readl,
	.readw_be = iseries_readw_be,
	.readl_be = iseries_readl_be,
	.writeb = iseries_writeb,
	.writew = iseries_writew,
	.writel = iseries_writel,
	.writew_be = iseries_writew_be,
	.writel_be = iseries_writel_be,
	.readsb = iseries_readsb,
	.readsw = iseries_readsw,
	.readsl = iseries_readsl,
	.writesb = iseries_writesb,
	.writesw = iseries_writesw,
	.writesl = iseries_writesl,
	.memset_io = iseries_memset_io,
	.memcpy_fromio = iseries_memcpy_fromio,
	.memcpy_toio = iseries_memcpy_toio,
};
/*
 * iSeries_pcibios_init
 *
 * Description:
 *   This function checks for all possible system PCI host bridges that connect
 *   PCI buses.  The system hypervisor is queried as to the guest partition
 *   ownership status.  A pci_controller is built for any bus which is partially
 *   owned or fully owned by this guest partition.
 */
void __init iSeries_pcibios_init(void)
{
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *node = NULL;

	/* Install IO hooks */
	ppc_pci_io = iseries_pci_io;

	/* iSeries has no IO space in the common sense, so it needs its
	 * IO base set to 0
	 */
	pci_io_base = 0;

	if (root == NULL) {
		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
				"of device tree\n");
		return;
	}
	while ((node = of_get_next_child(root, node)) != NULL) {
		HvBusNumber bus;
		const u32 *busp;

		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
			continue;

		busp = of_get_property(node, "bus-range", NULL);
		if (busp == NULL)
			continue;
		bus = *busp;
		printk("bus %d appears to exist\n", bus);
		phb = pcibios_alloc_controller(node);
		if (phb == NULL)
			continue;
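		/*
		 * Each bus node gets its own controller, covering exactly
		 * this one bus number.
		 */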
		phb->pci_mem_offset = bus;
		phb->first_busno = bus;
		phb->last_busno = bus;
		phb->ops = &iSeries_pci_ops;
	}

	of_node_put(root);

	pci_devs_phb_init();
}