pci.c

/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"

/*
 * Forward declarations.
 */
static struct device_node *find_Device_Node(int bus, int devfn);

static int Pci_Retry_Max = 3;	/* Only retry 3 times */
static int Pci_Error_Flag = 1;	/* Set Retry Error on. */

static struct pci_ops iSeries_pci_ops;

/*
 * Table defines
 * Each entry size is 4 MB * 1024 entries = 4 GB of I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;
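
/*
 * Layout note: iomm_table entry i describes the 4 MB window starting at
 * BASE_IO_MEMORY + i * IOMM_TABLE_ENTRY_SIZE; entry 1, for example,
 * covers 0xE000000000400000 - 0xE0000000007FFFFF.  xlate_iomm_address()
 * below inverts this mapping to recover the device node and BAR.
 */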

/*
 * Lookup Tables.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);

/*
 * iomm_table_allocate_entry
 *
 * Adds a pci_dev entry in the address translation table.
 *
 * - Allocates the number of entries required in the table based on the
 *   BAR size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of the entry size.
 * - current_iomm_table_entry is incremented to keep track of the last entry.
 * - Builds the resource entry for allocated BARs.
 */
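/*
 * Example: a 6 MB BAR is rounded up to two 4 MB table entries, so it
 * occupies an 8 MB window of the I/O address space.
 */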
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip Allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}

/*
 * allocate_device_bars
 *
 * - Allocates ALL pci_dev BARs and updates the resources with the BAR
 *   values.  BARs with zero length are skipped (their resources are left
 *   unset).  iomm_table_allocate_entry() is called to allocate the table
 *   entries for each BAR.
 * - Loops through the BAR resources (0 - 5) including the ROM
 *   resource (6).
 */
static void allocate_device_bars(struct pci_dev *dev)
{
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
		iomm_table_allocate_entry(dev, bar_num);
}

/*
 * Log error information to the system console.
 * Filter out the "device not there" errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
		int AgentId, int HvRc)
{
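	/* HvRc 0x0302 is the "device not there" return code; don't log it. */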
	if (HvRc == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X\n",
	       Error_Text, Bus, SubBus, AgentId, HvRc);
}

/*
 * iSeries_pcibios_init
 *
 * Description:
 * This function checks for all possible system PCI host bridges that connect
 * PCI buses.  The system hypervisor is queried as to the guest partition
 * ownership status.  A pci_controller is built for any bus which is partially
 * owned or fully owned by this guest partition.
 */
void iSeries_pcibios_init(void)
{
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *node = NULL;

	if (root == NULL) {
		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
				"of device tree\n");
		return;
	}
	while ((node = of_get_next_child(root, node)) != NULL) {
		HvBusNumber bus;
		const u32 *busp;

		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
			continue;

		busp = get_property(node, "bus-range", NULL);
		if (busp == NULL)
			continue;
		bus = *busp;
		printk("bus %d appears to exist\n", bus);
		phb = pcibios_alloc_controller(node);
		if (phb == NULL)
			continue;

		phb->pci_mem_offset = phb->local_number = bus;
		phb->first_busno = bus;
		phb->last_busno = bus;
		phb->ops = &iSeries_pci_ops;
	}

	of_node_put(root);

	pci_devs_phb_init();
}

/*
 * iSeries_pci_final_fixup(void)
 *
 * For each PCI device, find its device tree node, connect the bus unit
 * to an allocated IRQ, record the IRQ in PCI_INTERRUPT_LINE, allocate
 * the device BARs and initialize its IOMMU table.
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int DeviceCount = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);

		if (node != NULL) {
			struct pci_dn *pdn = PCI_DN(node);
			const u32 *agent;

			agent = get_property(node, "linux,agent-id", NULL);
			if ((pdn != NULL) && (agent != NULL)) {
				u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
						pdn->bussubno);
				int err;

				err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
						*agent, irq);
				if (err)
					pci_Log_Error("Connect Bus Unit",
						pdn->busno, pdn->bussubno, *agent, err);
				else {
					err = HvCallPci_configStore8(pdn->busno, pdn->bussubno,
							*agent, PCI_INTERRUPT_LINE, irq);
					if (err)
						pci_Log_Error("PciCfgStore Irq Failed!",
							pdn->busno, pdn->bussubno, *agent, err);
				}
				if (!err)
					pdev->irq = irq;
			}

			++DeviceCount;
			pdev->sysdata = (void *)node;
			PCI_DN(node)->pcidev = pdev;
			allocate_device_bars(pdev);
			iSeries_Device_Information(pdev, DeviceCount);
			iommu_devnode_init_iSeries(node);
		} else
			printk("PCI: Device Tree not found for 0x%016lX\n",
			       (unsigned long)pdev);
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}

void pcibios_fixup_bus(struct pci_bus *PciBus)
{
}

void pcibios_fixup_resources(struct pci_dev *pdev)
{
}

/*
 * I/O memory copy MUST use MMIO commands on iSeries.
 * To do: for performance, include the HV call directly.
 */
void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count)
{
	u8 ByteValue = c;
	long NumberOfBytes = Count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(ByteValue, dest++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memset_io);

void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count)
{
	char *src = source;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(*src++, dest++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_toio);

void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count)
{
	char *dst = dest;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		*dst++ = iSeries_Read_Byte(src++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_fromio);
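
/*
 * Illustrative use only: a driver whose BAR has been mapped into the
 * iSeries I/O window (see allocate_device_bars() above) would copy data
 * with these helpers, e.g.
 *
 *	char buf[64];				(hypothetical buffer)
 *	iSeries_memcpy_toio(mapped_bar, buf, sizeof(buf));
 *	iSeries_memcpy_fromio(buf, mapped_bar, sizeof(buf));
 *
 * where "mapped_bar" stands for any address inside that window; note
 * that every byte is an individual hypervisor call.
 */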

/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);

		if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
			return node;
	}
	return NULL;
}

#if 0
/*
 * Returns the device node for the passed pci_dev.
 * Sanity checks the node's pcidev pointer against the passed pci_dev;
 * if they do not match, falls back to a search by bus and devfn.
 * If none is found, returns NULL, which the caller must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif

/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
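
/*
 * The access size (1, 2 or 4 bytes) indexes these tables as (size - 1) & 3:
 * size 1 selects the 8-bit call, size 2 the 16-bit call, and size 4 the
 * 32-bit call (the unused index 2 aliases the 32-bit call).
 */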

/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}

/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};

/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment the retry count; if it exceeds the max, panic the partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(DevNode);

		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
				TextHdr, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If exceeded, panic the system.
		 */
		if (((*retry) > Pci_Retry_Max) &&
				(Pci_Error_Flag > 0)) {
			mf_display_src(0xB6000103);
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}

/*
 * Translate the I/O address into a device node, BAR, and BAR offset.
 * Note: make sure the passed variables end up on the stack to avoid
 * exposing them as globals.
 */
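/*
 * For example, an address of BASE_IO_MEMORY + 0x00480010 maps to table
 * entry 1 (0x00480010 / IOMM_TABLE_ENTRY_SIZE), giving that entry's
 * device node and BAR, with a BAR offset of 0x00080010
 * (0x00480010 % IOMM_TABLE_ENTRY_SIZE).
 */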
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *IoAddress,
		u64 *dsaptr, u64 *BarOffsetPtr)
{
	unsigned long OrigIoAddr;
	unsigned long BaseIoAddr;
	unsigned long TableIndex;
	struct device_node *DevNode;

	OrigIoAddr = (unsigned long __force)IoAddress;
	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
		return NULL;
	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
	DevNode = iomm_table[TableIndex];

	if (DevNode != NULL) {
		int barnum = iobar_table[TableIndex];
		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IoAddress detected!\n");

	return DevNode;
}

/*
 * Read MM I/O Instructions for the iSeries
 * On an invalid I/O address, all ones are returned; on an MM I/O error
 * the access is retried via CheckReturnCode().  Otherwise, data is
 * returned in big-endian format.
 *
 * iSeries_Read_Byte = Read Byte  ( 8 bit)
 * iSeries_Read_Word = Read Word  (16 bit)
 * iSeries_Read_Long = Read Long  (32 bit)
 */
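/*
 * In the invalid-address path below, the warning printk is throttled to
 * at most 10 messages per 60-second window (last_jiffies/num_printed);
 * the same pattern is repeated in each accessor.
 */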
u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
		return 0xff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

	return (u8)ret.value;
}
EXPORT_SYMBOL(iSeries_Read_Byte);

u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
		return 0xffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

	return swab16((u16)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Word);

u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
		return 0xffffffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);

	return swab32((u32)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Long);

/*
 * Write MM I/O Instructions for the iSeries
 *
 * iSeries_Write_Byte = Write Byte ( 8 bit)
 * iSeries_Write_Word = Write Word (16 bit)
 * iSeries_Write_Long = Write Long (32 bit)
 */
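/*
 * Note: the 16- and 32-bit write accessors byte-swap the data
 * (swab16/swab32) before the hypervisor BAR store call, mirroring the
 * swap applied on the read side.
 */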
void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
	} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Byte);

void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Word);

void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Long);