pci.c

/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"

/*
 * Forward declarations of prototypes.
 */
static struct device_node *find_Device_Node(int bus, int devfn);

LIST_HEAD(iSeries_Global_Device_List);

static int Pci_Retry_Max = 3;		/* Only retry 3 times */
static int Pci_Error_Flag = 1;		/* Set Retry Error on. */

static struct pci_ops iSeries_pci_ops;

/*
 * Table defines
 * Each entry covers 4 MB; 1024 entries cover the 4 GB I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;
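
/*
 * For illustration (comment added for clarity, not from the original
 * sources): table entry N maps the 4 MB window starting at
 * BASE_IO_MEMORY + N * IOMM_TABLE_ENTRY_SIZE, so entry 3 covers
 * 0xE000000000C00000 .. 0xE000000000FFFFFF.  An address in the window
 * is translated back with:
 *
 *	index  = (addr - BASE_IO_MEMORY) / IOMM_TABLE_ENTRY_SIZE;
 *	offset = (addr - BASE_IO_MEMORY) % IOMM_TABLE_ENTRY_SIZE;
 *
 * which is exactly what xlate_iomm_address() below does.
 */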

/*
 * Lookup Tables.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);

/*
 * iomm_table_allocate_entry
 *
 * Adds a pci_dev entry in the address translation table.
 *
 * - Allocates the number of entries required in the table based on the
 *   BAR size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of the entry size.
 * - current_iomm_table_entry is incremented to keep track of the last entry.
 * - Builds the resource entry for the allocated BARs.
 */
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for the BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}

/*
 * allocate_device_bars
 *
 * - Allocates ALL pci_dev BARs and updates the resources with the
 *   BAR value.  BARs with zero length have their resources left
 *   untouched.  HvCallPci_getBarParms is used to get the size of the
 *   BAR space.  It calls iomm_table_allocate_entry to allocate
 *   each entry.
 * - Loops through the BAR resources (0 - 5) and the ROM, which is
 *   resource 6.
 */
static void allocate_device_bars(struct pci_dev *dev)
{
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
		iomm_table_allocate_entry(dev, bar_num);
}
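
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * once the BAR resources above are filled in, a driver reaches them through
 * the usual generic helpers.  The helper below is hypothetical; only
 * pci_resource_start(), pci_resource_len() and ioremap() are real APIs.
 */
#if 0
static void __iomem *example_map_bar0(struct pci_dev *dev)
{
	unsigned long start = pci_resource_start(dev, 0);
	unsigned long len = pci_resource_len(dev, 0);

	/* Maps into the 0xE000000000000000 window set up above. */
	return ioremap(start, len);
}
#endif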

/*
 * Log error information to the system console.
 * Filters out the "device not there" errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
		int AgentId, int HvRc)
{
	if (HvRc == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X\n",
	       Error_Text, Bus, SubBus, AgentId, HvRc);
}

/*
 * iSeries_pcibios_init
 *
 * Description:
 * This function checks for all possible system PCI host bridges that connect
 * PCI buses.  The system hypervisor is queried as to the guest partition
 * ownership status.  A pci_controller is built for any bus which is partially
 * or fully owned by this guest partition.
 */
void iSeries_pcibios_init(void)
{
	struct pci_controller *phb;
	struct device_node *node;
	struct device_node *dn;

	for_each_node_by_type(node, "pci") {
		HvBusNumber bus;
		u32 *busp;

		busp = (u32 *)get_property(node, "bus-range", NULL);
		if (busp == NULL)
			continue;
		bus = *busp;
		printk("bus %d appears to exist\n", bus);
		phb = pcibios_alloc_controller(node);
		if (phb == NULL)
			continue;

		phb->pci_mem_offset = phb->local_number = bus;
		phb->first_busno = bus;
		phb->last_busno = bus;
		phb->ops = &iSeries_pci_ops;

		/* Find and connect the devices. */
		for (dn = NULL; (dn = of_get_next_child(node, dn)) != NULL;) {
			struct pci_dn *pdn;
			u8 irq;
			int err;
			u32 *agent;
			u32 *reg;
			u32 *lsn;

			reg = (u32 *)get_property(dn, "reg", NULL);
			if (reg == NULL) {
				printk(KERN_DEBUG "no reg property!\n");
				continue;
			}
			busp = (u32 *)get_property(dn, "linux,subbus", NULL);
			if (busp == NULL) {
				printk(KERN_DEBUG "no subbus property!\n");
				continue;
			}
			agent = (u32 *)get_property(dn, "linux,agent-id", NULL);
			if (agent == NULL) {
				printk(KERN_DEBUG "no agent-id\n");
				continue;
			}
			lsn = (u32 *)get_property(dn,
					"linux,logical-slot-number", NULL);
			if (lsn == NULL) {
				printk(KERN_DEBUG "no logical-slot-number\n");
				continue;
			}

			irq = iSeries_allocate_IRQ(bus, 0, *busp);
			err = HvCallXm_connectBusUnit(bus, *busp, *agent, irq);
			if (err) {
				pci_Log_Error("Connect Bus Unit",
						bus, *busp, *agent, err);
				continue;
			}
			err = HvCallPci_configStore8(bus, *busp, *agent,
					PCI_INTERRUPT_LINE, irq);
			if (err) {
				pci_Log_Error("PciCfgStore Irq Failed!",
						bus, *busp, *agent, err);
				continue;
			}

			pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
			if (pdn == NULL)
				return;
			dn->data = pdn;
			pdn->node = dn;
			pdn->busno = bus;
			pdn->devfn = (reg[0] >> 8) & 0xff;
			pdn->bussubno = *busp;
			pdn->Irq = irq;
			pdn->LogicalSlot = *lsn;
			list_add_tail(&pdn->Device_List,
					&iSeries_Global_Device_List);
		}
	}
}
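
/*
 * For reference (illustrative, all values hypothetical): each child of a
 * "pci" node is expected to carry the properties consumed above, e.g.
 *
 *	reg                       = <0x00001000>   (devfn taken from bits 15:8)
 *	linux,subbus              = <0x58>
 *	linux,agent-id            = <0x10>
 *	linux,logical-slot-number = <0x02>
 *
 * A node missing any of these is skipped with a KERN_DEBUG message.
 */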

/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int DeviceCount = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);

		if (node == NULL) {
			printk("PCI: Device Tree not found for 0x%016lX\n",
					(unsigned long)pdev);
			continue;
		}
		++DeviceCount;
		pdev->sysdata = (void *)node;
		PCI_DN(node)->pcidev = pdev;
		allocate_device_bars(pdev);
		iSeries_Device_Information(pdev, DeviceCount);
		iommu_devnode_init_iSeries(node);
		pdev->irq = PCI_DN(node)->Irq;
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}

void pcibios_fixup_bus(struct pci_bus *PciBus)
{
}

void pcibios_fixup_resources(struct pci_dev *pdev)
{
}

/*
 * I/O memory copies MUST use MMIO commands on iSeries.
 * To do: for performance, include the HV call directly.
 */
void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count)
{
	u8 ByteValue = c;
	long NumberOfBytes = Count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(ByteValue, dest++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memset_io);

void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count)
{
	char *src = source;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		iSeries_Write_Byte(*src++, dest++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_toio);

void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count)
{
	char *dst = dest;
	long NumberOfBytes = count;

	while (NumberOfBytes > 0) {
		*dst++ = iSeries_Read_Byte(src++);
		--NumberOfBytes;
	}
}
EXPORT_SYMBOL(iSeries_memcpy_fromio);

/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
	struct pci_dn *pdn;

	list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
		if ((bus == pdn->busno) && (devfn == pdn->devfn))
			return pdn->node;
	}
	return NULL;
}

#if 0
/*
 * Returns the device node for the passed pci_dev.
 * Sanity checks the node's pcidev pointer against the passed pci_dev.
 * If none is found, returns NULL, which the caller must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif

/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
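
/*
 * Note (added for clarity): the accessors below index these tables with
 * (size - 1) & 3, so an access size of 1 selects the 8-bit call, 2 the
 * 16-bit call, and 4 the 32-bit call; the duplicate 32-bit entry covers
 * the otherwise-unused index 2 (size 3).
 */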

/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}

/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};
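
/*
 * Illustrative only (not part of the original file): once
 * iSeries_pcibios_init() installs these ops on a pci_controller, the
 * routines above are reached through the generic config accessors.
 * The helper below is hypothetical; pci_read_config_word() and
 * PCI_VENDOR_ID are the real generic API.
 */
#if 0
static u16 example_read_vendor(struct pci_dev *dev)
{
	u16 vendor;

	/* Ends up in iSeries_pci_read_config() with size == 2. */
	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
	return vendor;
}
#endif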

/*
 * Check the return code.
 * -> On failure, print and log information.
 *    Increment the retry count; if it exceeds the maximum, panic the partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(DevNode);

		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
				TextHdr, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If exceeded, panic the system.
		 */
		if (((*retry) > Pci_Retry_Max) &&
				(Pci_Error_Flag > 0)) {
			mf_display_src(0xB6000103);
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}

/*
 * Translate the I/O address into a device node, BAR, and BAR offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *IoAddress,
		u64 *dsaptr, u64 *BarOffsetPtr)
{
	unsigned long OrigIoAddr;
	unsigned long BaseIoAddr;
	unsigned long TableIndex;
	struct device_node *DevNode;

	OrigIoAddr = (unsigned long __force)IoAddress;
	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
		return NULL;
	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
	DevNode = iomm_table[TableIndex];

	if (DevNode != NULL) {
		int barnum = iobar_table[TableIndex];
		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IoAddress detected!\n");
	return DevNode;
}
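
/*
 * Worked example (added for illustration): for an I/O address of
 * BASE_IO_MEMORY + 2 * IOMM_TABLE_ENTRY_SIZE + 0x100 (0xE000000000800100),
 * TableIndex is 2, the device node and BAR number come from iomm_table[2]
 * and iobar_table[2], and the BAR offset passed to the HV call is 0x100.
 */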

/*
 * Read MM I/O instructions for the iSeries.
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
 * otherwise, data is returned in big-endian format.
 *
 * iSeries_Read_Byte = Read Byte ( 8 bit)
 * iSeries_Read_Word = Read Word (16 bit)
 * iSeries_Read_Long = Read Long (32 bit)
 */
u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
		return 0xff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

	return (u8)ret.value;
}
EXPORT_SYMBOL(iSeries_Read_Byte);

u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
		return 0xffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

	return swab16((u16)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Word);

u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
		return 0xffffffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);

	return swab32((u32)ret.value);
}
EXPORT_SYMBOL(iSeries_Read_Long);
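
/*
 * Note (added for clarity): the hypervisor BAR load/store calls move data
 * in big-endian register order, while PCI memory space is little-endian,
 * which is why the 16- and 32-bit accessors byte-swap with swab16() and
 * swab32(); single bytes need no swap.
 */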

/*
 * Write MM I/O instructions for the iSeries.
 *
 * iSeries_Write_Byte = Write Byte ( 8 bit)
 * iSeries_Write_Word = Write Word (16 bit)
 * iSeries_Write_Long = Write Long (32 bit)
 */
void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
	} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Byte);

void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Word);

void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}
EXPORT_SYMBOL(iSeries_Write_Long);