pci.c
/*
 * Copyright (C) 2001 Allan Trautman, IBM Corporation
 *
 * iSeries specific routines for PCI.
 *
 * Based on code from pci.c and iSeries_pci.c 32bit
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ide.h>
#include <linux/pci.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/mf.h>
#include <asm/iseries/iommu.h>

#include <asm/ppc-pci.h>

#include "irq.h"
#include "pci.h"
#include "call_pci.h"

/*
 * Forward declarations.
 */
static struct device_node *find_Device_Node(int bus, int devfn);

static int Pci_Retry_Max = 3;	/* Only retry 3 times */
static int Pci_Error_Flag = 1;	/* Set Retry Error on. */

static struct pci_ops iSeries_pci_ops;

/*
 * Table defines
 * Each entry is 4 MB; 1024 entries = 4 GB of I/O address space.
 */
#define IOMM_TABLE_MAX_ENTRIES	1024
#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000UL
#define BASE_IO_MEMORY		0xE000000000000000UL

static unsigned long max_io_memory = BASE_IO_MEMORY;
static long current_iomm_table_entry;

/*
 * Lookup Tables.
 */
static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
static u8 iobar_table[IOMM_TABLE_MAX_ENTRIES];

static const char pci_io_text[] = "iSeries PCI I/O";
static DEFINE_SPINLOCK(iomm_table_lock);

/*
 * iomm_table_allocate_entry
 *
 * Adds a pci_dev entry to the address translation table.
 *
 * - Allocates the number of entries required in the table based on the
 *   BAR size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is rounded up to be a multiple of the entry size.
 * - current_iomm_table_entry is incremented to keep track of the last entry.
 * - Builds the resource entry for the allocated BARs.
 */
static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);

	/*
	 * No space to allocate, quick exit, skip allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->name = pci_io_text;
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 */
	while (bar_size > 0) {
		iomm_table[current_iomm_table_entry] = dev->sysdata;
		iobar_table[current_iomm_table_entry] = bar_num;
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}
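
/*
 * Worked example (hypothetical BAR size, for illustration only): a 6 MB
 * BAR landing at table index N gets the resource range
 * [BASE_IO_MEMORY + N*4MB, BASE_IO_MEMORY + N*4MB + 6MB - 1], while the
 * loop above reserves two full 4 MB table entries (8 MB of the fixed
 * window), both pointing at the same device node and BAR number.
 */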

/*
 * allocate_device_bars
 *
 * - Allocates all pci_dev BARs and updates the resources with the
 *   BAR values.  BARs with zero length are skipped.  The BAR sizes
 *   are taken from the pci_dev resources (pci_resource_len());
 *   iomm_table_allocate_entry() is called to allocate each entry.
 * - Loops through the BAR resources (0 - 5) including the ROM
 *   resource (6).
 */
static void allocate_device_bars(struct pci_dev *dev)
{
	int bar_num;

	for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
		iomm_table_allocate_entry(dev, bar_num);
}

/*
 * Log error information to system console.
 * Filter out the device not there errors.
 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
 */
static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
		int AgentId, int HvRc)
{
	if (HvRc == 0x0302)
		return;
	printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X\n",
	       Error_Text, Bus, SubBus, AgentId, HvRc);
}

/*
 * iSeries_pcibios_init
 *
 * Description:
 * This function checks for all possible system PCI host bridges that connect
 * PCI buses.  The system hypervisor is queried as to the guest partition
 * ownership status.  A pci_controller is built for any bus which is partially
 * owned or fully owned by this guest partition.
 */
void iSeries_pcibios_init(void)
{
	struct pci_controller *phb;
	struct device_node *root = of_find_node_by_path("/");
	struct device_node *node = NULL;

	if (root == NULL) {
		printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
				"of device tree\n");
		return;
	}
	while ((node = of_get_next_child(root, node)) != NULL) {
		HvBusNumber bus;
		const u32 *busp;

		if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
			continue;

		busp = get_property(node, "bus-range", NULL);
		if (busp == NULL)
			continue;
		bus = *busp;
		printk("bus %d appears to exist\n", bus);
		phb = pcibios_alloc_controller(node);
		if (phb == NULL)
			continue;

		phb->pci_mem_offset = phb->local_number = bus;
		phb->first_busno = bus;
		phb->last_busno = bus;
		phb->ops = &iSeries_pci_ops;
	}

	of_node_put(root);

	pci_devs_phb_init();
}

/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
	struct pci_dev *pdev = NULL;
	struct device_node *node;
	int DeviceCount = 0;

	/* Fix up the device node and pci_dev relationship */
	mf_display_src(0xC9000100);

	printk("pcibios_final_fixup\n");
	for_each_pci_dev(pdev) {
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
		printk("pci dev %p (%x.%x), node %p\n", pdev,
		       pdev->bus->number, pdev->devfn, node);

		if (node != NULL) {
			struct pci_dn *pdn = PCI_DN(node);
			const u32 *agent;

			agent = get_property(node, "linux,agent-id", NULL);
			if ((pdn != NULL) && (agent != NULL)) {
				u8 irq = iSeries_allocate_IRQ(pdn->busno, 0,
						pdn->bussubno);
				int err;

				err = HvCallXm_connectBusUnit(pdn->busno, pdn->bussubno,
						*agent, irq);
				if (err)
					pci_Log_Error("Connect Bus Unit",
						pdn->busno, pdn->bussubno, *agent, err);
				else {
					err = HvCallPci_configStore8(pdn->busno, pdn->bussubno,
							*agent,
							PCI_INTERRUPT_LINE,
							irq);
					if (err)
						pci_Log_Error("PciCfgStore Irq Failed!",
							pdn->busno, pdn->bussubno, *agent, err);
				}
				if (!err)
					pdev->irq = irq;
			}

			++DeviceCount;
			pdev->sysdata = (void *)node;
			PCI_DN(node)->pcidev = pdev;
			allocate_device_bars(pdev);
			iSeries_Device_Information(pdev, DeviceCount);
			iommu_devnode_init_iSeries(node);
		} else
			printk("PCI: Device Tree not found for 0x%016lX\n",
					(unsigned long)pdev);
	}
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);
}

void pcibios_fixup_resources(struct pci_dev *pdev)
{
}

/*
 * Look down the chain to find the matching device node.
 */
static struct device_node *find_Device_Node(int bus, int devfn)
{
	struct device_node *node;

	for (node = NULL; (node = of_find_all_nodes(node)); ) {
		struct pci_dn *pdn = PCI_DN(node);

		if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
			return node;
	}
	return NULL;
}

#if 0
/*
 * Returns the device node for the passed pci_dev.
 * Sanity checks the node's pcidev pointer against the passed pci_dev.
 * If none is found, returns a NULL which the client must handle.
 */
static struct device_node *get_Device_Node(struct pci_dev *pdev)
{
	struct device_node *node;

	node = pdev->sysdata;
	if (node == NULL || PCI_DN(node)->pcidev != pdev)
		node = find_Device_Node(pdev->bus->number, pdev->devfn);
	return node;
}
#endif

/*
 * Config space read and write functions.
 * For now at least, we look for the device node for the bus and devfn
 * that we are asked to access.  It may be possible to translate the devfn
 * to a subbus and deviceid more directly.
 */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
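
/*
 * The accessors below index these tables with (size - 1) & 3: an access
 * size of 1 selects the 8-bit call, 2 the 16-bit call, and 4 the 32-bit
 * call (index 3).  Index 2 also maps to the 32-bit call, so any
 * unexpected size still picks a valid hypervisor call.
 */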

/*
 * Read PCI config space
 */
static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 *val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	struct HvCallPci_LoadReturn ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255) {
		*val = ~0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	fn = hv_cfg_read_func[(size - 1) & 3];
	HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);

	if (ret.rc != 0) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;	/* or something */
	}

	*val = ret.value;
	return 0;
}

/*
 * Write PCI config space
 */
static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
		int offset, int size, u32 val)
{
	struct device_node *node = find_Device_Node(bus->number, devfn);
	u64 fn;
	u64 ret;

	if (node == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset > 255)
		return PCIBIOS_BAD_REGISTER_NUMBER;

	fn = hv_cfg_write_func[(size - 1) & 3];
	ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);

	if (ret != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};

/*
 * Check Return Code
 * -> On Failure, print and log information.
 *    Increment the retry count; if it exceeds the maximum, panic the partition.
 *
 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
 * PCI: Device 23.90 ReadL Retry( 1)
 * PCI: Device 23.90 ReadL Retry Successful(1)
 */
static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
		int *retry, u64 ret)
{
	if (ret != 0) {
		struct pci_dn *pdn = PCI_DN(DevNode);

		(*retry)++;
		printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
				TextHdr, pdn->busno, pdn->devfn,
				*retry, (int)ret);
		/*
		 * Bump the retry and check for retry count exceeded.
		 * If exceeded, panic the system.
		 */
		if (((*retry) > Pci_Retry_Max) &&
				(Pci_Error_Flag > 0)) {
			mf_display_src(0xB6000103);
			panic_timeout = 0;
			panic("PCI: Hardware I/O Error, SRC B6000103, "
					"Automatic Reboot Disabled.\n");
		}
		return -1;	/* Retry */
	}
	return 0;
}
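
/*
 * The MMIO helpers below loop on this function: a non-zero hypervisor
 * return code is logged and -1 is returned so the caller retries the
 * access.  After Pci_Retry_Max (3) failed retries with Pci_Error_Flag
 * set, the partition is brought down with SRC B6000103.
 */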

/*
 * Translate the I/O address into a device node, bar, and bar offset.
 * Note: Make sure the passed variables end up on the stack to avoid
 * the exposure of being device global.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *IoAddress,
		u64 *dsaptr, u64 *BarOffsetPtr)
{
	unsigned long OrigIoAddr;
	unsigned long BaseIoAddr;
	unsigned long TableIndex;
	struct device_node *DevNode;

	OrigIoAddr = (unsigned long __force)IoAddress;
	if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
		return NULL;
	BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
	TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
	DevNode = iomm_table[TableIndex];

	if (DevNode != NULL) {
		int barnum = iobar_table[TableIndex];
		*dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
		*BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IoAddress detected!\n");
	return DevNode;
}
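
/*
 * Translation sketch: the table index is (IoAddress - BASE_IO_MEMORY) /
 * IOMM_TABLE_ENTRY_SIZE, the DSA handed to the hypervisor is the node's
 * iseries_ds_addr() value with the BAR number merged in via
 * (barnum << 24), and the BAR offset is the remainder of the address
 * within the 4 MB table entry.
 */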

/*
 * Read MM I/O instructions for the iSeries.
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called,
 * else data is returned in big-endian format.
 *
 * iSeries_Read_Byte = Read Byte ( 8 bit)
 * iSeries_Read_Word = Read Word (16 bit)
 * iSeries_Read_Long = Read Long (32 bit)
 */
static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
		return 0xff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
	} while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);

	return (u8)ret.value;
}

static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
		return 0xffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);

	return swab16((u16)ret.value);
}

static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	struct HvCallPci_LoadReturn ret;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
		return 0xffffffff;
	}
	do {
		HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
				BarOffset, 0);
	} while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);

	return swab32((u32)ret.value);
}
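
/*
 * The swab16()/swab32() above (and the matching swabs in the store
 * helpers below) byte-swap the value exchanged with the hypervisor so
 * that callers of in_le16()/in_le32() and out_le16()/out_le32() see the
 * same results on iSeries as they would through the native
 * __in_le16()/__in_le32() paths.
 */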

/*
 * Write MM I/O instructions for the iSeries.
 *
 * iSeries_Write_Byte = Write Byte ( 8 bit)
 * iSeries_Write_Word = Write Word (16 bit)
 * iSeries_Write_Long = Write Long (32 bit)
 */
static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
	} while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
}

static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
	} while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
}

static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
{
	u64 BarOffset;
	u64 dsa;
	int retry = 0;
	u64 rc;
	struct device_node *DevNode =
		xlate_iomm_address(IoAddress, &dsa, &BarOffset);

	if (DevNode == NULL) {
		static unsigned long last_jiffies;
		static int num_printed;

		if ((jiffies - last_jiffies) > 60 * HZ) {
			last_jiffies = jiffies;
			num_printed = 0;
		}
		if (num_printed++ < 10)
			printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
		return;
	}
	do {
		rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
	} while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
}

extern unsigned char __raw_readb(const volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return *(volatile unsigned char __force *)addr;
}
EXPORT_SYMBOL(__raw_readb);

extern unsigned short __raw_readw(const volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return *(volatile unsigned short __force *)addr;
}
EXPORT_SYMBOL(__raw_readw);

extern unsigned int __raw_readl(const volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return *(volatile unsigned int __force *)addr;
}
EXPORT_SYMBOL(__raw_readl);

extern unsigned long __raw_readq(const volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return *(volatile unsigned long __force *)addr;
}
EXPORT_SYMBOL(__raw_readq);

extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	*(volatile unsigned char __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writeb);

extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	*(volatile unsigned short __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writew);

extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	*(volatile unsigned int __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writel);

extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	*(volatile unsigned long __force *)addr = v;
}
EXPORT_SYMBOL(__raw_writeq);
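
/*
 * The __raw_* accessors trap (BUG) when running on iSeries: PCI memory
 * cannot be touched directly on this platform, so every MMIO access has
 * to go through the hypervisor-backed helpers above.
 */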

int in_8(const volatile unsigned char __iomem *addr)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return iSeries_Read_Byte(addr);
	return __in_8(addr);
}
EXPORT_SYMBOL(in_8);

void out_8(volatile unsigned char __iomem *addr, int val)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_Write_Byte(val, addr);
	else
		__out_8(addr, val);
}
EXPORT_SYMBOL(out_8);

int in_le16(const volatile unsigned short __iomem *addr)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return iSeries_Read_Word(addr);
	return __in_le16(addr);
}
EXPORT_SYMBOL(in_le16);

int in_be16(const volatile unsigned short __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return __in_be16(addr);
}
EXPORT_SYMBOL(in_be16);

void out_le16(volatile unsigned short __iomem *addr, int val)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_Write_Word(val, addr);
	else
		__out_le16(addr, val);
}
EXPORT_SYMBOL(out_le16);

void out_be16(volatile unsigned short __iomem *addr, int val)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	__out_be16(addr, val);
}
EXPORT_SYMBOL(out_be16);

unsigned in_le32(const volatile unsigned __iomem *addr)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return iSeries_Read_Long(addr);
	return __in_le32(addr);
}
EXPORT_SYMBOL(in_le32);

unsigned in_be32(const volatile unsigned __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return __in_be32(addr);
}
EXPORT_SYMBOL(in_be32);

void out_le32(volatile unsigned __iomem *addr, int val)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_Write_Long(val, addr);
	else
		__out_le32(addr, val);
}
EXPORT_SYMBOL(out_le32);

void out_be32(volatile unsigned __iomem *addr, int val)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	__out_be32(addr, val);
}
EXPORT_SYMBOL(out_be32);

unsigned long in_le64(const volatile unsigned long __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return __in_le64(addr);
}
EXPORT_SYMBOL(in_le64);

unsigned long in_be64(const volatile unsigned long __iomem *addr)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	return __in_be64(addr);
}
EXPORT_SYMBOL(in_be64);

void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	__out_le64(addr, val);
}
EXPORT_SYMBOL(out_le64);

void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
{
	BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
	__out_be64(addr, val);
}
EXPORT_SYMBOL(out_be64);
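
/*
 * Dispatch pattern for the accessors above: the byte and little-endian
 * 16/32-bit accessors route to the hypervisor-backed iSeries helpers
 * when FW_FEATURE_ISERIES is set and fall back to the native
 * __in_xx()/__out_xx() helpers otherwise, while the big-endian and
 * 64-bit variants simply BUG() on iSeries because no hypervisor call is
 * provided for them here.
 */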

void memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		volatile char __iomem *d = addr;

		while (n-- > 0) {
			iSeries_Write_Byte(c, d++);
		}
	} else
		eeh_memset_io(addr, c, n);
}
EXPORT_SYMBOL(memset_io);

void memcpy_fromio(void *dest, const volatile void __iomem *src,
		unsigned long n)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		char *d = dest;
		const volatile char __iomem *s = src;

		while (n-- > 0) {
			*d++ = iSeries_Read_Byte(s++);
		}
	} else
		eeh_memcpy_fromio(dest, src, n);
}
EXPORT_SYMBOL(memcpy_fromio);

void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		const char *s = src;
		volatile char __iomem *d = dest;

		while (n-- > 0) {
			iSeries_Write_Byte(*s++, d++);
		}
	} else
		eeh_memcpy_toio(dest, src, n);
}
EXPORT_SYMBOL(memcpy_toio);
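
/*
 * On iSeries the block operations above degenerate to one hypervisor
 * call per byte via iSeries_Read_Byte()/iSeries_Write_Byte(), so large
 * memcpy_fromio()/memcpy_toio() transfers are comparatively expensive
 * on this platform.
 */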