host_pci.c

/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>

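/*
 * Register access goes through a sliding window in PCI BAR0:
 * BCMA_PCI_BAR0_WIN selects which core's register space occupies the
 * first BCMA_CORE_SIZE bytes of the BAR, and BCMA_PCI_BAR0_WIN2 selects
 * that core's wrapper (agent) registers for the window that follows it.
 * Only one core is mapped at a time, so the accessors below re-point
 * the window whenever the cached mapped_core differs from the core
 * being accessed.
 */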
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
        pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
                               core->addr);
        pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
                               core->wrap);
        core->bus->mapped_core = core;
        pr_debug("Switched to core: 0x%X\n", core->id.id);
}

static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
                                 u8 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
                                  u16 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
                                  u32 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite32(value, core->bus->mmio + offset);
}

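/*
 * Optional block transfer helpers: count is in bytes and reg_width must
 * be 1, 2 or 4. A count that is not a multiple of the access width only
 * triggers a WARN_ON(); the trailing remainder is not transferred.
 */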
#ifdef CONFIG_BCMA_BLOCKIO
void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
                              size_t count, u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                ioread8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                ioread16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                ioread32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}

void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
                               size_t count, u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                iowrite8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                iowrite16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                iowrite32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}
#endif

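/*
 * Agent accessors: the core's wrapper registers are reached through the
 * second BAR0 window, i.e. BCMA_CORE_SIZE past the start of the mapping
 * (pointed at core->wrap by bcma_host_pci_switch_core()).
 */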
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
                                   u32 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

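/*
 * Ops table handed to the bcma bus core; all register and (optionally)
 * block I/O for a PCI-hosted bus is dispatched through these accessors.
 */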
const struct bcma_host_ops bcma_host_pci_ops = {
        .read8 = bcma_host_pci_read8,
        .read16 = bcma_host_pci_read16,
        .read32 = bcma_host_pci_read32,
        .write8 = bcma_host_pci_write8,
        .write16 = bcma_host_pci_write16,
        .write32 = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
        .block_read = bcma_host_pci_block_read,
        .block_write = bcma_host_pci_block_write,
#endif
        .aread32 = bcma_host_pci_aread32,
        .awrite32 = bcma_host_pci_awrite32,
};

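/*
 * Probe: enable the PCI device, claim its regions, disable the retry
 * timeout quirk, map BAR0 and register the bcma bus behind the bridge.
 */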
static int bcma_host_pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *id)
{
        struct bcma_bus *bus;
        int err = -ENOMEM;
        const char *name;
        u32 val;

        /* Alloc */
        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
        if (!bus)
                goto out;

        /* Basic PCI configuration */
        err = pci_enable_device(dev);
        if (err)
                goto err_kfree_bus;

        name = dev_name(&dev->dev);
        if (dev->driver && dev->driver->name)
                name = dev->driver->name;
        err = pci_request_regions(dev, name);
        if (err)
                goto err_pci_disable;
        pci_set_master(dev);

        /* Disable the RETRY_TIMEOUT register (0x41) to keep
         * PCI Tx retries from interfering with C3 CPU state */
        pci_read_config_dword(dev, 0x40, &val);
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

        /* SSB needed additional powering up, do we have any AMBA PCI cards? */
        if (!pci_is_pcie(dev))
                pr_err("PCI card detected, report problems.\n");

        /* Map MMIO (~0UL makes pci_iomap() map all of BAR0) */
        err = -ENOMEM;
        bus->mmio = pci_iomap(dev, 0, ~0UL);
        if (!bus->mmio)
                goto err_pci_release_regions;

        /* Host specific */
        bus->host_pci = dev;
        bus->hosttype = BCMA_HOSTTYPE_PCI;
        bus->ops = &bcma_host_pci_ops;

        /* Register */
        err = bcma_bus_register(bus);
        if (err)
                goto err_pci_unmap_mmio;

        pci_set_drvdata(dev, bus);

out:
        return err;

err_pci_unmap_mmio:
        pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
        pci_release_regions(dev);
err_pci_disable:
        pci_disable_device(dev);
err_kfree_bus:
        kfree(bus);
        return err;
}

static void bcma_host_pci_remove(struct pci_dev *dev)
{
        struct bcma_bus *bus = pci_get_drvdata(dev);

        bcma_bus_unregister(bus);
        pci_iounmap(dev, bus->mmio);
        pci_release_regions(dev);
        pci_disable_device(dev);
        kfree(bus);
        pci_set_drvdata(dev, NULL);
}

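/* PCI IDs of Broadcom devices that expose a bcma bus behind this bridge. */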
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
        .name = "bcma-pci-bridge",
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
};

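/*
 * Not a module_init()/module_exit() pair: the bcma core module calls
 * these from its own init/exit paths (they are declared in
 * bcma_private.h), so the PCI bridge driver is only registered when
 * bcma itself is loaded.
 */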
int __init bcma_host_pci_init(void)
{
        return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
        pci_unregister_driver(&bcma_pci_bridge_driver);
}