host_pci.c

/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
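
/*
 * Only one core's register space is visible through the PCI BAR0 window at
 * a time.  bcma_host_pci_switch_core() slides that window: BCMA_PCI_BAR0_WIN
 * selects which core's register space appears at bus->mmio, and
 * BCMA_PCI_BAR0_WIN2 selects the core's wrapper ("agent") space, which the
 * aread32/awrite32 helpers below reach at bus->mmio + BCMA_CORE_SIZE.  The
 * currently mapped core is cached in bus->mapped_core so the accessors only
 * reprogram the window when a different core is touched.
 */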
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
			       core->addr);
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
			       core->wrap);
	core->bus->mapped_core = core;
	pr_debug("Switched to core: 0x%X\n", core->id.id);
}

static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + offset);
}

#ifdef CONFIG_BCMA_BLOCKIO
void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
			      size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}

void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
			       size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif
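
/*
 * Usage sketch: core drivers do not call these block helpers directly; they
 * are reached through bus->ops via the bcma_block_read()/bcma_block_write()
 * inline wrappers (available from <linux/bcma/bcma.h> when
 * CONFIG_BCMA_BLOCKIO is set), roughly:
 *
 *	u8 buf[64];
 *
 *	bcma_block_read(core, buf, sizeof(buf), my_offset, sizeof(u32));
 *
 * Here "my_offset" is a hypothetical register offset; count is expected to
 * be a multiple of reg_width, as the WARN_ON()s above enforce.
 */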

static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}
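
/*
 * aread32/awrite32 operate on the core's wrapper ("agent") registers rather
 * than its main register space: BCMA_PCI_BAR0_WIN2 maps the wrapper into the
 * second BCMA_CORE_SIZE-sized slot of the BAR0 window, hence the
 * "+ (1 * BCMA_CORE_SIZE)" above.  Core code reaches these through the
 * bcma_aread32()/bcma_awrite32() inline wrappers, e.g. (register choice is
 * illustrative only):
 *
 *	u32 ioctl = bcma_aread32(core, BCMA_IOCTL);
 *	bcma_awrite32(core, BCMA_IOCTL, ioctl | BCMA_IOCTL_CLK);
 */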

const struct bcma_host_ops bcma_host_pci_ops = {
	.read8		= bcma_host_pci_read8,
	.read16		= bcma_host_pci_read16,
	.read32		= bcma_host_pci_read32,
	.write8		= bcma_host_pci_write8,
	.write16	= bcma_host_pci_write16,
	.write32	= bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read	= bcma_host_pci_block_read,
	.block_write	= bcma_host_pci_block_write,
#endif
	.aread32	= bcma_host_pci_aread32,
	.awrite32	= bcma_host_pci_awrite32,
};
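
/*
 * bus->ops is pointed at this table in bcma_host_pci_probe(), so every
 * register access on a PCI-hosted bus (including the bus scan done by
 * bcma_bus_register()) dispatches through it.  Core drivers normally use the
 * generic bcma_read8/16/32 and bcma_write8/16/32 inline wrappers from
 * <linux/bcma/bcma.h>, which simply call core->bus->ops, e.g.:
 *
 *	u32 val = bcma_read32(core, some_offset);	// some_offset: illustrative
 *	bcma_write32(core, some_offset, val);
 */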

static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		pr_err("PCI card detected, report problems.\n");

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}

static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
	pci_set_drvdata(dev, NULL);
}

static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
};

int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}
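
/*
 * Note: this file deliberately has no module_init()/module_exit() of its
 * own.  bcma_host_pci_init() and bcma_host_pci_exit() are declared in
 * bcma_private.h and are meant to be called from the bus core's module
 * init/exit path (bcma_modinit()/bcma_modexit() in drivers/bcma/main.c in
 * mainline), along the lines of:
 *
 *	static int __init bcma_modinit(void)
 *	{
 *		// ... other bus-core setup ...
 *		return bcma_host_pci_init();
 *	}
 *
 * The exact initcall used for bcma_modinit() varies by kernel version.
 */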