host_pci.c
/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>

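/* Re-point the BAR0 window registers at the requested core and remember it,
 * so that subsequent accesses through the shared MMIO mapping hit that core. */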
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
			       core->addr);
	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
			       core->wrap);
	core->bus->mapped_core = core;
	pr_debug("Switched to core: 0x%X\n", core->id.id);
}

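/* Single-register accessors: make sure the BAR0 window points at the target
 * core, then read/write the shared MMIO mapping at the given offset. */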
static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
				 u8 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
				  u16 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + offset);
}

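/* Optional block transfers: count is in bytes, so for 16- and 32-bit register
 * widths it must be a multiple of the access size. */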
#ifdef CONFIG_BCMA_BLOCKIO
void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
			      size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}

void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
			       size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif

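/* Agent/wrapper register access: BCMA_PCI_BAR0_WIN2 maps the core's wrapper
 * space one BCMA_CORE_SIZE above its regular register space in the same BAR. */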
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

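/* Host ops vector handed to the bcma bus core for PCI-hosted buses. */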
const struct bcma_host_ops bcma_host_pci_ops = {
	.read8 = bcma_host_pci_read8,
	.read16 = bcma_host_pci_read16,
	.read32 = bcma_host_pci_read32,
	.write8 = bcma_host_pci_write8,
	.write16 = bcma_host_pci_write16,
	.write32 = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read = bcma_host_pci_block_read,
	.block_write = bcma_host_pci_block_write,
#endif
	.aread32 = bcma_host_pci_aread32,
	.awrite32 = bcma_host_pci_awrite32,
};

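/* Probe: enable the PCI device, claim its regions, apply the retry-timeout
 * quirk, map BAR0 and register the bcma bus behind the bridge. */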
static int bcma_host_pci_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		pr_err("PCI card detected, report problems.\n");

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}

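/* Tear everything down in reverse order of probe. */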
static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
	pci_set_drvdata(dev, NULL);
}

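/* PCI device IDs claimed by this bridge driver. */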
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
};

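/* Called by the bcma bus core during its module init/exit; this file has no
 * module_init()/module_exit() of its own. */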
int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}