/* driver_pci.c */
/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>
/**************************************************
 * R/W ops.
 **************************************************/

/* Read one 32-bit word from a PCIe core register through the indirect
 * access window: latch @address into PCIEIND_ADDR, read that register
 * back (presumably to flush the posted address write before the data
 * access — standard pattern for this hardware), then return the word
 * from PCIEIND_DATA. */
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
/* Write one 32-bit word to a PCIe core register through the indirect
 * access window: latch @address, read it back (presumably to flush the
 * posted write so the core has latched the address), then write @data
 * to PCIEIND_DATA. */
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
  29. static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
  30. {
  31. u32 v;
  32. int i;
  33. v = BCMA_CORE_PCI_MDIODATA_START;
  34. v |= BCMA_CORE_PCI_MDIODATA_WRITE;
  35. v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
  36. BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
  37. v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
  38. BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
  39. v |= BCMA_CORE_PCI_MDIODATA_TA;
  40. v |= (phy << 4);
  41. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
  42. udelay(10);
  43. for (i = 0; i < 200; i++) {
  44. v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
  45. if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
  46. break;
  47. msleep(1);
  48. }
  49. }
  50. static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
  51. {
  52. int max_retries = 10;
  53. u16 ret = 0;
  54. u32 v;
  55. int i;
  56. /* enable mdio access to SERDES */
  57. v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
  58. v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
  59. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
  60. if (pc->core->id.rev >= 10) {
  61. max_retries = 200;
  62. bcma_pcie_mdio_set_phy(pc, device);
  63. v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
  64. BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
  65. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
  66. } else {
  67. v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
  68. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
  69. }
  70. v = BCMA_CORE_PCI_MDIODATA_START;
  71. v |= BCMA_CORE_PCI_MDIODATA_READ;
  72. v |= BCMA_CORE_PCI_MDIODATA_TA;
  73. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
  74. /* Wait for the device to complete the transaction */
  75. udelay(10);
  76. for (i = 0; i < max_retries; i++) {
  77. v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
  78. if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
  79. udelay(10);
  80. ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
  81. break;
  82. }
  83. msleep(1);
  84. }
  85. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
  86. return ret;
  87. }
  88. static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
  89. u8 address, u16 data)
  90. {
  91. int max_retries = 10;
  92. u32 v;
  93. int i;
  94. /* enable mdio access to SERDES */
  95. v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
  96. v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
  97. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
  98. if (pc->core->id.rev >= 10) {
  99. max_retries = 200;
  100. bcma_pcie_mdio_set_phy(pc, device);
  101. v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
  102. BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
  103. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
  104. } else {
  105. v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
  106. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
  107. }
  108. v = BCMA_CORE_PCI_MDIODATA_START;
  109. v |= BCMA_CORE_PCI_MDIODATA_WRITE;
  110. v |= BCMA_CORE_PCI_MDIODATA_TA;
  111. v |= data;
  112. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
  113. /* Wait for the device to complete the transaction */
  114. udelay(10);
  115. for (i = 0; i < max_retries; i++) {
  116. v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
  117. if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
  118. break;
  119. msleep(1);
  120. }
  121. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
  122. }
  123. /**************************************************
  124. * Workarounds.
  125. **************************************************/
  126. static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
  127. {
  128. u32 tmp;
  129. tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
  130. if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
  131. return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
  132. BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
  133. else
  134. return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
  135. }
  136. static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
  137. {
  138. u16 tmp;
  139. bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
  140. BCMA_CORE_PCI_SERDES_RX_CTRL,
  141. bcma_pcicore_polarity_workaround(pc));
  142. tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
  143. BCMA_CORE_PCI_SERDES_PLL_CTRL);
  144. if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
  145. bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
  146. BCMA_CORE_PCI_SERDES_PLL_CTRL,
  147. tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
  148. }
/**************************************************
 * Init.
 **************************************************/

/* Client-mode init (PCI core used as a device on a PCI(e) host bus):
 * currently only the SERDES workarounds are needed. */
static void __devinit bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_pcicore_serdes_workaround(pc);
}
/* One-time setup of the PCI core.  When hostmode support is compiled
 * in (CONFIG_BCMA_DRIVER_PCI_HOSTMODE), detect whether the core acts
 * as a PCI host and run hostmode init; otherwise (or when detection
 * says client) run client-mode init.
 *
 * NOTE(review): nothing in this function sets pc->setup_done, so the
 * early-return guard relies on one of the mode-specific init routines
 * setting it — verify, otherwise init may re-run. */
void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

	if (!pc->hostmode)
		bcma_core_pci_clientmode_init(pc);
}
  168. int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
  169. bool enable)
  170. {
  171. struct pci_dev *pdev = pc->core->bus->host_pci;
  172. u32 coremask, tmp;
  173. int err = 0;
  174. if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
  175. /* This bcma device is not on a PCI host-bus. So the IRQs are
  176. * not routed through the PCI core.
  177. * So we must not enable routing through the PCI core. */
  178. goto out;
  179. }
  180. err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
  181. if (err)
  182. goto out;
  183. coremask = BIT(core->core_index) << 8;
  184. if (enable)
  185. tmp |= coremask;
  186. else
  187. tmp &= ~coremask;
  188. err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
  189. out:
  190. return err;
  191. }
  192. EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
  193. void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
  194. {
  195. u32 w;
  196. w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
  197. if (extend)
  198. w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
  199. else
  200. w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
  201. bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
  202. bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
  203. }
  204. EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);