  1. /*
  2. * Broadcom specific AMBA
  3. * PCI Core
  4. *
  5. * Copyright 2005, 2011, Broadcom Corporation
  6. * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
  7. * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
  8. *
  9. * Licensed under the GNU/GPL. See COPYING for details.
  10. */
  11. #include "bcma_private.h"
  12. #include <linux/export.h>
  13. #include <linux/bcma/bcma.h>
  14. /**************************************************
  15. * R/W ops.
  16. **************************************************/
/* Read a PCIe protocol register via the core's indirect-access window:
 * latch the register offset in PCIEIND_ADDR, then fetch the value from
 * PCIEIND_DATA.
 */
u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	/* Read the address register back — presumably to make sure the
	 * address write has reached the core before the data access. */
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}
/* Write a PCIe protocol register via the indirect-access window; the
 * counterpart of bcma_pcie_read().
 */
static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	/* Read-back of the address register — presumably a post-write
	 * flush before storing the data. */
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
/* Select the SERDES PHY that later MDIO accesses will target (used on
 * core rev >= 10) by issuing an MDIO write to the block-address register,
 * then poll until the controller reports the access done.
 */
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4); /* PHY number field — NOTE(review): shift 4 looks like the phy-addr field, confirm against reg layout */
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	/* Poll for completion: up to 200 iterations of 1-2 ms sleeps. */
	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
}
  50. static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
  51. {
  52. int max_retries = 10;
  53. u16 ret = 0;
  54. u32 v;
  55. int i;
  56. /* enable mdio access to SERDES */
  57. v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
  58. v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
  59. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
  60. if (pc->core->id.rev >= 10) {
  61. max_retries = 200;
  62. bcma_pcie_mdio_set_phy(pc, device);
  63. v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
  64. BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
  65. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
  66. } else {
  67. v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
  68. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
  69. }
  70. v = BCMA_CORE_PCI_MDIODATA_START;
  71. v |= BCMA_CORE_PCI_MDIODATA_READ;
  72. v |= BCMA_CORE_PCI_MDIODATA_TA;
  73. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
  74. /* Wait for the device to complete the transaction */
  75. udelay(10);
  76. for (i = 0; i < max_retries; i++) {
  77. v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
  78. if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
  79. udelay(10);
  80. ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
  81. break;
  82. }
  83. usleep_range(1000, 2000);
  84. }
  85. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
  86. return ret;
  87. }
  88. static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
  89. u8 address, u16 data)
  90. {
  91. int max_retries = 10;
  92. u32 v;
  93. int i;
  94. /* enable mdio access to SERDES */
  95. v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
  96. v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
  97. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
  98. if (pc->core->id.rev >= 10) {
  99. max_retries = 200;
  100. bcma_pcie_mdio_set_phy(pc, device);
  101. v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
  102. BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
  103. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
  104. } else {
  105. v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
  106. v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
  107. }
  108. v = BCMA_CORE_PCI_MDIODATA_START;
  109. v |= BCMA_CORE_PCI_MDIODATA_WRITE;
  110. v |= BCMA_CORE_PCI_MDIODATA_TA;
  111. v |= data;
  112. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
  113. /* Wait for the device to complete the transaction */
  114. udelay(10);
  115. for (i = 0; i < max_retries; i++) {
  116. v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
  117. if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
  118. break;
  119. usleep_range(1000, 2000);
  120. }
  121. pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
  122. }
/* Write @data to an MDIO register and return the value read back from
 * the same register.
 */
static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
				    u8 address, u16 data)
{
	bcma_pcie_mdio_write(pc, device, address, data);
	return bcma_pcie_mdio_read(pc, device, address);
}
  129. /**************************************************
  130. * Workarounds.
  131. **************************************************/
  132. static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
  133. {
  134. u32 tmp;
  135. tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
  136. if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
  137. return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
  138. BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
  139. else
  140. return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
  141. }
  142. static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
  143. {
  144. u16 tmp;
  145. bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
  146. BCMA_CORE_PCI_SERDES_RX_CTRL,
  147. bcma_pcicore_polarity_workaround(pc));
  148. tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
  149. BCMA_CORE_PCI_SERDES_PLL_CTRL);
  150. if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
  151. bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
  152. BCMA_CORE_PCI_SERDES_PLL_CTRL,
  153. tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
  154. }
  155. static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
  156. {
  157. struct bcma_device *core = pc->core;
  158. u16 val16, core_index;
  159. uint regoff;
  160. regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
  161. core_index = (u16)core->core_index;
  162. val16 = pcicore_read16(pc, regoff);
  163. if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
  164. != core_index) {
  165. val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
  166. (val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
  167. pcicore_write16(pc, regoff, val16);
  168. }
  169. }
  170. /* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
  171. /* Needs to happen when coming out of 'standby'/'hibernate' */
  172. static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
  173. {
  174. u16 val16;
  175. uint regoff;
  176. regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
  177. val16 = pcicore_read16(pc, regoff);
  178. if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
  179. val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
  180. pcicore_write16(pc, regoff, val16);
  181. }
  182. }
  183. /**************************************************
  184. * Init.
  185. **************************************************/
/* Client-mode (endpoint) initialization: fix the SPROM core-index
 * field, apply the SERDES workarounds, then the L2/L3-Ready fixup.
 */
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_core_pci_fixcfg(pc);
	bcma_pcicore_serdes_workaround(pc);
	bcma_core_pci_config_fixup(pc);
}
/* One-time initialization of the PCI(e) core; subsequent calls are
 * no-ops once setup_done is set (presumably by the mode-specific init
 * paths — confirm against the host/client init code).
 */
void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

	/* Without hostmode support compiled in, hostmode stays false and
	 * we always take the client path. */
	if (!pc->hostmode)
		bcma_core_pci_clientmode_init(pc);
}
  204. void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
  205. {
  206. struct bcma_drv_pci *pc;
  207. u16 data;
  208. if (bus->hosttype != BCMA_HOSTTYPE_PCI)
  209. return;
  210. pc = &bus->drv_pci[0];
  211. if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
  212. data = up ? 0x74 : 0x7C;
  213. bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
  214. BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
  215. bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
  216. BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
  217. } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
  218. data = up ? 0x75 : 0x7D;
  219. bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
  220. BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
  221. bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
  222. BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
  223. }
  224. }
  225. EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
  226. int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
  227. bool enable)
  228. {
  229. struct pci_dev *pdev;
  230. u32 coremask, tmp;
  231. int err = 0;
  232. if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
  233. /* This bcma device is not on a PCI host-bus. So the IRQs are
  234. * not routed through the PCI core.
  235. * So we must not enable routing through the PCI core. */
  236. goto out;
  237. }
  238. pdev = pc->core->bus->host_pci;
  239. err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
  240. if (err)
  241. goto out;
  242. coremask = BIT(core->core_index) << 8;
  243. if (enable)
  244. tmp |= coremask;
  245. else
  246. tmp &= ~coremask;
  247. err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
  248. out:
  249. return err;
  250. }
  251. EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
  252. static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
  253. {
  254. u32 w;
  255. w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
  256. if (extend)
  257. w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
  258. else
  259. w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
  260. bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
  261. bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
  262. }
  263. void bcma_core_pci_up(struct bcma_bus *bus)
  264. {
  265. struct bcma_drv_pci *pc;
  266. if (bus->hosttype != BCMA_HOSTTYPE_PCI)
  267. return;
  268. pc = &bus->drv_pci[0];
  269. bcma_core_pci_extend_L1timer(pc, true);
  270. }
  271. EXPORT_SYMBOL_GPL(bcma_core_pci_up);
  272. void bcma_core_pci_down(struct bcma_bus *bus)
  273. {
  274. struct bcma_drv_pci *pc;
  275. if (bus->hosttype != BCMA_HOSTTYPE_PCI)
  276. return;
  277. pc = &bus->drv_pci[0];
  278. bcma_core_pci_extend_L1timer(pc, false);
  279. }
  280. EXPORT_SYMBOL_GPL(bcma_core_pci_down);