nicpci.c

/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <defs.h>
#include <soc.h>
#include <chipcommon.h>

#include "aiutils.h"
#include "pub.h"
#include "nicpci.h"
/* SPROM offsets */
#define SRSH_ASPM_OFFSET 4 /* word 4 */
#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */

#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
#define SRSH_BD_OFFSET 6 /* word 6 */

/* chipcontrol */
#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */

/* MDIO control */
#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
#define MDIOCTL_DIVISOR_VAL 0x2
#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */

/* MDIO Data */
#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
#define MDIODATA_TA 0x00020000 /* Turnaround */

#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */

/* MDIO Data for older revisions < 10 */
#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift */
#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask */
#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift */
#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask */

/* Transaction flags */
#define MDIODATA_WRITE 0x10000000
#define MDIODATA_READ 0x20000000
#define MDIODATA_START 0x40000000

#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
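
/*
 * Layout of the mdiodata word assembled from the definitions above
 * (core rev >= 10):
 *	[15:0]	data			(MDIODATA_MASK)
 *	[17]	turnaround		(MDIODATA_TA)
 *	[22:18]	register address	(MDIODATA_REGADDR_*)
 *	[27:23]	device address		(MDIODATA_DEVADDR_*)
 *	[28]	write			(MDIODATA_WRITE)
 *	[29]	read			(MDIODATA_READ)
 *	[30]	start			(MDIODATA_START)
 * Revisions < 10 use the *_OLD register/device address fields instead.
 */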
/* serdes regs (rev < 10) */
#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */

/* SERDES RX registers */
#define SERDES_RX_CTRL 1 /* Rx cntrl */
#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
#define SERDES_RX_CDR 6 /* CDR */
#define SERDES_RX_CDRBW 7 /* CDR BW */

/* SERDES RX control register */
#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */

/* SERDES PLL registers */
#define SERDES_PLL_CTRL 1 /* PLL control reg */
#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */

/* Linkcontrol reg offset in PCIE Cap */
#define PCIE_CAP_LINKCTRL_OFFSET 16 /* offset in pcie cap */
#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */

#define PCIE_ASPM_ENAB 3 /* ASPM L0s and L1 in linkctrl */
#define PCIE_ASPM_L1_ENAB 2 /* ASPM L1 only in linkctrl */
#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s only in linkctrl */
#define PCIE_ASPM_DISAB 0 /* ASPM neither L0s nor L1 in linkctrl */

/* Power management threshold */
#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */

/* different register spaces to access thru pcie indirect access */
#define PCIE_CONFIGREGS 1 /* Access to config space */
#define PCIE_PCIEREGS 2 /* Access to pcie registers */

/* PCIE protocol PHY diagnostic registers */
#define PCIE_PLP_STATUSREG 0x204 /* Status */

/* Status reg PCIE_PLP_STATUSREG */
#define PCIE_PLP_POLARITYINV_STAT 0x10

/* PCIE protocol DLLP diagnostic registers */
#define PCIE_DLLP_LCREG 0x100 /* Link Control */
#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */

/* PCIE protocol TLP diagnostic registers */
#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */

/* Sonics to PCI translation types */
#define SBTOPCI_PREF 0x4 /* prefetch enable */
#define SBTOPCI_BURST 0x8 /* burst enable */
#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */

#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */

/* PCI core index in SROM shadow area */
#define SRSH_PI_OFFSET 0 /* first word */
#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
#define SRSH_PI_SHIFT 12 /* bit 15:12 */

#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
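
/*
 * PCIREGOFFS()/PCIEREGOFFS() turn a register name into its byte offset
 * within the core register space, which is what the bcma read/write
 * accessors expect; e.g. PCIEREGOFFS(configaddr) is 0x120 per the
 * struct sbpcieregs layout below.
 */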
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
	u32 control;		/* PCI control */
	u32 PAD[3];
	u32 arbcontrol;		/* PCI arbiter control */
	u32 clkrun;		/* Clkrun Control (>=rev11) */
	u32 PAD[2];
	u32 intstatus;		/* Interrupt status */
	u32 intmask;		/* Interrupt mask */
	u32 sbtopcimailbox;	/* Sonics to PCI mailbox */
	u32 PAD[9];
	u32 bcastaddr;		/* Sonics broadcast address */
	u32 bcastdata;		/* Sonics broadcast data */
	u32 PAD[2];
	u32 gpioin;		/* ro: gpio input (>=rev2) */
	u32 gpioout;		/* rw: gpio output (>=rev2) */
	u32 gpioouten;		/* rw: gpio output enable (>= rev2) */
	u32 gpiocontrol;	/* rw: gpio control (>= rev2) */
	u32 PAD[36];
	u32 sbtopci0;		/* Sonics to PCI translation 0 */
	u32 sbtopci1;		/* Sonics to PCI translation 1 */
	u32 sbtopci2;		/* Sonics to PCI translation 2 */
	u32 PAD[189];
	u32 pcicfg[4][64];	/* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
	u16 sprom[36];		/* SPROM shadow Area */
	u32 PAD[46];
};
/* SB side: PCIE core and host control registers */
struct sbpcieregs {
	u32 control;		/* host mode only */
	u32 PAD[2];
	u32 biststatus;		/* bist Status: 0x00C */
	u32 gpiosel;		/* PCIE gpio sel: 0x010 */
	u32 gpioouten;		/* PCIE gpio outen: 0x14 */
	u32 PAD[2];
	u32 intstatus;		/* Interrupt status: 0x20 */
	u32 intmask;		/* Interrupt mask: 0x24 */
	u32 sbtopcimailbox;	/* sb to pcie mailbox: 0x028 */
	u32 PAD[53];
	u32 sbtopcie0;		/* sb to pcie translation 0: 0x100 */
	u32 sbtopcie1;		/* sb to pcie translation 1: 0x104 */
	u32 sbtopcie2;		/* sb to pcie translation 2: 0x108 */
	u32 PAD[5];

	/* pcie core supports indirect access to config space */
	u32 configaddr;		/* pcie config space access: Address field: 0x120 */
	u32 configdata;		/* pcie config space access: Data field: 0x124 */

	/* mdio access to serdes */
	u32 mdiocontrol;	/* controls the mdio access: 0x128 */
	u32 mdiodata;		/* Data to the mdio access: 0x12c */

	/* pcie protocol phy/dllp/tlp register indirect access mechanism */
	u32 pcieindaddr;	/* indirect access to the internal register: 0x130 */
	u32 pcieinddata;	/* Data to/from the internal register: 0x134 */
	u32 clkreqenctrl;	/* >= rev 6, Clkreq rdma control: 0x138 */
	u32 PAD[177];
	u32 pciecfg[4][64];	/* 0x400 - 0x7FF, PCIE Cfg Space */
	u16 sprom[64];		/* SPROM shadow Area */
};
struct pcicore_info {
	struct bcma_device *core;
	struct si_pub *sih;	/* System interconnect handle */
	struct pci_dev *dev;
	u8 pciecap_lcreg_offset;	/* PCIE capability LCreg offset
					 * in the config space
					 */
	bool pcie_pr42767;
	u8 pcie_polarity;
	u8 pcie_war_aspm_ovr;	/* Override ASPM/Clkreq settings */
	u8 pmecap_offset;	/* PM Capability offset in the config space */
	bool pmecap;		/* Capable of generating PME */
};
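
/*
 * The ASPM/CLKREQ workarounds below are applied only when the bus core
 * is a PCIe core with revision 3, 4 or 5.
 */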
#define PCIE_ASPM(sih)						\
	((ai_get_buscoretype(sih) == PCIE_CORE_ID) &&		\
	 ((ai_get_buscorerev(sih) >= 3) &&			\
	  (ai_get_buscorerev(sih) <= 5)))

/* delay needed between the mdio control/ mdiodata register data access */
static void pr28829_delay(void)
{
	udelay(10);
}
/* Initialize the PCI core.
 * It is the caller's responsibility to make sure that this is done only once.
 */
struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
{
	struct pcicore_info *pi;

	/* alloc struct pcicore_info */
	pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
	if (pi == NULL)
		return NULL;

	pi->sih = sih;
	pi->dev = core->bus->host_pci;
	pi->core = core;

	if (core->id.id == PCIE_CORE_ID) {
		u8 cap_ptr;

		cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
						      NULL, NULL);
		pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
	}
	return pi;
}

void pcicore_deinit(struct pcicore_info *pch)
{
	kfree(pch);
}
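
/*
 * Illustrative call sequence only, not lifted from the driver: the
 * brcmsmac attach/up/down paths own the real ordering. It simply shows
 * how the exported entry points in this file fit together, using the
 * SI_* state constants consumed by pcie_clkreq_upd() below.
 *
 *	struct pcicore_info *pi = pcicore_init(sih, core);
 *
 *	pcicore_fixcfg(pi);
 *	pcicore_attach(pi, SI_DOATTACH);
 *	pcicore_hwup(pi);
 *	pcicore_up(pi, SI_PCIUP);
 *	...
 *	pcicore_sleep(pi);
 *	pcicore_down(pi, SI_PCIDOWN);
 *	pcicore_deinit(pi);
 */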
/* return cap_offset if requested capability exists in the PCI config space */
/* Note that it is the caller's responsibility to make sure it's a pci bus */
u8
pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
			    unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &byte_val);
	if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
		goto end;

	/* check if the capability pointer field exists */
	pci_read_config_byte(dev, PCI_STATUS, &byte_val);
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		goto end;

	pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
	/* check if the capability pointer is 0x00 */
	if (cap_ptr == 0x00)
		goto end;

	/* loop thru the capability list
	 * and see if the pcie capability exists
	 */
	pci_read_config_byte(dev, cap_ptr, &cap_id);
	while (cap_id != req_cap_id) {
		pci_read_config_byte(dev, cap_ptr + 1, &cap_ptr);
		if (cap_ptr == 0x00)
			break;
		pci_read_config_byte(dev, cap_ptr, &cap_id);
	}
	if (cap_id != req_cap_id)
		goto end;

	/* found the caller requested capability */
	if (buf != NULL && buflen != NULL) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			goto end;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_SZPCR)
			bufsize = PCI_SZPCR - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			pci_read_config_byte(dev, cap_data, buf);
			cap_data++;
			buf++;
		}
	}
end:
	return cap_ptr;
}
/* ***** Register Access API */
static uint
pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
{
	uint retval = 0xFFFFFFFF;

	switch (addrtype) {
	case PCIE_CONFIGREGS:
		bcma_write32(core, PCIEREGOFFS(configaddr), offset);
		(void)bcma_read32(core, PCIEREGOFFS(configaddr));
		retval = bcma_read32(core, PCIEREGOFFS(configdata));
		break;
	case PCIE_PCIEREGS:
		bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
		(void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
		retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
		break;
	}

	return retval;
}

static uint pcie_writereg(struct bcma_device *core, uint addrtype,
			  uint offset, uint val)
{
	switch (addrtype) {
	case PCIE_CONFIGREGS:
		bcma_write32(core, PCIEREGOFFS(configaddr), offset);
		bcma_write32(core, PCIEREGOFFS(configdata), val);
		break;
	case PCIE_PCIEREGS:
		bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
		bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
		break;
	default:
		break;
	}
	return 0;
}
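
/*
 * Example (taken from the rev 0/1 workaround in pcie_war_pci_setup()
 * below) of the read-modify-write pattern used with the indirect
 * accessors above:
 *
 *	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
 *	w |= 0x40;
 *	pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
 */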
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
	uint mdiodata, i = 0;
	uint pcie_serdes_spinwait = 200;

	mdiodata = (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
		    (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
		    (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
		    (blk << 4));
	bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);

	pr28829_delay();

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
		    MDIOCTL_ACCESS_DONE)
			break;

		udelay(1000);
		i++;
	}

	if (i >= pcie_serdes_spinwait)
		return false;

	return true;
}
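
/*
 * On core rev >= 10 a serdes register is reached in two steps:
 * pcie_mdiosetblock() first selects the register block, then the
 * mdiodata word built in pcie_mdioop() carries only the register
 * offset. Older revisions encode the device and register address
 * directly in a single mdiodata word.
 */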
static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
	    uint *val)
{
	uint mdiodata;
	uint i = 0;
	uint pcie_serdes_spinwait = 10;

	/* enable mdio access to SERDES */
	bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
		     MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);

	if (ai_get_buscorerev(pi->sih) >= 10) {
		/* new serdes is slower in rw,
		 * using two layers of reg address mapping
		 */
		if (!pcie_mdiosetblock(pi, physmedia))
			return 1;
		mdiodata = ((MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
			    (regaddr << MDIODATA_REGADDR_SHF));
		pcie_serdes_spinwait *= 20;
	} else {
		mdiodata = ((physmedia << MDIODATA_DEVADDR_SHF_OLD) |
			    (regaddr << MDIODATA_REGADDR_SHF_OLD));
	}

	if (!write)
		mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
	else
		mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
			     *val);

	bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);

	pr28829_delay();

	/* retry till the transaction is complete */
	while (i < pcie_serdes_spinwait) {
		if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
		    MDIOCTL_ACCESS_DONE) {
			if (!write) {
				pr28829_delay();
				*val = (bcma_read32(pi->core,
						    PCIEREGOFFS(mdiodata)) &
					MDIODATA_MASK);
			}
			/* Disable mdio access to SERDES */
			bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
			return 0;
		}
		udelay(1000);
		i++;
	}

	/* Timed out. Disable mdio access to SERDES. */
	bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
	return 1;
}
/* use the mdio interface to read from mdio slaves */
static int
pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint regaddr,
	      uint *regval)
{
	return pcie_mdioop(pi, physmedia, regaddr, false, regval);
}

/* use the mdio interface to write to mdio slaves */
static int
pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
{
	return pcie_mdioop(pi, physmedia, regaddr, true, &val);
}

/* ***** Support functions ***** */
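/*
 * pcie_clkreq: when @mask is non-zero, set (@val != 0) or clear
 * (@val == 0) the CLKREQ enable bit in the PCIe link control register.
 * Returns 1 if CLKREQ is enabled after the (optional) update, 0
 * otherwise or when no PCIe capability offset is known.
 */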
static u8 pcie_clkreq(struct pcicore_info *pi, u32 mask, u32 val)
{
	u32 reg_val;
	u8 offset;

	offset = pi->pciecap_lcreg_offset;
	if (!offset)
		return 0;

	pci_read_config_dword(pi->dev, offset, &reg_val);
	/* set operation */
	if (mask) {
		if (val)
			reg_val |= PCIE_CLKREQ_ENAB;
		else
			reg_val &= ~PCIE_CLKREQ_ENAB;
		pci_write_config_dword(pi->dev, offset, reg_val);
		pci_read_config_dword(pi->dev, offset, &reg_val);
	}
	if (reg_val & PCIE_CLKREQ_ENAB)
		return 1;
	else
		return 0;
}

static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
	u32 w;
	struct si_pub *sih = pi->sih;

	if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
	    ai_get_buscorerev(sih) < 7)
		return;

	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
	if (extend)
		w |= PCIE_ASPMTIMER_EXTEND;
	else
		w &= ~PCIE_ASPMTIMER_EXTEND;
	pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}

/* centralized clkreq control policy */
static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
{
	struct si_pub *sih = pi->sih;

	switch (state) {
	case SI_DOATTACH:
		if (PCIE_ASPM(sih))
			pcie_clkreq(pi, 1, 0);
		break;
	case SI_PCIDOWN:
		/* turn on serdes PLL down */
		if (ai_get_buscorerev(sih) == 6) {
			ai_cc_reg(sih,
				  offsetof(struct chipcregs, chipcontrol_addr),
				  ~0, 0);
			ai_cc_reg(sih,
				  offsetof(struct chipcregs, chipcontrol_data),
				  ~0x40, 0);
		} else if (pi->pcie_pr42767) {
			pcie_clkreq(pi, 1, 1);
		}
		break;
	case SI_PCIUP:
		/* turn off serdes PLL down */
		if (ai_get_buscorerev(sih) == 6) {
			ai_cc_reg(sih,
				  offsetof(struct chipcregs, chipcontrol_addr),
				  ~0, 0);
			ai_cc_reg(sih,
				  offsetof(struct chipcregs, chipcontrol_data),
				  ~0x40, 0x40);
		} else if (PCIE_ASPM(sih)) {	/* disable clkreq */
			pcie_clkreq(pi, 1, 0);
		}
		break;
	}
}
/* ***** PCI core WARs ***** */
/* Done only once at attach time */
static void pcie_war_polarity(struct pcicore_info *pi)
{
	u32 w;

	if (pi->pcie_polarity != 0)
		return;

	w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);

	/* Detect the current polarity at attach and force that polarity and
	 * disable changing the polarity
	 */
	if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
		pi->pcie_polarity = SERDES_RX_CTRL_FORCE;
	else
		pi->pcie_polarity = (SERDES_RX_CTRL_FORCE |
				     SERDES_RX_CTRL_POLARITY);
}

/* enable ASPM and CLKREQ if srom doesn't have it */
/* Needs to happen when update to shadow SROM is needed
 * : Coming out of 'standby'/'hibernate'
 * : If pcie_war_aspm_ovr state changed
 */
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
	struct si_pub *sih = pi->sih;
	u16 val16;
	u32 w;

	if (!PCIE_ASPM(sih))
		return;

	/* bypass this on QT or VSIM */
	val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));

	val16 &= ~SRSH_ASPM_ENB;
	if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
		val16 |= SRSH_ASPM_ENB;
	else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
		val16 |= SRSH_ASPM_L1_ENB;
	else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
		val16 |= SRSH_ASPM_L0s_ENB;

	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);

	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
	w &= ~PCIE_ASPM_ENAB;
	w |= pi->pcie_war_aspm_ovr;
	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);

	val16 = bcma_read16(pi->core,
			    PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));

	if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
		val16 |= SRSH_CLKREQ_ENB;
		pi->pcie_pr42767 = true;
	} else
		val16 &= ~SRSH_CLKREQ_ENB;

	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
		     val16);
}
/* Apply the polarity determined at the start */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_serdes(struct pcicore_info *pi)
{
	u32 w = 0;

	if (pi->pcie_polarity != 0)
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CTRL,
			       pi->pcie_polarity);

	pcie_mdioread(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, &w);
	if (w & PLL_CTRL_FREQDET_EN) {
		w &= ~PLL_CTRL_FREQDET_EN;
		pcie_mdiowrite(pi, MDIODATA_DEV_PLL, SERDES_PLL_CTRL, w);
	}
}

/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
	u16 val16;

	val16 = bcma_read16(pi->core,
			    PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));

	if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
		val16 |= SRSH_L23READY_EXIT_NOPERST;
		bcma_write16(pi->core,
			     PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
	}
}

/* quick hack for testing */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
	/* turn off serdes PLL down */
	ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
		  CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);

	/* clear srom shadow backdoor */
	bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
	struct si_pub *sih = pi->sih;
	u32 w;

	if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
		w = pcie_readreg(pi->core, PCIE_PCIEREGS,
				 PCIE_TLP_WORKAROUNDSREG);
		w |= 0x8;
		pcie_writereg(pi->core, PCIE_PCIEREGS,
			      PCIE_TLP_WORKAROUNDSREG, w);
	}

	if (ai_get_buscorerev(sih) == 1) {
		w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
		w |= 0x40;
		pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
	}

	if (ai_get_buscorerev(sih) == 0) {
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
		pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
	} else if (PCIE_ASPM(sih)) {
		/* Change the L1 threshold for better performance */
		w = pcie_readreg(pi->core, PCIE_PCIEREGS,
				 PCIE_DLLP_PMTHRESHREG);
		w &= ~PCIE_L1THRESHOLDTIME_MASK;
		w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
		pcie_writereg(pi->core, PCIE_PCIEREGS,
			      PCIE_DLLP_PMTHRESHREG, w);

		pcie_war_serdes(pi);

		pcie_war_aspm_clkreq(pi);
	} else if (ai_get_buscorerev(pi->sih) == 7)
		pcie_war_noplldown(pi);

	/* Note that the fix is actually in the SROM,
	 * that's why this is open-ended
	 */
	if (ai_get_buscorerev(pi->sih) >= 6)
		pcie_misc_config_fixup(pi);
}
/* ***** Functions called during driver state changes ***** */
void pcicore_attach(struct pcicore_info *pi, int state)
{
	struct si_pub *sih = pi->sih;
	u32 bfl2 = (u32)getintvar(sih, BRCMS_SROM_BOARDFLAGS2);

	/* Determine if this board needs override */
	if (PCIE_ASPM(sih)) {
		if (bfl2 & BFL2_PCIEWAR_OVR)
			pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
		else
			pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
	}

	/* These need to happen in this order only */
	pcie_war_polarity(pi);

	pcie_war_serdes(pi);

	pcie_war_aspm_clkreq(pi);

	pcie_clkreq_upd(pi, state);
}

void pcicore_hwup(struct pcicore_info *pi)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	pcie_war_pci_setup(pi);
}

void pcicore_up(struct pcicore_info *pi, int state)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	/* Restore L1 timer for better performance */
	pcie_extendL1timer(pi, true);

	pcie_clkreq_upd(pi, state);
}

/* When the device is going to enter D3 state
 * (or the system is going to enter S3/S4 states)
 */
void pcicore_sleep(struct pcicore_info *pi)
{
	u32 w;

	if (!pi || !PCIE_ASPM(pi->sih))
		return;

	pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
	w &= ~PCIE_CAP_LCREG_ASPML1;
	pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);

	pi->pcie_pr42767 = false;
}

void pcicore_down(struct pcicore_info *pi, int state)
{
	if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
		return;

	pcie_clkreq_upd(pi, state);

	/* Reduce L1 timer for better power savings */
	pcie_extendL1timer(pi, false);
}
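
/*
 * Make sure the PCI core index field (SRSH_PI) in the SPROM shadow area
 * matches the index of the PCI/PCIe core in use, rewriting it if it
 * does not.
 */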
void pcicore_fixcfg(struct pcicore_info *pi)
{
	struct bcma_device *core = pi->core;
	u16 val16;
	uint regoff;

	switch (pi->core->id.id) {
	case BCMA_CORE_PCI:
		regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
		break;

	case BCMA_CORE_PCIE:
		regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
		break;

	default:
		return;
	}

	val16 = bcma_read16(pi->core, regoff);
	if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
	    (u16)core->core_index) {
		val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
			(val16 & ~SRSH_PI_MASK);
		bcma_write16(pi->core, regoff, val16);
	}
}

/* precondition: current core is pci core */
void
pcicore_pci_setup(struct pcicore_info *pi)
{
	bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
		   SBTOPCI_PREF | SBTOPCI_BURST);

	if (pi->core->id.rev >= 11) {
		bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
			   SBTOPCI_RC_READMULTI);
		bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
		(void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
	}
}