pci-imx6.c

/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
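
/*
 * Per-controller state: the board GPIOs, the clocks the PCIe block needs,
 * the generic DesignWare port and a regmap handle for the IOMUXC GPR
 * registers that configure the PHY.
 */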
struct imx6_pcie {
        int                     reset_gpio;
        int                     power_on_gpio;
        int                     wake_up_gpio;
        int                     disable_gpio;
        struct clk              *lvds_gate;
        struct clk              *sata_ref_100m;
        struct clk              *pcie_ref_125m;
        struct clk              *pcie_axi;
        struct pcie_port        pp;
        struct regmap           *iomuxc_gpr;
        void __iomem            *mem_base;
};

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
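
/*
 * Poll the PHY control-bus "ack" bit in PCIE_PHY_STAT until it reaches the
 * expected value, giving up after a handful of 1us delays.
 */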
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
        u32 val;
        u32 max_iterations = 10;
        u32 wait_counter = 0;

        do {
                val = readl(dbi_base + PCIE_PHY_STAT);
                val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
                wait_counter++;

                if (val == exp_val)
                        return 0;

                udelay(1);
        } while (wait_counter < max_iterations);

        return -ETIMEDOUT;
}
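
/*
 * Latch a PHY register address onto the control bus: write the address,
 * pulse the "capture address" bit and wait for the ack handshake to
 * complete.
 */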
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
        u32 val;
        int ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = addr << PCIE_PHY_CTRL_DATA_LOC;
        writel(val, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
        u32 val, phy_ctl;
        int ret;

        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        /* assert Read signal */
        phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
        writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        val = readl(dbi_base + PCIE_PHY_STAT);
        *data = val & 0xffff;

        /* deassert Read signal */
        writel(0x00, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        return 0;
}
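
/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */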
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
        u32 var;
        int ret;

        /* write addr */
        /* cap addr */
        ret = pcie_phy_wait_ack(dbi_base, addr);
        if (ret)
                return ret;

        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* capture data */
        var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
        writel(var, dbi_base + PCIE_PHY_CTRL);

        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert cap data */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        /* assert wr signal */
        var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack */
        ret = pcie_phy_poll_ack(dbi_base, 1);
        if (ret)
                return ret;

        /* deassert wr signal */
        var = data << PCIE_PHY_CTRL_DATA_LOC;
        writel(var, dbi_base + PCIE_PHY_CTRL);

        /* wait for ack de-assertion */
        ret = pcie_phy_poll_ack(dbi_base, 0);
        if (ret)
                return ret;

        writel(0x0, dbi_base + PCIE_PHY_CTRL);

        return 0;
}

/*
 * Added for PCI abort handling: report imprecise external aborts (which
 * config accesses to absent devices can raise on i.MX6) as handled so
 * they do not bring down the kernel.
 */
static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
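
/*
 * Assert the core reset: set the PHY test power-down bit and clear the
 * reference-clock enable bit in GPR1, then drive the board reset GPIO
 * low for 100ms.
 */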
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);

        gpio_set_value(imx6_pcie->reset_gpio, 0);
        msleep(100);
        gpio_set_value(imx6_pcie->reset_gpio, 1);

        return 0;
}
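
/*
 * Bring the core out of reset: drive the optional power-on GPIO, power
 * the PHY back up, re-enable its reference clock in GPR1 and turn on the
 * four clocks the PCIe block depends on, then let them stabilize.
 */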
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
        int ret;

        if (gpio_is_valid(imx6_pcie->power_on_gpio))
                gpio_set_value(imx6_pcie->power_on_gpio, 1);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
                        IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);

        ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
        if (ret) {
                dev_err(pp->dev, "unable to enable sata_ref_100m\n");
                goto err_sata_ref;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
                goto err_pcie_ref;
        }

        ret = clk_prepare_enable(imx6_pcie->lvds_gate);
        if (ret) {
                dev_err(pp->dev, "unable to enable lvds_gate\n");
                goto err_lvds_gate;
        }

        ret = clk_prepare_enable(imx6_pcie->pcie_axi);
        if (ret) {
                dev_err(pp->dev, "unable to enable pcie_axi\n");
                goto err_pcie_axi;
        }

        /* allow the clocks to stabilize */
        usleep_range(200, 500);

        return 0;

err_pcie_axi:
        clk_disable_unprepare(imx6_pcie->lvds_gate);
err_lvds_gate:
        clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
err_pcie_ref:
        clk_disable_unprepare(imx6_pcie->sata_ref_100m);
err_sata_ref:
        return ret;
}
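
/*
 * Static PHY/controller setup through the IOMUXC GPR registers: clear the
 * PCIE_CTL_2 bit, select Root Complex mode (DEVICE_TYPE), set the
 * loss-of-signal level and program the GPR8 TX de-emphasis and swing
 * values.
 */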
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

        /* configure constant input signal to the pcie ctrl and phy */
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
                        IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
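
/*
 * Bring-up sequence for the root complex: assert the core reset,
 * configure the PHY, release the reset, let the DesignWare core set up
 * the RC, then set the GPR12 PCIE_CTL_2 bit to start link training and
 * poll for the link for up to ~200 iterations of 100us-1ms each.
 */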
static void imx6_pcie_host_init(struct pcie_port *pp)
{
        int count = 0;
        struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);

        imx6_pcie_assert_core_reset(pp);

        imx6_pcie_init_phy(pp);

        imx6_pcie_deassert_core_reset(pp);

        dw_pcie_setup_rc(pp);

        regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

        while (!dw_pcie_link_up(pp)) {
                usleep_range(100, 1000);
                count++;
                if (count >= 200) {
                        dev_err(pp->dev, "phy link never came up\n");
                        dev_dbg(pp->dev,
                                "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
                                readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
                                readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
                        break;
                }
        }

        return;
}
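
/*
 * dw_pcie link_up() callback.  The link bit lives in PCIE_PHY_DEBUG_R1
 * (debug bit 36) and is tested first; otherwise the function checks for
 * the LTSSM being stuck in Recovery.RcvrLock (0x0d) with rx_valid
 * deasserted and, if so, pulses the PHY RX overrides to recover the gen2
 * transition (see the comment below).
 */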
static int imx6_pcie_link_up(struct pcie_port *pp)
{
        u32 rc, ltssm, rx_valid, temp;

        /* link is debug bit 36, debug register 1 starts at bit 32 */
        rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
        if (rc)
                return -EAGAIN;

        /*
         * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
         * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
         * If (MAC/LTSSM.state == Recovery.RcvrLock)
         * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
         * to gen2 is stuck
         */
        pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
        ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;

        if (rx_valid & 0x01)
                return 0;

        if (ltssm != 0x0d)
                return 0;

        dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);

        usleep_range(2000, 3000);

        pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
        temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
        pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);

        return 0;
}

static struct pcie_host_ops imx6_pcie_host_ops = {
        .link_up = imx6_pcie_link_up,
        .host_init = imx6_pcie_host_init,
};
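
/*
 * Fetch the controller interrupt, hook up the i.MX6-specific ops and let
 * the common DesignWare code initialize the host bridge.
 */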
static int imx6_add_pcie_port(struct pcie_port *pp,
                struct platform_device *pdev)
{
        int ret;

        pp->irq = platform_get_irq(pdev, 0);
        if (pp->irq <= 0) {
                dev_err(&pdev->dev, "failed to get irq\n");
                return -ENODEV;
        }

        pp->root_bus_nr = -1;
        pp->ops = &imx6_pcie_host_ops;

        spin_lock_init(&pp->conf_lock);
        ret = dw_pcie_host_init(pp);
        if (ret) {
                dev_err(&pdev->dev, "failed to initialize host\n");
                return ret;
        }

        return 0;
}
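
/*
 * Probe: map the DBI registers, install the abort handler, claim the
 * board GPIOs and required clocks, look up the IOMUXC GPR syscon and
 * finally register the port with the DesignWare core.
 */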
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
        struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        int ret;

        imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
        if (!imx6_pcie)
                return -ENOMEM;

        pp = &imx6_pcie->pp;
        pp->dev = &pdev->dev;

        /* Added for PCI abort handling */
        hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
                        "imprecise external abort");

        dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!dbi_base) {
                dev_err(&pdev->dev, "dbi_base memory resource not found\n");
                return -ENODEV;
        }

        pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
        if (IS_ERR(pp->dbi_base)) {
                ret = PTR_ERR(pp->dbi_base);
                goto err;
        }

        /* Fetch GPIOs */
        imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
        if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
                dev_err(&pdev->dev, "no reset-gpio defined\n");
                ret = -ENODEV;
                goto err;
        }
        ret = devm_gpio_request_one(&pdev->dev,
                        imx6_pcie->reset_gpio,
                        GPIOF_OUT_INIT_LOW,
                        "PCIe reset");
        if (ret) {
                dev_err(&pdev->dev, "unable to get reset gpio\n");
                goto err;
        }

        imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
        if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                imx6_pcie->power_on_gpio,
                                GPIOF_OUT_INIT_LOW,
                                "PCIe power enable");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get power-on gpio\n");
                        goto err;
                }
        }

        imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
        if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                imx6_pcie->wake_up_gpio,
                                GPIOF_IN,
                                "PCIe wake up");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get wake-up gpio\n");
                        goto err;
                }
        }

        imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
        if (gpio_is_valid(imx6_pcie->disable_gpio)) {
                ret = devm_gpio_request_one(&pdev->dev,
                                imx6_pcie->disable_gpio,
                                GPIOF_OUT_INIT_HIGH,
                                "PCIe disable endpoint");
                if (ret) {
                        dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
                        goto err;
                }
        }

        /* Fetch clocks */
        imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
        if (IS_ERR(imx6_pcie->lvds_gate)) {
                dev_err(&pdev->dev,
                        "lvds_gate clock select missing or invalid\n");
                ret = PTR_ERR(imx6_pcie->lvds_gate);
                goto err;
        }

        imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
        if (IS_ERR(imx6_pcie->sata_ref_100m)) {
                dev_err(&pdev->dev,
                        "sata_ref_100m clock source missing or invalid\n");
                ret = PTR_ERR(imx6_pcie->sata_ref_100m);
                goto err;
        }

        imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
        if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
                dev_err(&pdev->dev,
                        "pcie_ref_125m clock source missing or invalid\n");
                ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
                goto err;
        }

        imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
        if (IS_ERR(imx6_pcie->pcie_axi)) {
                dev_err(&pdev->dev,
                        "pcie_axi clock source missing or invalid\n");
                ret = PTR_ERR(imx6_pcie->pcie_axi);
                goto err;
        }

        /* Grab GPR config register range */
        imx6_pcie->iomuxc_gpr =
                syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
                dev_err(&pdev->dev, "unable to find iomuxc registers\n");
                ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
                goto err;
        }

        ret = imx6_add_pcie_port(pp, pdev);
        if (ret < 0)
                goto err;

        platform_set_drvdata(pdev, imx6_pcie);
        return 0;

err:
        return ret;
}

static const struct of_device_id imx6_pcie_of_match[] = {
        { .compatible = "fsl,imx6q-pcie", },
        {},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);

static struct platform_driver imx6_pcie_driver = {
        .driver = {
                .name = "imx6q-pcie",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(imx6_pcie_of_match),
        },
};

/* Freescale PCIe driver does not allow module unload */

static int __init imx6_pcie_init(void)
{
        return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
module_init(imx6_pcie_init);

MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");