/*
 * arch/arm/mach-tegra/pcie.c
 *
 * PCIe host controller driver for TEGRA(2) SOCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <asm/sizes.h>
#include <asm/mach/pci.h>

#include <mach/pinmux.h>
#include <mach/iomap.h>
#include <mach/clk.h>

/* register definitions */
#define AFI_OFFSET		0x3800
#define PADS_OFFSET		0x3000
#define RP0_OFFSET		0x0000
#define RP1_OFFSET		0x1000

#define AFI_AXI_BAR0_SZ		0x00
#define AFI_AXI_BAR1_SZ		0x04
#define AFI_AXI_BAR2_SZ		0x08
#define AFI_AXI_BAR3_SZ		0x0c
#define AFI_AXI_BAR4_SZ		0x10
#define AFI_AXI_BAR5_SZ		0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0		0x30
#define AFI_FPCI_BAR1		0x34
#define AFI_FPCI_BAR2		0x38
#define AFI_FPCI_BAR3		0x3c
#define AFI_FPCI_BAR4		0x40
#define AFI_FPCI_BAR5		0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_CONFIGURATION		0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE		0xb8
#define AFI_INTR_CODE_MASK	0xf
#define AFI_INTR_MASTER_ABORT	4
#define AFI_INTR_LEGACY		6

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_SM_INTR_ENABLE	0xc4

#define AFI_AFI_INTR_ENABLE		0xc8
#define AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define AFI_INTR_EN_INI_DECERR		(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)

#define AFI_PCIE_CONFIG					0x0f8
#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE		(1 << 1)
#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE		(1 << 2)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)

#define AFI_FUSE			0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX_CTRL_RST		(1 << 0)
#define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)

#define RP_VEND_XP		0x00000F00
#define RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL				0x000000B8
#define PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define PADS_PLL_CTL_LOCKDET			(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)

/* PMC access is required for PCIE xclk (un)clamping */
#define PMC_SCRATCH42		0x144
#define PMC_SCRATCH42_PCX_CLAMP	(1 << 0)

static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);

#define pmc_writel(value, reg) \
	__raw_writel(value, (u32)reg_pmc_base + (reg))
#define pmc_readl(reg) \
	__raw_readl((u32)reg_pmc_base + (reg))

/*
 * Tegra2 defines 1GB in the AXI address map for PCIe.
 *
 * That address space is split into different regions, with sizes and
 * offsets as follows:
 *
 * 0x80000000 - 0x80003fff - PCI controller registers
 * 0x80004000 - 0x80103fff - PCI configuration space
 * 0x80104000 - 0x80203fff - PCI extended configuration space
 * 0x80204000 - 0x803fffff - unused
 * 0x80400000 - 0x8040ffff - downstream IO
 * 0x80410000 - 0x8fffffff - unused
 * 0x90000000 - 0x9fffffff - non-prefetchable memory
 * 0xa0000000 - 0xbfffffff - prefetchable memory
 */
#define TEGRA_PCIE_BASE		0x80000000

#define PCIE_REGS_SZ		SZ_16K
#define PCIE_CFG_OFF		PCIE_REGS_SZ
#define PCIE_CFG_SZ		SZ_1M
#define PCIE_EXT_CFG_OFF	(PCIE_CFG_SZ + PCIE_CFG_OFF)
#define PCIE_EXT_CFG_SZ		SZ_1M
#define PCIE_IOMAP_SZ		(PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)

#define MMIO_BASE		(TEGRA_PCIE_BASE + SZ_4M)
#define MMIO_SIZE		SZ_64K
#define MEM_BASE_0		(TEGRA_PCIE_BASE + SZ_256M)
#define MEM_SIZE_0		SZ_128M
#define MEM_BASE_1		(MEM_BASE_0 + MEM_SIZE_0)
#define MEM_SIZE_1		SZ_128M
#define PREFETCH_MEM_BASE_0	(MEM_BASE_1 + MEM_SIZE_1)
#define PREFETCH_MEM_SIZE_0	SZ_128M
#define PREFETCH_MEM_BASE_1	(PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
#define PREFETCH_MEM_SIZE_1	SZ_128M

#define PCIE_CONF_BUS(b)	((b) << 16)
#define PCIE_CONF_DEV(d)	((d) << 11)
#define PCIE_CONF_FUNC(f)	((f) << 8)
#define PCIE_CONF_REG(r)	\
	(((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
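
/*
 * Layout of a configuration-space address within the mapped aperture:
 * bits [23:16] carry the bus number, [15:11] the device, [10:8] the
 * function, and the register offset is rounded down to a 32-bit word.
 * Registers below 256 are routed through the standard configuration
 * window (PCIE_CFG_OFF), the rest through the extended one.
 */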

struct tegra_pcie_port {
	int			index;
	u8			root_bus_nr;
	void __iomem		*base;

	bool			link_up;

	char			io_space_name[16];
	char			mem_space_name[16];
	char			prefetch_space_name[20];
	struct resource		res[3];
};

struct tegra_pcie_info {
	struct tegra_pcie_port	port[2];
	int			num_ports;

	void __iomem		*regs;
	struct resource		res_mmio;

	struct clk		*pex_clk;
	struct clk		*afi_clk;
	struct clk		*pcie_xclk;
	struct clk		*pll_e;
};

static struct tegra_pcie_info tegra_pcie = {
	.res_mmio = {
		.name = "PCI IO",
		.start = MMIO_BASE,
		.end = MMIO_BASE + MMIO_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

void __iomem *tegra_pcie_io_base;
EXPORT_SYMBOL(tegra_pcie_io_base);

static inline void afi_writel(u32 value, unsigned long offset)
{
	writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline u32 afi_readl(unsigned long offset)
{
	return readl(offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline void pads_writel(u32 value, unsigned long offset)
{
	writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
}

static inline u32 pads_readl(unsigned long offset)
{
	return readl(offset + PADS_OFFSET + tegra_pcie.regs);
}
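
/*
 * Map a root bus number back to the root port that owns it.  Returns
 * NULL for buses behind the root ports; those are reached through the
 * shared configuration aperture instead.
 */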
static struct tegra_pcie_port *bus_to_port(int bus)
{
	int i;

	for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
		int rbus = tegra_pcie.port[i].root_bus_nr;
		if (rbus != -1 && rbus == bus)
			break;
	}

	return i >= 0 ? tegra_pcie.port + i : NULL;
}
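
/*
 * Configuration read: accesses to a root bus go to the root port's own
 * register block (function 0 only), everything else goes through the
 * mapped configuration space.  Sub-word sizes are extracted from a
 * 32-bit read.
 */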
static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	if (pp) {
		if (devfn != 0) {
			*val = 0xffffffff;
			return PCIBIOS_DEVICE_NOT_FOUND;
		}

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	*val = readl(addr);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
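
/*
 * Configuration write: 32-bit writes go out directly; 8- and 16-bit
 * writes are performed as a read-modify-write of the containing word.
 */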
static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	u32 mask;
	u32 tmp;

	if (pp) {
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	if (size == 2)
		mask = ~(0xffff << ((where & 0x3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 0x3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops tegra_pcie_ops = {
	.read	= tegra_pcie_read_conf,
	.write	= tegra_pcie_write_conf,
};

static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
{
	u16 val16;
	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);

	if (pos <= 0) {
		dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
	val16 |= PCI_EXP_DEVCTL_RELAX_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
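
/*
 * Per-controller setup callback: claim the I/O, memory and prefetchable
 * memory windows for this port and hand them to the ARM PCI core.  Port 0
 * and port 1 get fixed, non-overlapping regions of the apertures defined
 * above.
 */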
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return 0;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	/*
	 * IORESOURCE_IO
	 */
	snprintf(pp->io_space_name, sizeof(pp->io_space_name),
		 "PCIe %d I/O", pp->index);
	pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
	pp->res[0].name = pp->io_space_name;
	if (pp->index == 0) {
		pp->res[0].start = PCIBIOS_MIN_IO;
		pp->res[0].end = pp->res[0].start + SZ_32K - 1;
	} else {
		pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
		pp->res[0].end = IO_SPACE_LIMIT;
	}
	pp->res[0].flags = IORESOURCE_IO;
	if (request_resource(&ioport_resource, &pp->res[0]))
		panic("Request PCIe IO resource failed\n");
	sys->resource[0] = &pp->res[0];

	/*
	 * IORESOURCE_MEM
	 */
	snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
		 "PCIe %d MEM", pp->index);
	pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
	pp->res[1].name = pp->mem_space_name;
	if (pp->index == 0) {
		pp->res[1].start = MEM_BASE_0;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
	} else {
		pp->res[1].start = MEM_BASE_1;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
	}
	pp->res[1].flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pp->res[1]))
		panic("Request PCIe Memory resource failed\n");
	sys->resource[1] = &pp->res[1];

	/*
	 * IORESOURCE_MEM | IORESOURCE_PREFETCH
	 */
	snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
		 "PCIe %d PREFETCH MEM", pp->index);
	pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
	pp->res[2].name = pp->prefetch_space_name;
	if (pp->index == 0) {
		pp->res[2].start = PREFETCH_MEM_BASE_0;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
	} else {
		pp->res[2].start = PREFETCH_MEM_BASE_1;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
	}
	pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
	if (request_resource(&iomem_resource, &pp->res[2]))
		panic("Request PCIe Prefetch Memory resource failed\n");
	sys->resource[2] = &pp->res[2];

	return 1;
}
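
/* All legacy INTx interrupts from both ports arrive on a single SoC IRQ. */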
static int tegra_pcie_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	return INT_PCIE_INTR;
}

static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
						  struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return NULL;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys);
}

static struct hw_pci tegra_pcie_hw __initdata = {
	.nr_controllers	= 2,
	.setup		= tegra_pcie_setup,
	.scan		= tegra_pcie_scan_bus,
	.swizzle	= pci_std_swizzle,
	.map_irq	= tegra_pcie_map_irq,
};
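
/*
 * AFI error interrupt handler: decode the error code and signature
 * registers and log the event.  Legacy PCI interrupts share the same
 * line and are left to the device drivers (IRQ_NONE).
 */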
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
	};

	u32 code, signature;

	code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(AFI_INTR_SIGNATURE);
	afi_writel(0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature);
	else
		pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);

	return IRQ_HANDLED;
}
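
/*
 * Program the AFI translation BARs that map the AXI windows
 * (configuration, downstream I/O, prefetchable and non-prefetchable
 * memory) onto the internal FPCI bus.  Sizes are programmed in 4K
 * pages, hence the ">> 12" shifts.  MSI translation is left disabled.
 */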
static void tegra_pcie_setup_translations(void)
{
	u32 fpci_bar;
	u32 size;
	u32 axi_address;

	/* Bar 0: config Bar */
	fpci_bar = ((u32)0xfdff << 16);
	size = PCIE_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR0_START);
	afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: extended config Bar */
	fpci_bar = ((u32)0xfe1 << 20);
	size = PCIE_EXT_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR1_START);
	afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: downstream IO bar */
	fpci_bar = ((u32)0xfdfc << 16);
	size = MMIO_SIZE;
	axi_address = MMIO_BASE;
	afi_writel(axi_address, AFI_AXI_BAR2_START);
	afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: prefetchable memory BAR */
	fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
	size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1;
	axi_address = PREFETCH_MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR3_START);
	afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR3);

	/* Bar 4: non prefetchable memory BAR */
	fpci_bar = (((MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
	size = MEM_SIZE_0 + MEM_SIZE_1;
	axi_address = MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR4_START);
	afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR4);

	/* Bar 5: NULL out the remaining BAR as it is not used */
	fpci_bar = 0;
	size = 0;
	axi_address = 0;
	afi_writel(axi_address, AFI_AXI_BAR5_START);
	afi_writel(size >> 12, AFI_AXI_BAR5_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(0, AFI_CACHE_BAR0_SZ);
	afi_writel(0, AFI_CACHE_BAR1_ST);
	afi_writel(0, AFI_CACHE_BAR1_SZ);

	/* No MSI */
	afi_writel(0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
	afi_writel(0, AFI_MSI_AXI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
}
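
/*
 * Bring up the controller: enable the port reference clocks and pulse
 * the PEX resets, select the dual root-port crossbar configuration,
 * configure and lock the PHY PLL, then enable FPCI and the AFI error
 * interrupts.
 */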
static void tegra_pcie_enable_controller(void)
{
	u32 val, reg;
	int i;

	/* Enable slot clock and pulse the reset signals */
	for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
		val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN;
		afi_writel(val, reg);

		val &= ~AFI_PEX_CTRL_RST;
		afi_writel(val, reg);

		val = afi_readl(reg) | AFI_PEX_CTRL_RST;
		afi_writel(val, reg);
	}

	/* Enable dual controller and both ports */
	val = afi_readl(AFI_PCIE_CONFIG);
	val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
	val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
	afi_writel(val, AFI_PCIE_CONFIG);

	val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(val, AFI_FUSE);

	/* Initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and
	 * set TX ref sel to div10 (not div5).
	 */
	val = pads_readl(PADS_PLL_CTL);
	val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
	pads_writel(val, PADS_PLL_CTL);

	/* take PLL out of reset */
	val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM;
	pads_writel(val, PADS_PLL_CTL);

	/*
	 * Hack: set the clock voltage to the DEFAULT provided by hw folks.
	 * This doesn't exist in the documentation.
	 */
	pads_writel(0xfa5cfa5c, 0xc8);

	/* Wait for the PLL to lock */
	do {
		val = pads_readl(PADS_PLL_CTL);
	} while (!(val & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/* enable TX/RX data */
	val = pads_readl(PADS_CTL);
	val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(val, PADS_CTL);

	/* Take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);

	/* Finally enable PCIe */
	val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI;
	afi_writel(val, AFI_CONFIGURATION);

	val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
	       AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
	       AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
	afi_writel(val, AFI_AFI_INTR_ENABLE);
	afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);

	/* FIXME: No MSI for now, only INT */
	afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* Disable all exceptions */
	afi_writel(0, AFI_FPCI_ERROR_MASKS);
}
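
/*
 * Power sequencing: tegra_pcie_power_on() asserts the PMC xclk clamp,
 * puts pcie_xclk into reset, releases the clamp and only then enables
 * the AFI, PEX and PLLE clocks; tegra_pcie_power_off() re-asserts the
 * resets and the clamp.
 */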
static void tegra_pcie_xclk_clamp(bool clamp)
{
	u32 reg;

	reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;

	if (clamp)
		reg |= PMC_SCRATCH42_PCX_CLAMP;

	pmc_writel(reg, PMC_SCRATCH42);
}

static int tegra_pcie_power_on(void)
{
	tegra_pcie_xclk_clamp(true);
	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_pcie_xclk_clamp(false);

	clk_enable(tegra_pcie.afi_clk);
	clk_enable(tegra_pcie.pex_clk);
	return clk_enable(tegra_pcie.pll_e);
}

static void tegra_pcie_power_off(void)
{
	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_periph_reset_assert(tegra_pcie.afi_clk);
	tegra_periph_reset_assert(tegra_pcie.pex_clk);

	tegra_pcie_xclk_clamp(true);
}

static int tegra_pcie_clocks_get(void)
{
	int err;

	tegra_pcie.pex_clk = clk_get(NULL, "pex");
	if (IS_ERR(tegra_pcie.pex_clk))
		return PTR_ERR(tegra_pcie.pex_clk);

	tegra_pcie.afi_clk = clk_get(NULL, "afi");
	if (IS_ERR(tegra_pcie.afi_clk)) {
		err = PTR_ERR(tegra_pcie.afi_clk);
		goto err_afi_clk;
	}

	tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
	if (IS_ERR(tegra_pcie.pcie_xclk)) {
		err = PTR_ERR(tegra_pcie.pcie_xclk);
		goto err_pcie_xclk;
	}

	tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
	if (IS_ERR(tegra_pcie.pll_e)) {
		err = PTR_ERR(tegra_pcie.pll_e);
		goto err_pll_e;
	}

	return 0;

err_pll_e:
	clk_put(tegra_pcie.pcie_xclk);
err_pcie_xclk:
	clk_put(tegra_pcie.afi_clk);
err_afi_clk:
	clk_put(tegra_pcie.pex_clk);

	return err;
}

static void tegra_pcie_clocks_put(void)
{
	clk_put(tegra_pcie.pll_e);
	clk_put(tegra_pcie.pcie_xclk);
	clk_put(tegra_pcie.afi_clk);
	clk_put(tegra_pcie.pex_clk);
}
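
/*
 * Acquire everything the driver needs before touching config space:
 * clocks, power, the register and downstream I/O mappings, and the
 * shared error/INTx interrupt.  Failures unwind in reverse order.
 */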
static int __init tegra_pcie_get_resources(void)
{
	struct resource *res_mmio = &tegra_pcie.res_mmio;
	int err;

	err = tegra_pcie_clocks_get();
	if (err) {
		pr_err("PCIE: failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on();
	if (err) {
		pr_err("PCIE: failed to power up: %d\n", err);
		goto err_pwr_on;
	}

	tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ);
	if (tegra_pcie.regs == NULL) {
		pr_err("PCIE: Failed to map PCI/AFI registers\n");
		err = -ENOMEM;
		goto err_map_reg;
	}

	err = request_resource(&iomem_resource, res_mmio);
	if (err) {
		pr_err("PCIE: Failed to request resources: %d\n", err);
		goto err_req_io;
	}

	tegra_pcie_io_base = ioremap_nocache(res_mmio->start,
					     resource_size(res_mmio));
	if (tegra_pcie_io_base == NULL) {
		pr_err("PCIE: Failed to map IO\n");
		err = -ENOMEM;
		goto err_map_io;
	}

	err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
			  IRQF_SHARED, "PCIE", &tegra_pcie);
	if (err) {
		pr_err("PCIE: Failed to register IRQ: %d\n", err);
		goto err_irq;
	}
	set_irq_flags(INT_PCIE_INTR, IRQF_VALID);

	return 0;

err_irq:
	iounmap(tegra_pcie_io_base);
err_map_io:
	release_resource(&tegra_pcie.res_mmio);
err_req_io:
	iounmap(tegra_pcie.regs);
err_map_reg:
	tegra_pcie_power_off();
err_pwr_on:
	tegra_pcie_clocks_put();

	return err;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
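
/*
 * Poll the root port until the data link reports up, then for the link
 * status bit in RP_LINK_CONTROL_STATUS; pulse the PEX reset and retry
 * up to three times before giving up on the port.
 */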
static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
				  u32 reset_reg)
{
	u32 reg;
	int retries = 3;
	int timeout;

	do {
		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_VEND_XP);

			if (reg & RP_VEND_XP_DL_UP)
				break;

			mdelay(1);
			timeout--;
		}

		if (!timeout) {
			pr_err("PCIE: port %d: link down, retrying\n", idx);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_LINK_CONTROL_STATUS);

			if (reg & 0x20000000)
				return true;

			mdelay(1);
			timeout--;
		}

retry:
		/* Pulse the PEX reset */
		reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);
		mdelay(1);
		reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);

		retries--;
	} while (retries);

	return false;
}
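
/*
 * Register a root port only if its link came up; ports with no link are
 * skipped so that enumeration never touches them.
 */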
static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
{
	struct tegra_pcie_port *pp;

	pp = tegra_pcie.port + tegra_pcie.num_ports;

	pp->index = -1;
	pp->base = tegra_pcie.regs + offset;
	pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);

	if (!pp->link_up) {
		pp->base = NULL;
		printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
		return;
	}

	tegra_pcie.num_ports++;
	pp->index = index;
	pp->root_bus_nr = -1;
	memset(pp->res, 0, sizeof(pp->res));
}
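
/*
 * Board-level entry point: power up the controller, program the address
 * translations, probe the requested root ports and register the host
 * with the ARM PCI core.
 */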
int __init tegra_pcie_init(bool init_port0, bool init_port1)
{
	int err;

	if (!(init_port0 || init_port1))
		return -ENODEV;

	err = tegra_pcie_get_resources();
	if (err)
		return err;

	tegra_pcie_enable_controller();

	/* setup the AFI address translations */
	tegra_pcie_setup_translations();

	if (init_port0)
		tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);

	if (init_port1)
		tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);

	pci_common_init(&tegra_pcie_hw);

	return 0;
}
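
/*
 * Usage sketch: a board file would typically call tegra_pcie_init() once
 * during machine init, passing true for each root port that should be
 * brought up.  This is an illustration only; the board init function
 * name below is hypothetical.
 *
 *	static void __init my_board_pcie_init(void)
 *	{
 *		int err;
 *
 *		err = tegra_pcie_init(true, true);
 *		if (err)
 *			pr_err("PCIE: initialization failed: %d\n", err);
 *	}
 */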