/*
 * arch/arm/mach-tegra/pcie.c
 *
 * PCIe host controller driver for TEGRA(2) SOCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/sizes.h>
#include <asm/mach/pci.h>

#include <mach/iomap.h>
#include <mach/clk.h>
#include <mach/powergate.h>

#include "board.h"
/* register definitions */
#define AFI_OFFSET 0x3800
#define PADS_OFFSET 0x3000
#define RP0_OFFSET 0x0000
#define RP1_OFFSET 0x1000

#define AFI_AXI_BAR0_SZ 0x00
#define AFI_AXI_BAR1_SZ 0x04
#define AFI_AXI_BAR2_SZ 0x08
#define AFI_AXI_BAR3_SZ 0x0c
#define AFI_AXI_BAR4_SZ 0x10
#define AFI_AXI_BAR5_SZ 0x14

#define AFI_AXI_BAR0_START 0x18
#define AFI_AXI_BAR1_START 0x1c
#define AFI_AXI_BAR2_START 0x20
#define AFI_AXI_BAR3_START 0x24
#define AFI_AXI_BAR4_START 0x28
#define AFI_AXI_BAR5_START 0x2c

#define AFI_FPCI_BAR0 0x30
#define AFI_FPCI_BAR1 0x34
#define AFI_FPCI_BAR2 0x38
#define AFI_FPCI_BAR3 0x3c
#define AFI_FPCI_BAR4 0x40
#define AFI_FPCI_BAR5 0x44

#define AFI_CACHE_BAR0_SZ 0x48
#define AFI_CACHE_BAR0_ST 0x4c
#define AFI_CACHE_BAR1_SZ 0x50
#define AFI_CACHE_BAR1_ST 0x54

#define AFI_MSI_BAR_SZ 0x60
#define AFI_MSI_FPCI_BAR_ST 0x64
#define AFI_MSI_AXI_BAR_ST 0x68

#define AFI_CONFIGURATION 0xac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)

#define AFI_FPCI_ERROR_MASKS 0xb0

#define AFI_INTR_MASK 0xb4
#define AFI_INTR_MASK_INT_MASK (1 << 0)
#define AFI_INTR_MASK_MSI_MASK (1 << 8)

#define AFI_INTR_CODE 0xb8
#define AFI_INTR_CODE_MASK 0xf
#define AFI_INTR_MASTER_ABORT 4
#define AFI_INTR_LEGACY 6

#define AFI_INTR_SIGNATURE 0xbc
#define AFI_SM_INTR_ENABLE 0xc4

#define AFI_AFI_INTR_ENABLE 0xc8
#define AFI_INTR_EN_INI_SLVERR (1 << 0)
#define AFI_INTR_EN_INI_DECERR (1 << 1)
#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
#define AFI_INTR_EN_TGT_DECERR (1 << 3)
#define AFI_INTR_EN_TGT_WRERR (1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
#define AFI_INTR_EN_AXI_DECERR (1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)

#define AFI_PCIE_CONFIG 0x0f8
#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)

#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)

#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)

#define RP_VEND_XP 0x00000F00
#define RP_VEND_XP_DL_UP (1 << 30)

#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000

#define PADS_CTL_SEL 0x0000009C

#define PADS_CTL 0x000000A0
#define PADS_CTL_IDDQ_1L (1 << 0)
#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define PADS_CTL_RX_DATA_EN_1L (1 << 10)

#define PADS_PLL_CTL 0x000000B8
#define PADS_PLL_CTL_RST_B4SM (1 << 1)
#define PADS_PLL_CTL_LOCKDET (1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)

/* PMC access is required for PCIE xclk (un)clamping */
#define PMC_SCRATCH42 0x144
#define PMC_SCRATCH42_PCX_CLAMP (1 << 0)

static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);

#define pmc_writel(value, reg) \
        __raw_writel(value, reg_pmc_base + (reg))
#define pmc_readl(reg) \
        __raw_readl(reg_pmc_base + (reg))
/*
 * Tegra2 defines 1GB in the AXI address map for PCIe.
 *
 * That address space is split into different regions, with sizes and
 * offsets as follows:
 *
 * 0x80000000 - 0x80003fff - PCI controller registers
 * 0x80004000 - 0x80103fff - PCI configuration space
 * 0x80104000 - 0x80203fff - PCI extended configuration space
 * 0x80204000 - 0x803fffff - unused
 * 0x80400000 - 0x8040ffff - downstream IO
 * 0x80410000 - 0x8fffffff - unused
 * 0x90000000 - 0x9fffffff - non-prefetchable memory
 * 0xa0000000 - 0xbfffffff - prefetchable memory
 */
#define TEGRA_PCIE_BASE 0x80000000

#define PCIE_REGS_SZ SZ_16K
#define PCIE_CFG_OFF PCIE_REGS_SZ
#define PCIE_CFG_SZ SZ_1M
#define PCIE_EXT_CFG_OFF (PCIE_CFG_SZ + PCIE_CFG_OFF)
#define PCIE_EXT_CFG_SZ SZ_1M
#define PCIE_IOMAP_SZ (PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)

#define MMIO_BASE (TEGRA_PCIE_BASE + SZ_4M)
#define MMIO_SIZE SZ_64K
#define MEM_BASE_0 (TEGRA_PCIE_BASE + SZ_256M)
#define MEM_SIZE_0 SZ_128M
#define MEM_BASE_1 (MEM_BASE_0 + MEM_SIZE_0)
#define MEM_SIZE_1 SZ_128M
#define PREFETCH_MEM_BASE_0 (MEM_BASE_1 + MEM_SIZE_1)
#define PREFETCH_MEM_SIZE_0 SZ_128M
#define PREFETCH_MEM_BASE_1 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
#define PREFETCH_MEM_SIZE_1 SZ_128M
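
/*
 * For reference, these definitions expand to the windows described in the
 * map above (values computed from the macros, shown only as a cross-check):
 * MMIO_BASE = 0x80400000 (downstream IO), MEM_BASE_0 = 0x90000000,
 * MEM_BASE_1 = 0x98000000, PREFETCH_MEM_BASE_0 = 0xa0000000 and
 * PREFETCH_MEM_BASE_1 = 0xa8000000.
 */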
#define PCIE_CONF_BUS(b) ((b) << 16)
#define PCIE_CONF_DEV(d) ((d) << 11)
#define PCIE_CONF_FUNC(f) ((f) << 8)
#define PCIE_CONF_REG(r) \
        (((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
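
/*
 * Worked example (illustrative only, derived from the macros above): a
 * config access to register 0x10 on bus 1, device 0, function 0 uses
 * PCIE_CONF_BUS(1) + PCIE_CONF_DEV(0) + PCIE_CONF_FUNC(0) +
 * PCIE_CONF_REG(0x10) = 0x10000 + 0x0 + 0x0 + (0x10 | PCIE_CFG_OFF)
 * = 0x14010, an offset into the configuration window that follows the
 * controller registers. Registers at 0x100 and above land in the extended
 * configuration window at PCIE_EXT_CFG_OFF instead.
 */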
struct tegra_pcie_port {
        int index;
        u8 root_bus_nr;
        void __iomem *base;

        bool link_up;

        char io_space_name[16];
        char mem_space_name[16];
        char prefetch_space_name[20];
        struct resource res[3];
};

struct tegra_pcie_info {
        struct tegra_pcie_port port[2];
        int num_ports;

        void __iomem *regs;
        struct resource res_mmio;

        struct clk *pex_clk;
        struct clk *afi_clk;
        struct clk *pcie_xclk;
        struct clk *pll_e;
};

static struct tegra_pcie_info tegra_pcie = {
        .res_mmio = {
                .name = "PCI IO",
                .start = MMIO_BASE,
                .end = MMIO_BASE + MMIO_SIZE - 1,
                .flags = IORESOURCE_MEM,
        },
};

void __iomem *tegra_pcie_io_base;
EXPORT_SYMBOL(tegra_pcie_io_base);
static inline void afi_writel(u32 value, unsigned long offset)
{
        writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline u32 afi_readl(unsigned long offset)
{
        return readl(offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline void pads_writel(u32 value, unsigned long offset)
{
        writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
}

static inline u32 pads_readl(unsigned long offset)
{
        return readl(offset + PADS_OFFSET + tegra_pcie.regs);
}
static struct tegra_pcie_port *bus_to_port(int bus)
{
        int i;

        for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
                int rbus = tegra_pcie.port[i].root_bus_nr;
                if (rbus != -1 && rbus == bus)
                        break;
        }

        return i >= 0 ? tegra_pcie.port + i : NULL;
}
static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
                                int where, int size, u32 *val)
{
        struct tegra_pcie_port *pp = bus_to_port(bus->number);
        void __iomem *addr;

        if (pp) {
                if (devfn != 0) {
                        *val = 0xffffffff;
                        return PCIBIOS_DEVICE_NOT_FOUND;
                }

                addr = pp->base + (where & ~0x3);
        } else {
                addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
                                          PCIE_CONF_DEV(PCI_SLOT(devfn)) +
                                          PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
                                          PCIE_CONF_REG(where));
        }

        *val = readl(addr);

        if (size == 1)
                *val = (*val >> (8 * (where & 3))) & 0xff;
        else if (size == 2)
                *val = (*val >> (8 * (where & 3))) & 0xffff;

        return PCIBIOS_SUCCESSFUL;
}
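
/*
 * Example of the sub-word handling above (illustration only): a size-1 read
 * at where = 0x0e fetches the aligned dword at 0x0c and shifts it right by
 * 8 * (0x0e & 3) = 16 bits, so the caller gets bits 23:16 of that dword,
 * i.e. the byte at offset 0x0e.
 */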
static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
                                 int where, int size, u32 val)
{
        struct tegra_pcie_port *pp = bus_to_port(bus->number);
        void __iomem *addr;

        u32 mask;
        u32 tmp;

        if (pp) {
                if (devfn != 0)
                        return PCIBIOS_DEVICE_NOT_FOUND;

                addr = pp->base + (where & ~0x3);
        } else {
                addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
                                          PCIE_CONF_DEV(PCI_SLOT(devfn)) +
                                          PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
                                          PCIE_CONF_REG(where));
        }

        if (size == 4) {
                writel(val, addr);
                return PCIBIOS_SUCCESSFUL;
        }

        if (size == 2)
                mask = ~(0xffff << ((where & 0x3) * 8));
        else if (size == 1)
                mask = ~(0xff << ((where & 0x3) * 8));
        else
                return PCIBIOS_BAD_REGISTER_NUMBER;

        tmp = readl(addr) & mask;
        tmp |= val << ((where & 0x3) * 8);
        writel(tmp, addr);

        return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops tegra_pcie_ops = {
        .read = tegra_pcie_read_conf,
        .write = tegra_pcie_write_conf,
};
static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
        u16 reg;

        if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
                pci_read_config_word(dev, PCI_COMMAND, &reg);
                reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                        PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
                pci_write_config_word(dev, PCI_COMMAND, reg);
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
{
        u16 val16;
        int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);

        if (pos <= 0) {
                dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
                return;
        }

        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
        val16 |= PCI_EXP_DEVCTL_RELAX_EN;
        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
        struct tegra_pcie_port *pp;

        if (nr >= tegra_pcie.num_ports)
                return 0;

        pp = tegra_pcie.port + nr;
        pp->root_bus_nr = sys->busnr;

        /*
         * IORESOURCE_IO
         */
        snprintf(pp->io_space_name, sizeof(pp->io_space_name),
                 "PCIe %d I/O", pp->index);
        pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
        pp->res[0].name = pp->io_space_name;
        if (pp->index == 0) {
                pp->res[0].start = PCIBIOS_MIN_IO;
                pp->res[0].end = pp->res[0].start + SZ_32K - 1;
        } else {
                pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
                pp->res[0].end = IO_SPACE_LIMIT;
        }
        pp->res[0].flags = IORESOURCE_IO;
        if (request_resource(&ioport_resource, &pp->res[0]))
                panic("Request PCIe IO resource failed\n");
        pci_add_resource_offset(&sys->resources, &pp->res[0], sys->io_offset);

        /*
         * IORESOURCE_MEM
         */
        snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
                 "PCIe %d MEM", pp->index);
        pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
        pp->res[1].name = pp->mem_space_name;
        if (pp->index == 0) {
                pp->res[1].start = MEM_BASE_0;
                pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
        } else {
                pp->res[1].start = MEM_BASE_1;
                pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
        }
        pp->res[1].flags = IORESOURCE_MEM;
        if (request_resource(&iomem_resource, &pp->res[1]))
                panic("Request PCIe Memory resource failed\n");
        pci_add_resource_offset(&sys->resources, &pp->res[1], sys->mem_offset);

        /*
         * IORESOURCE_MEM | IORESOURCE_PREFETCH
         */
        snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
                 "PCIe %d PREFETCH MEM", pp->index);
        pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
        pp->res[2].name = pp->prefetch_space_name;
        if (pp->index == 0) {
                pp->res[2].start = PREFETCH_MEM_BASE_0;
                pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
        } else {
                pp->res[2].start = PREFETCH_MEM_BASE_1;
                pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
        }
        pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
        if (request_resource(&iomem_resource, &pp->res[2]))
                panic("Request PCIe Prefetch Memory resource failed\n");
        pci_add_resource_offset(&sys->resources, &pp->res[2], sys->mem_offset);

        return 1;
}
static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        return INT_PCIE_INTR;
}

static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
                                                  struct pci_sys_data *sys)
{
        struct tegra_pcie_port *pp;

        if (nr >= tegra_pcie.num_ports)
                return NULL;

        pp = tegra_pcie.port + nr;
        pp->root_bus_nr = sys->busnr;

        return pci_scan_root_bus(NULL, sys->busnr, &tegra_pcie_ops, sys,
                                 &sys->resources);
}

static struct hw_pci tegra_pcie_hw __initdata = {
        .nr_controllers = 2,
        .setup = tegra_pcie_setup,
        .scan = tegra_pcie_scan_bus,
        .swizzle = pci_std_swizzle,
        .map_irq = tegra_pcie_map_irq,
};
static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
        const char *err_msg[] = {
                "Unknown",
                "AXI slave error",
                "AXI decode error",
                "Target abort",
                "Master abort",
                "Invalid write",
                "Response decoding error",
                "AXI response decoding error",
                "Transaction timeout",
        };
        u32 code, signature;

        code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
        signature = afi_readl(AFI_INTR_SIGNATURE);
        afi_writel(0, AFI_INTR_CODE);

        if (code == AFI_INTR_LEGACY)
                return IRQ_NONE;
        if (code >= ARRAY_SIZE(err_msg))
                code = 0;

        /*
         * do not pollute kernel log with master abort reports since they
         * happen a lot during enumeration
         */
        if (code == AFI_INTR_MASTER_ABORT)
                pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature);
        else
                pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);

        return IRQ_HANDLED;
}
static void tegra_pcie_setup_translations(void)
{
        u32 fpci_bar;
        u32 size;
        u32 axi_address;

        /* Bar 0: config Bar */
        fpci_bar = ((u32)0xfdff << 16);
        size = PCIE_CFG_SZ;
        axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF;
        afi_writel(axi_address, AFI_AXI_BAR0_START);
        afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR0);

        /* Bar 1: extended config Bar */
        fpci_bar = ((u32)0xfe1 << 20);
        size = PCIE_EXT_CFG_SZ;
        axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF;
        afi_writel(axi_address, AFI_AXI_BAR1_START);
        afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR1);

        /* Bar 2: downstream IO bar */
        fpci_bar = ((u32)0xfdfc << 16);
        size = MMIO_SIZE;
        axi_address = MMIO_BASE;
        afi_writel(axi_address, AFI_AXI_BAR2_START);
        afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR2);

        /* Bar 3: prefetchable memory BAR */
        fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
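        /*
         * Illustration (not part of the programming sequence): with
         * PREFETCH_MEM_BASE_0 = 0xa0000000 the line above yields
         * fpci_bar = (0xa0000 << 4) | 0x1 = 0x00a00001, i.e. the AXI base
         * shifted into the FPCI BAR address field with the low bit set.
         */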
        size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1;
        axi_address = PREFETCH_MEM_BASE_0;
        afi_writel(axi_address, AFI_AXI_BAR3_START);
        afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR3);

        /* Bar 4: non prefetchable memory BAR */
        fpci_bar = (((MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
        size = MEM_SIZE_0 + MEM_SIZE_1;
        axi_address = MEM_BASE_0;
        afi_writel(axi_address, AFI_AXI_BAR4_START);
        afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR4);

        /* Bar 5: NULL out the remaining BAR as it is not used */
        fpci_bar = 0;
        size = 0;
        axi_address = 0;
        afi_writel(axi_address, AFI_AXI_BAR5_START);
        afi_writel(size >> 12, AFI_AXI_BAR5_SZ);
        afi_writel(fpci_bar, AFI_FPCI_BAR5);

        /* map all upstream transactions as uncached */
        afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
        afi_writel(0, AFI_CACHE_BAR0_SZ);
        afi_writel(0, AFI_CACHE_BAR1_ST);
        afi_writel(0, AFI_CACHE_BAR1_SZ);

        /* No MSI */
        afi_writel(0, AFI_MSI_FPCI_BAR_ST);
        afi_writel(0, AFI_MSI_BAR_SZ);
        afi_writel(0, AFI_MSI_AXI_BAR_ST);
        afi_writel(0, AFI_MSI_BAR_SZ);
}
static int tegra_pcie_enable_controller(void)
{
        u32 val, reg;
        int i, timeout;

        /* Enable slot clock and pulse the reset signals */
        for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
                val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN;
                afi_writel(val, reg);

                val &= ~AFI_PEX_CTRL_RST;
                afi_writel(val, reg);

                val = afi_readl(reg) | AFI_PEX_CTRL_RST;
                afi_writel(val, reg);
        }

        /* Enable dual controller and both ports */
        val = afi_readl(AFI_PCIE_CONFIG);
        val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
                 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
                 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
        val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
        afi_writel(val, AFI_PCIE_CONFIG);

        val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
        afi_writel(val, AFI_FUSE);

        /* Initialize internal PHY, enable up to 16 PCIE lanes */
        pads_writel(0x0, PADS_CTL_SEL);

        /* override IDDQ to 1 on all 4 lanes */
        val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L;
        pads_writel(val, PADS_CTL);

        /*
         * Set up PHY PLL inputs: select PLLE output as refclock,
         * set TX ref sel to div10 (not div5).
         */
        val = pads_readl(PADS_PLL_CTL);
        val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
        val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
        pads_writel(val, PADS_PLL_CTL);

        /* take PLL out of reset */
        val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM;
        pads_writel(val, PADS_PLL_CTL);

        /*
         * Hack, set the clock voltage to the DEFAULT provided by hw folks.
         * This doesn't exist in the documentation.
         */
        pads_writel(0xfa5cfa5c, 0xc8);

        /* Wait for the PLL to lock */
        timeout = 300;
        do {
                val = pads_readl(PADS_PLL_CTL);
                usleep_range(1000, 1000);
                if (--timeout == 0) {
                        pr_err("Tegra PCIe error: timeout waiting for PLL\n");
                        return -EBUSY;
                }
        } while (!(val & PADS_PLL_CTL_LOCKDET));

        /* turn off IDDQ override */
        val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L;
        pads_writel(val, PADS_CTL);

        /* enable TX/RX data */
        val = pads_readl(PADS_CTL);
        val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
        pads_writel(val, PADS_CTL);

        /* Take the PCIe interface module out of reset */
        tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);

        /* Finally enable PCIe */
        val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI;
        afi_writel(val, AFI_CONFIGURATION);

        val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
               AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
               AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
        afi_writel(val, AFI_AFI_INTR_ENABLE);
        afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);

        /* FIXME: No MSI for now, only INT */
        afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

        /* Disable all exceptions */
        afi_writel(0, AFI_FPCI_ERROR_MASKS);

        return 0;
}
static void tegra_pcie_xclk_clamp(bool clamp)
{
        u32 reg;

        reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;

        if (clamp)
                reg |= PMC_SCRATCH42_PCX_CLAMP;

        pmc_writel(reg, PMC_SCRATCH42);
}

static void tegra_pcie_power_off(void)
{
        tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
        tegra_periph_reset_assert(tegra_pcie.afi_clk);
        tegra_periph_reset_assert(tegra_pcie.pex_clk);

        tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
        tegra_pcie_xclk_clamp(true);
}

static int tegra_pcie_power_regate(void)
{
        int err;

        tegra_pcie_power_off();

        tegra_pcie_xclk_clamp(true);

        tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
        tegra_periph_reset_assert(tegra_pcie.afi_clk);

        err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
                                                tegra_pcie.pex_clk);
        if (err) {
                pr_err("PCIE: powerup sequence failed: %d\n", err);
                return err;
        }

        tegra_periph_reset_deassert(tegra_pcie.afi_clk);
        tegra_pcie_xclk_clamp(false);

        clk_enable(tegra_pcie.afi_clk);
        clk_enable(tegra_pcie.pex_clk);

        return clk_enable(tegra_pcie.pll_e);
}
static int tegra_pcie_clocks_get(void)
{
        int err;

        tegra_pcie.pex_clk = clk_get(NULL, "pex");
        if (IS_ERR(tegra_pcie.pex_clk))
                return PTR_ERR(tegra_pcie.pex_clk);

        tegra_pcie.afi_clk = clk_get(NULL, "afi");
        if (IS_ERR(tegra_pcie.afi_clk)) {
                err = PTR_ERR(tegra_pcie.afi_clk);
                goto err_afi_clk;
        }

        tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
        if (IS_ERR(tegra_pcie.pcie_xclk)) {
                err = PTR_ERR(tegra_pcie.pcie_xclk);
                goto err_pcie_xclk;
        }

        tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
        if (IS_ERR(tegra_pcie.pll_e)) {
                err = PTR_ERR(tegra_pcie.pll_e);
                goto err_pll_e;
        }

        return 0;

err_pll_e:
        clk_put(tegra_pcie.pcie_xclk);
err_pcie_xclk:
        clk_put(tegra_pcie.afi_clk);
err_afi_clk:
        clk_put(tegra_pcie.pex_clk);
        return err;
}

static void tegra_pcie_clocks_put(void)
{
        clk_put(tegra_pcie.pll_e);
        clk_put(tegra_pcie.pcie_xclk);
        clk_put(tegra_pcie.afi_clk);
        clk_put(tegra_pcie.pex_clk);
}
static int __init tegra_pcie_get_resources(void)
{
        struct resource *res_mmio = &tegra_pcie.res_mmio;
        int err;

        err = tegra_pcie_clocks_get();
        if (err) {
                pr_err("PCIE: failed to get clocks: %d\n", err);
                return err;
        }

        err = tegra_pcie_power_regate();
        if (err) {
                pr_err("PCIE: failed to power up: %d\n", err);
                goto err_pwr_on;
        }

        tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ);
        if (tegra_pcie.regs == NULL) {
                pr_err("PCIE: Failed to map PCI/AFI registers\n");
                err = -ENOMEM;
                goto err_map_reg;
        }

        err = request_resource(&iomem_resource, res_mmio);
        if (err) {
                pr_err("PCIE: Failed to request resources: %d\n", err);
                goto err_req_io;
        }

        tegra_pcie_io_base = ioremap_nocache(res_mmio->start,
                                             resource_size(res_mmio));
        if (tegra_pcie_io_base == NULL) {
                pr_err("PCIE: Failed to map IO\n");
                err = -ENOMEM;
                goto err_map_io;
        }

        err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
                          IRQF_SHARED, "PCIE", &tegra_pcie);
        if (err) {
                pr_err("PCIE: Failed to register IRQ: %d\n", err);
                goto err_irq;
        }
        set_irq_flags(INT_PCIE_INTR, IRQF_VALID);

        return 0;

err_irq:
        iounmap(tegra_pcie_io_base);
err_map_io:
        release_resource(&tegra_pcie.res_mmio);
err_req_io:
        iounmap(tegra_pcie.regs);
err_map_reg:
        tegra_pcie_power_off();
err_pwr_on:
        tegra_pcie_clocks_put();
        return err;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
                                  u32 reset_reg)
{
        u32 reg;
        int retries = 3;
        int timeout;

        do {
                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
                while (timeout) {
                        reg = readl(pp->base + RP_VEND_XP);

                        if (reg & RP_VEND_XP_DL_UP)
                                break;

                        mdelay(1);
                        timeout--;
                }

                if (!timeout) {
                        pr_err("PCIE: port %d: link down, retrying\n", idx);
                        goto retry;
                }

                timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
                while (timeout) {
                        reg = readl(pp->base + RP_LINK_CONTROL_STATUS);
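                        /*
                         * Bit 29 of this dword is bit 13 of the PCIe Link
                         * Status register, which appears to correspond to
                         * Data Link Layer Link Active (an interpretation,
                         * not taken from the original comments).
                         */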
                        if (reg & 0x20000000)
                                return true;

                        mdelay(1);
                        timeout--;
                }

retry:
                /* Pulse the PEX reset */
                reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
                afi_writel(reg, reset_reg);
                mdelay(1);
                reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
                afi_writel(reg, reset_reg);

                retries--;
        } while (retries);

        return false;
}
static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
{
        struct tegra_pcie_port *pp;

        pp = tegra_pcie.port + tegra_pcie.num_ports;

        pp->index = -1;
        pp->base = tegra_pcie.regs + offset;
        pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);

        if (!pp->link_up) {
                pp->base = NULL;
                printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
                return;
        }

        tegra_pcie.num_ports++;
        pp->index = index;
        pp->root_bus_nr = -1;
        memset(pp->res, 0, sizeof(pp->res));
}
int __init tegra_pcie_init(bool init_port0, bool init_port1)
{
        int err;

        if (!(init_port0 || init_port1))
                return -ENODEV;

        pcibios_min_mem = 0;

        err = tegra_pcie_get_resources();
        if (err)
                return err;

        err = tegra_pcie_enable_controller();
        if (err)
                return err;

        /* setup the AFI address translations */
        tegra_pcie_setup_translations();

        if (init_port0)
                tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);

        if (init_port1)
                tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);

        pci_common_init(&tegra_pcie_hw);

        return 0;
}
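
/*
 * Usage sketch (illustrative; the initcall name below is hypothetical and the
 * real call site lives in the board support code, which may differ): board
 * code that wants both root ports brought up would typically do something
 * like
 *
 *	static int __init my_board_pcie_init(void)
 *	{
 *		return tegra_pcie_init(true, true);
 *	}
 *	late_initcall(my_board_pcie_init);
 *
 * with tegra_pcie_init() registering only the ports whose links come up.
 */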