pci_gx.c
  1. /*
  2. * Copyright 2012 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/mmzone.h>
  16. #include <linux/pci.h>
  17. #include <linux/delay.h>
  18. #include <linux/string.h>
  19. #include <linux/init.h>
  20. #include <linux/capability.h>
  21. #include <linux/sched.h>
  22. #include <linux/errno.h>
  23. #include <linux/irq.h>
  24. #include <linux/msi.h>
  25. #include <linux/io.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/ctype.h>
  28. #include <asm/processor.h>
  29. #include <asm/sections.h>
  30. #include <asm/byteorder.h>
  31. #include <gxio/iorpc_globals.h>
  32. #include <gxio/kiorpc.h>
  33. #include <gxio/trio.h>
  34. #include <gxio/iorpc_trio.h>
  35. #include <hv/drv_trio_intf.h>
  36. #include <arch/sim.h>
/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */
  41. #define DEBUG_PCI_CFG 0
  42. #if DEBUG_PCI_CFG
  43. #define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
  44. pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
  45. size, val, bus, dev, func, offset & 0xFFF);
  46. #define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
  47. pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
  48. size, val, bus, dev, func, offset & 0xFFF);
  49. #else
  50. #define TRACE_CFG_WR(...)
  51. #define TRACE_CFG_RD(...)
  52. #endif
  53. static int pci_probe = 1;
  54. /* Information on the PCIe RC ports configuration. */
  55. static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
  56. /*
  57. * On some platforms with one or more Gx endpoint ports, we need to
  58. * delay the PCIe RC port probe for a few seconds to work around
  59. * a HW PCIe link-training bug. The exact delay is specified with
  60. * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
  61. * where T is the TRIO instance number, P is the port number and S is
  62. * the delay in seconds. If the argument is specified, but the delay is
  63. * not provided, the value will be DEFAULT_RC_DELAY.
  64. */
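/*
 * For example, booting with "pcie_rc_delay=0,2,15" delays the probe of
 * TRIO 0, port 2 by 15 seconds, while "pcie_rc_delay=0,2" falls back to
 * DEFAULT_RC_DELAY seconds (see setup_pcie_rc_delay() below).
 */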
  65. static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
  66. /* Default number of seconds that the PCIe RC port probe can be delayed. */
  67. #define DEFAULT_RC_DELAY 10
  68. /* The PCI I/O space size in each PCI domain. */
  69. #define IO_SPACE_SIZE 0x10000
  70. /* Provide shorter versions of some very long constant names. */
  71. #define AUTO_CONFIG_RC \
  72. TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
  73. #define AUTO_CONFIG_RC_G1 \
  74. TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
  75. #define AUTO_CONFIG_EP \
  76. TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
  77. #define AUTO_CONFIG_EP_G1 \
  78. TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
  79. /* Array of the PCIe ports configuration info obtained from the BIB. */
  80. struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];
  81. /* Number of configured TRIO instances. */
  82. int num_trio_shims;
  83. /* All drivers share the TRIO contexts defined here. */
  84. gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
  85. /* Pointer to an array of PCIe RC controllers. */
  86. struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
  87. int num_rc_controllers;
  88. static struct pci_ops tile_cfg_ops;
  89. /* Mask of CPUs that should receive PCIe interrupts. */
  90. static struct cpumask intr_cpus_map;
  91. /* We don't need to worry about the alignment of resources. */
  92. resource_size_t pcibios_align_resource(void *data, const struct resource *res,
  93. resource_size_t size,
  94. resource_size_t align)
  95. {
  96. return res->start;
  97. }
  98. EXPORT_SYMBOL(pcibios_align_resource);
  99. /*
  100. * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
  101. * For now, we simply send interrupts to non-dataplane CPUs.
* We may implement methods to allow the user to specify the target CPUs,
  103. * e.g. via boot arguments.
  104. */
  105. static int tile_irq_cpu(int irq)
  106. {
  107. unsigned int count;
  108. int i = 0;
  109. int cpu;
  110. count = cpumask_weight(&intr_cpus_map);
  111. if (unlikely(count == 0)) {
pr_warning("intr_cpus_map empty, interrupts will be"
" delivered to dataplane tiles\n");
  114. return irq % (smp_height * smp_width);
  115. }
  116. count = irq % count;
  117. for_each_cpu(cpu, &intr_cpus_map) {
  118. if (i++ == count)
  119. break;
  120. }
  121. return cpu;
  122. }
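/*
 * Example: with four CPUs in intr_cpus_map, successive IRQ numbers are
 * spread round-robin over them (irq % 4 selects the CPU's position in
 * the mask).
 */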
  123. /* Open a file descriptor to the TRIO shim. */
  124. static int tile_pcie_open(int trio_index)
  125. {
  126. gxio_trio_context_t *context = &trio_contexts[trio_index];
  127. int ret;
  128. int mac;
  129. /* This opens a file descriptor to the TRIO shim. */
  130. ret = gxio_trio_init(context, trio_index);
  131. if (ret < 0)
  132. goto gxio_trio_init_failure;
  133. /* Allocate an ASID for the kernel. */
  134. ret = gxio_trio_alloc_asids(context, 1, 0, 0);
  135. if (ret < 0) {
  136. pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
  137. trio_index);
  138. goto asid_alloc_failure;
  139. }
  140. context->asid = ret;
  141. #ifdef USE_SHARED_PCIE_CONFIG_REGION
/*
 * Alloc a PIO region for config access, shared by all MACs per TRIO.
 * This shouldn't fail since the kernel is supposed to be the first
 * client of the TRIO's PIO regions.
 */
  147. ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
  148. if (ret < 0) {
  149. pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
  150. trio_index);
  151. goto pio_alloc_failure;
  152. }
  153. context->pio_cfg_index = ret;
  154. /*
  155. * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
  156. * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
  157. */
  158. ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
  159. 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
  160. if (ret < 0) {
  161. pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
  162. trio_index);
  163. goto pio_alloc_failure;
  164. }
  165. #endif
  166. /* Get the properties of the PCIe ports on this TRIO instance. */
  167. ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
  168. if (ret < 0) {
  169. pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
  170. " on TRIO %d\n", ret, trio_index);
  171. goto get_port_property_failure;
  172. }
  173. context->mmio_base_mac =
  174. iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
  175. if (context->mmio_base_mac == NULL) {
  176. pr_err("PCI: TRIO config space mapping failure, error %d,"
  177. " on TRIO %d\n", ret, trio_index);
  178. ret = -ENOMEM;
  179. goto trio_mmio_mapping_failure;
  180. }
  181. /* Check the port strap state which will override the BIB setting. */
  182. for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
  183. TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
  184. unsigned int reg_offset;
  185. /* Ignore ports that are not specified in the BIB. */
  186. if (!pcie_ports[trio_index].ports[mac].allow_rc &&
  187. !pcie_ports[trio_index].ports[mac].allow_ep)
  188. continue;
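/*
 * TRIO config-space MMIO offsets are built from three fields: the
 * register offset, the interface select and the MAC number, each
 * shifted into place per TRIO_CFG_REGION_ADDR. This pattern repeats
 * throughout this file.
 */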
  189. reg_offset =
  190. (TRIO_PCIE_INTFC_PORT_CONFIG <<
  191. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  192. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
  193. TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
  194. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  195. port_config.word =
  196. __gxio_mmio_read(context->mmio_base_mac + reg_offset);
  197. if (port_config.strap_state != AUTO_CONFIG_RC &&
  198. port_config.strap_state != AUTO_CONFIG_RC_G1) {
  199. /*
  200. * If this is really intended to be an EP port, record
  201. * it so that the endpoint driver will know about it.
  202. */
  203. if (port_config.strap_state == AUTO_CONFIG_EP ||
  204. port_config.strap_state == AUTO_CONFIG_EP_G1)
  205. pcie_ports[trio_index].ports[mac].allow_ep = 1;
  206. }
  207. }
  208. return ret;
  209. trio_mmio_mapping_failure:
  210. get_port_property_failure:
  211. asid_alloc_failure:
  212. #ifdef USE_SHARED_PCIE_CONFIG_REGION
  213. pio_alloc_failure:
  214. #endif
  215. hv_dev_close(context->fd);
  216. gxio_trio_init_failure:
  217. context->fd = -1;
  218. return ret;
  219. }
  220. static int __init tile_trio_init(void)
  221. {
  222. int i;
  223. /* We loop over all the TRIO shims. */
  224. for (i = 0; i < TILEGX_NUM_TRIO; i++) {
  225. if (tile_pcie_open(i) < 0)
  226. continue;
  227. num_trio_shims++;
  228. }
  229. return 0;
  230. }
  231. postcore_initcall(tile_trio_init);
  232. static void tilegx_legacy_irq_ack(struct irq_data *d)
  233. {
  234. __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
  235. }
  236. static void tilegx_legacy_irq_mask(struct irq_data *d)
  237. {
  238. __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
  239. }
  240. static void tilegx_legacy_irq_unmask(struct irq_data *d)
  241. {
  242. __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
  243. }
  244. static struct irq_chip tilegx_legacy_irq_chip = {
  245. .name = "tilegx_legacy_irq",
  246. .irq_ack = tilegx_legacy_irq_ack,
  247. .irq_mask = tilegx_legacy_irq_mask,
  248. .irq_unmask = tilegx_legacy_irq_unmask,
  249. /* TBD: support set_affinity. */
  250. };
  251. /*
  252. * This is a wrapper function of the kernel level-trigger interrupt
  253. * handler handle_level_irq() for PCI legacy interrupts. The TRIO
  254. * is configured such that only INTx Assert interrupts are proxied
  255. * to Linux which just calls handle_level_irq() after clearing the
  256. * MAC INTx Assert status bit associated with this interrupt.
  257. */
  258. static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
  259. {
  260. struct pci_controller *controller = irq_desc_get_handler_data(desc);
  261. gxio_trio_context_t *trio_context = controller->trio;
  262. uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
  263. int mac = controller->mac;
  264. unsigned int reg_offset;
  265. uint64_t level_mask;
  266. handle_level_irq(irq, desc);
  267. /*
  268. * Clear the INTx Level status, otherwise future interrupts are
  269. * not sent.
  270. */
  271. reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
  272. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  273. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
  274. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  275. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  276. level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;
  277. __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
  278. }
  279. /*
  280. * Create kernel irqs and set up the handlers for the legacy interrupts.
* Also do some minimal initialization for the MSI support.
  282. */
  283. static int tile_init_irqs(struct pci_controller *controller)
  284. {
  285. int i;
  286. int j;
  287. int irq;
  288. int result;
  289. cpumask_copy(&intr_cpus_map, cpu_online_mask);
  290. for (i = 0; i < 4; i++) {
  291. gxio_trio_context_t *context = controller->trio;
  292. int cpu;
  293. /* Ask the kernel to allocate an IRQ. */
  294. irq = create_irq();
  295. if (irq < 0) {
  296. pr_err("PCI: no free irq vectors, failed for %d\n", i);
  297. goto free_irqs;
  298. }
  299. controller->irq_intx_table[i] = irq;
  300. /* Distribute the 4 IRQs to different tiles. */
  301. cpu = tile_irq_cpu(irq);
  302. /* Configure the TRIO intr binding for this IRQ. */
  303. result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
  304. cpu_y(cpu), KERNEL_PL,
  305. irq, controller->mac, i);
  306. if (result < 0) {
  307. pr_err("PCI: MAC intx config failed for %d\n", i);
  308. goto free_irqs;
  309. }
  310. /* Register the IRQ handler with the kernel. */
  311. irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
  312. trio_handle_level_irq);
  313. irq_set_chip_data(irq, (void *)(uint64_t)i);
  314. irq_set_handler_data(irq, controller);
  315. }
  316. return 0;
  317. free_irqs:
  318. for (j = 0; j < i; j++)
  319. destroy_irq(controller->irq_intx_table[j]);
  320. return -1;
  321. }
  322. /*
  323. * Return 1 if the port is strapped to operate in RC mode.
  324. */
  325. static int
  326. strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
  327. {
  328. TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
  329. unsigned int reg_offset;
  330. /* Check the port configuration. */
  331. reg_offset =
  332. (TRIO_PCIE_INTFC_PORT_CONFIG <<
  333. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  334. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
  335. TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
  336. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  337. port_config.word =
  338. __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);
  339. if (port_config.strap_state == AUTO_CONFIG_RC ||
  340. port_config.strap_state == AUTO_CONFIG_RC_G1)
  341. return 1;
  342. else
  343. return 0;
  344. }
  345. /*
  346. * Find valid controllers and fill in pci_controller structs for each
  347. * of them.
  348. *
  349. * Return the number of controllers discovered.
  350. */
  351. int __init tile_pci_init(void)
  352. {
  353. int ctl_index = 0;
  354. int i, j;
  355. if (!pci_probe) {
  356. pr_info("PCI: disabled by boot argument\n");
  357. return 0;
  358. }
  359. pr_info("PCI: Searching for controllers...\n");
  360. if (num_trio_shims == 0 || sim_is_simulator())
  361. return 0;
  362. /*
  363. * Now determine which PCIe ports are configured to operate in RC
* mode. There is a difference in the port configuration capability
  365. * between the Gx36 and Gx72 devices.
  366. *
  367. * The Gx36 has configuration capability for each of the 3 PCIe
  368. * interfaces (disable, auto endpoint, auto RC, etc.).
  369. * On the Gx72, you can only select one of the 3 PCIe interfaces per
  370. * TRIO to train automatically. Further, the allowable training modes
  371. * are reduced to four options (auto endpoint, auto RC, stream x1,
  372. * stream x4).
  373. *
* For Gx36 ports, the port must be allowed to be in RC mode by the
  375. * Board Information Block, and the hardware strapping pins must be
  376. * set to RC mode.
  377. *
  378. * For Gx72 ports, the port will operate in RC mode if either of the
  379. * following is true:
  380. * 1. It is allowed to be in RC mode by the Board Information Block,
  381. * and the BIB doesn't allow the EP mode.
  382. * 2. It is allowed to be in either the RC or the EP mode by the BIB,
  383. * and the hardware strapping pin is set to RC mode.
  384. */
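/*
 * Stated as a boolean (illustrative): on Gx36, is_rc = allow_rc &&
 * strapped_for_rc(); on Gx72, is_rc = allow_rc && (!allow_ep ||
 * strapped_for_rc()).
 */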
  385. for (i = 0; i < TILEGX_NUM_TRIO; i++) {
  386. gxio_trio_context_t *context = &trio_contexts[i];
  387. if (context->fd < 0)
  388. continue;
  389. for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
  390. int is_rc = 0;
  391. if (pcie_ports[i].is_gx72 &&
  392. pcie_ports[i].ports[j].allow_rc) {
  393. if (!pcie_ports[i].ports[j].allow_ep ||
  394. strapped_for_rc(context, j))
  395. is_rc = 1;
  396. } else if (pcie_ports[i].ports[j].allow_rc &&
  397. strapped_for_rc(context, j)) {
  398. is_rc = 1;
  399. }
  400. if (is_rc) {
  401. pcie_rc[i][j] = 1;
  402. num_rc_controllers++;
  403. }
  404. }
  405. }
  406. /* Return if no PCIe ports are configured to operate in RC mode. */
  407. if (num_rc_controllers == 0)
  408. return 0;
  409. /* Set the TRIO pointer and MAC index for each PCIe RC port. */
  410. for (i = 0; i < TILEGX_NUM_TRIO; i++) {
  411. for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
  412. if (pcie_rc[i][j]) {
  413. pci_controllers[ctl_index].trio =
  414. &trio_contexts[i];
  415. pci_controllers[ctl_index].mac = j;
  416. pci_controllers[ctl_index].trio_index = i;
  417. ctl_index++;
  418. if (ctl_index == num_rc_controllers)
  419. goto out;
  420. }
  421. }
  422. }
  423. out:
  424. /* Configure each PCIe RC port. */
  425. for (i = 0; i < num_rc_controllers; i++) {
  426. /* Configure the PCIe MAC to run in RC mode. */
  427. struct pci_controller *controller = &pci_controllers[i];
  428. controller->index = i;
  429. controller->ops = &tile_cfg_ops;
  430. controller->io_space.start = PCIBIOS_MIN_IO +
  431. (i * IO_SPACE_SIZE);
  432. controller->io_space.end = controller->io_space.start +
  433. IO_SPACE_SIZE - 1;
  434. BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
  435. controller->io_space.flags = IORESOURCE_IO;
  436. snprintf(controller->io_space_name,
  437. sizeof(controller->io_space_name),
  438. "PCI I/O domain %d", i);
  439. controller->io_space.name = controller->io_space_name;
  440. /*
  441. * The PCI memory resource is located above the PA space.
  442. * For every host bridge, the BAR window or the MMIO aperture
  443. * is in range [3GB, 4GB - 1] of a 4GB space beyond the
  444. * PA space.
  445. */
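/*
 * Illustrative example, assuming TILE_PCI_BAR_WINDOW_TOP is 4GB and
 * TILE_PCI_BAR_WINDOW_SIZE is 1GB: controller 0's window spans
 * [TILE_PCI_MEM_START + 3GB, TILE_PCI_MEM_START + 4GB - 1], and each
 * subsequent controller is offset by another 4GB.
 */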
  446. controller->mem_offset = TILE_PCI_MEM_START +
  447. (i * TILE_PCI_BAR_WINDOW_TOP);
  448. controller->mem_space.start = controller->mem_offset +
  449. TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
  450. controller->mem_space.end = controller->mem_offset +
  451. TILE_PCI_BAR_WINDOW_TOP - 1;
  452. controller->mem_space.flags = IORESOURCE_MEM;
  453. snprintf(controller->mem_space_name,
  454. sizeof(controller->mem_space_name),
  455. "PCI mem domain %d", i);
  456. controller->mem_space.name = controller->mem_space_name;
  457. }
  458. return num_rc_controllers;
  459. }
  460. /*
  461. * (pin - 1) converts from the PCI standard's [1:4] convention to
  462. * a normal [0:3] range.
  463. */
  464. static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
  465. {
  466. struct pci_controller *controller =
  467. (struct pci_controller *)dev->sysdata;
  468. return controller->irq_intx_table[pin - 1];
  469. }
  470. static void fixup_read_and_payload_sizes(struct pci_controller *controller)
  471. {
  472. gxio_trio_context_t *trio_context = controller->trio;
  473. struct pci_bus *root_bus = controller->root_bus;
  474. TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
  475. TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
  476. unsigned int reg_offset;
  477. struct pci_bus *child;
  478. int mac;
  479. int err;
  480. mac = controller->mac;
  481. /* Set our max read request size to be 4KB. */
  482. reg_offset =
  483. (TRIO_PCIE_RC_DEVICE_CONTROL <<
  484. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  485. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
  486. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  487. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  488. dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
  489. reg_offset);
  490. dev_control.max_read_req_sz = 5;
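/* PCIe encodes this field as 2^(value + 7) bytes, so 5 means 4096. */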
  491. __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
  492. dev_control.word);
  493. /*
  494. * Set the max payload size supported by this Gx PCIe MAC.
  495. * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
  496. * experiments have shown that setting MPS to 256 yields the
  497. * best performance.
  498. */
  499. reg_offset =
  500. (TRIO_PCIE_RC_DEVICE_CAP <<
  501. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  502. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
  503. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  504. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  505. rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
  506. reg_offset);
  507. rc_dev_cap.mps_sup = 1;
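/* Same 2^(value + 7) encoding: a value of 1 advertises 256-byte MPS. */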
  508. __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
  509. rc_dev_cap.word);
  510. /* Configure PCI Express MPS setting. */
  511. list_for_each_entry(child, &root_bus->children, node) {
  512. struct pci_dev *self = child->self;
  513. if (!self)
  514. continue;
  515. pcie_bus_configure_settings(child, self->pcie_mpss);
  516. }
  517. /*
  518. * Set the mac_config register in trio based on the MPS/MRS of the link.
  519. */
  520. reg_offset =
  521. (TRIO_PCIE_RC_DEVICE_CONTROL <<
  522. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  523. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
  524. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  525. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  526. dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
  527. reg_offset);
  528. err = gxio_trio_set_mps_mrs(trio_context,
  529. dev_control.max_payload_size,
  530. dev_control.max_read_req_sz,
  531. mac);
  532. if (err < 0) {
  533. pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
  534. "MAC %d on TRIO %d\n",
  535. mac, controller->trio_index);
  536. }
  537. }
  538. static int setup_pcie_rc_delay(char *str)
  539. {
  540. unsigned long delay = 0;
  541. unsigned long trio_index;
  542. unsigned long mac;
  543. if (str == NULL || !isdigit(*str))
  544. return -EINVAL;
  545. trio_index = simple_strtoul(str, (char **)&str, 10);
  546. if (trio_index >= TILEGX_NUM_TRIO)
  547. return -EINVAL;
  548. if (*str != ',')
  549. return -EINVAL;
  550. str++;
  551. if (!isdigit(*str))
  552. return -EINVAL;
  553. mac = simple_strtoul(str, (char **)&str, 10);
  554. if (mac >= TILEGX_TRIO_PCIES)
  555. return -EINVAL;
  556. if (*str != '\0') {
  557. if (*str != ',')
  558. return -EINVAL;
  559. str++;
  560. if (!isdigit(*str))
  561. return -EINVAL;
  562. delay = simple_strtoul(str, (char **)&str, 10);
  563. }
  564. rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
  565. return 0;
  566. }
  567. early_param("pcie_rc_delay", setup_pcie_rc_delay);
  568. /* PCI initialization entry point, called by subsys_initcall. */
  569. int __init pcibios_init(void)
  570. {
  571. resource_size_t offset;
  572. LIST_HEAD(resources);
  573. int next_busno;
  574. int i;
  575. tile_pci_init();
  576. if (num_rc_controllers == 0)
  577. return 0;
  578. /*
  579. * Delay a bit in case devices aren't ready. Some devices are
  580. * known to require at least 20ms here, but we use a more
  581. * conservative value.
  582. */
  583. msleep(250);
  584. /* Scan all of the recorded PCI controllers. */
  585. for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
  586. struct pci_controller *controller = &pci_controllers[i];
  587. gxio_trio_context_t *trio_context = controller->trio;
  588. TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
  589. TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
  590. struct pci_bus *bus;
  591. unsigned int reg_offset;
  592. unsigned int class_code_revision;
  593. int trio_index;
  594. int mac;
  595. int ret;
  596. if (trio_context->fd < 0)
  597. continue;
  598. trio_index = controller->trio_index;
  599. mac = controller->mac;
  600. /*
  601. * Check for PCIe link-up status to decide if we need
  602. * to force the link to come up.
  603. */
  604. reg_offset =
  605. (TRIO_PCIE_INTFC_PORT_STATUS <<
  606. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  607. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
  608. TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
  609. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  610. port_status.word =
  611. __gxio_mmio_read(trio_context->mmio_base_mac +
  612. reg_offset);
  613. if (!port_status.dl_up) {
  614. if (rc_delay[trio_index][mac]) {
  615. pr_info("Delaying PCIe RC TRIO init %d sec"
  616. " on MAC %d on TRIO %d\n",
  617. rc_delay[trio_index][mac], mac,
  618. trio_index);
  619. msleep(rc_delay[trio_index][mac] * 1000);
  620. }
  621. ret = gxio_trio_force_rc_link_up(trio_context, mac);
  622. if (ret < 0)
  623. pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
  624. "MAC %d on TRIO %d\n", mac, trio_index);
  625. }
  626. pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
  627. trio_index, controller->mac);
  628. /* Delay the bus probe if needed. */
  629. if (rc_delay[trio_index][mac]) {
  630. pr_info("Delaying PCIe RC bus enumerating %d sec"
  631. " on MAC %d on TRIO %d\n",
  632. rc_delay[trio_index][mac], mac,
  633. trio_index);
  634. msleep(rc_delay[trio_index][mac] * 1000);
  635. } else {
  636. /*
  637. * Wait a bit here because some EP devices
  638. * take longer to come up.
  639. */
  640. msleep(1000);
  641. }
  642. /* Check for PCIe link-up status again. */
  643. port_status.word =
  644. __gxio_mmio_read(trio_context->mmio_base_mac +
  645. reg_offset);
  646. if (!port_status.dl_up) {
  647. if (pcie_ports[trio_index].ports[mac].removable) {
  648. pr_info("PCI: link is down, MAC %d on TRIO %d\n",
  649. mac, trio_index);
  650. pr_info("This is expected if no PCIe card"
  651. " is connected to this link\n");
  652. } else
  653. pr_err("PCI: link is down, MAC %d on TRIO %d\n",
  654. mac, trio_index);
  655. continue;
  656. }
  657. /*
  658. * Ensure that the link can come out of L1 power down state.
  659. * Strictly speaking, this is needed only in the case of
  660. * heavy RC-initiated DMAs.
  661. */
  662. reg_offset =
  663. (TRIO_PCIE_INTFC_TX_FIFO_CTL <<
  664. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  665. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
  666. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  667. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  668. tx_fifo_ctl.word =
  669. __gxio_mmio_read(trio_context->mmio_base_mac +
  670. reg_offset);
  671. tx_fifo_ctl.min_p_credits = 0;
  672. __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
  673. tx_fifo_ctl.word);
  674. /*
* Change the device ID so that the Linux bus crawl doesn't confuse
  676. * the internal bridge with any Tilera endpoints.
  677. */
  678. reg_offset =
  679. (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
  680. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  681. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
  682. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  683. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  684. __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
  685. (TILERA_GX36_RC_DEV_ID <<
  686. TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
  687. TILERA_VENDOR_ID);
  688. /* Set the internal P2P bridge class code. */
  689. reg_offset =
  690. (TRIO_PCIE_RC_REVISION_ID <<
  691. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  692. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
  693. TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  694. (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  695. class_code_revision =
  696. __gxio_mmio_read32(trio_context->mmio_base_mac +
  697. reg_offset);
  698. class_code_revision = (class_code_revision & 0xff) |
  699. (PCI_CLASS_BRIDGE_PCI << 16);
  700. __gxio_mmio_write32(trio_context->mmio_base_mac +
  701. reg_offset, class_code_revision);
  702. #ifdef USE_SHARED_PCIE_CONFIG_REGION
  703. /* Map in the MMIO space for the PIO region. */
  704. offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
  705. (((unsigned long long)mac) <<
  706. TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
  707. #else
  708. /* Alloc a PIO region for PCI config access per MAC. */
  709. ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
  710. if (ret < 0) {
  711. pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
  712. "on TRIO %d, give up\n", mac, trio_index);
  713. continue;
  714. }
  715. trio_context->pio_cfg_index[mac] = ret;
  716. /* For PIO CFG, the bus_address_hi parameter is 0. */
  717. ret = gxio_trio_init_pio_region_aux(trio_context,
  718. trio_context->pio_cfg_index[mac],
  719. mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
  720. if (ret < 0) {
  721. pr_err("PCI: PCI CFG PIO init failure for mac %d "
  722. "on TRIO %d, give up\n", mac, trio_index);
  723. continue;
  724. }
  725. offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
  726. (((unsigned long long)mac) <<
  727. TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
  728. #endif
  729. trio_context->mmio_base_pio_cfg[mac] =
  730. iorpc_ioremap(trio_context->fd, offset,
  731. (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
  732. if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
  733. pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
  734. mac, trio_index);
  735. continue;
  736. }
  737. /* Initialize the PCIe interrupts. */
  738. if (tile_init_irqs(controller)) {
  739. pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
  740. mac, trio_index);
  741. continue;
  742. }
  743. /*
  744. * The PCI memory resource is located above the PA space.
  745. * The memory range for the PCI root bus should not overlap
  746. * with the physical RAM.
  747. */
  748. pci_add_resource_offset(&resources, &controller->mem_space,
  749. controller->mem_offset);
  750. pci_add_resource(&resources, &controller->io_space);
  751. controller->first_busno = next_busno;
  752. bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
  753. controller, &resources);
  754. controller->root_bus = bus;
  755. next_busno = bus->busn_res.end + 1;
  756. }
  757. /* Do machine dependent PCI interrupt routing */
  758. pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
  759. /*
  760. * This comes from the generic Linux PCI driver.
  761. *
  762. * It allocates all of the resources (I/O memory, etc)
  763. * associated with the devices read in above.
  764. */
  765. pci_assign_unassigned_resources();
  766. /* Record the I/O resources in the PCI controller structure. */
  767. for (i = 0; i < num_rc_controllers; i++) {
  768. struct pci_controller *controller = &pci_controllers[i];
  769. gxio_trio_context_t *trio_context = controller->trio;
  770. struct pci_bus *root_bus = pci_controllers[i].root_bus;
  771. int ret;
  772. int j;
  773. /*
  774. * Skip controllers that are not properly initialized or
  775. * have down links.
  776. */
  777. if (root_bus == NULL)
  778. continue;
  779. /* Configure the max_payload_size values for this domain. */
  780. fixup_read_and_payload_sizes(controller);
  781. /* Alloc a PIO region for PCI memory access for each RC port. */
  782. ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
  783. if (ret < 0) {
  784. pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
  785. "give up\n", controller->trio_index,
  786. controller->mac);
  787. continue;
  788. }
  789. controller->pio_mem_index = ret;
  790. /*
  791. * For PIO MEM, the bus_address_hi parameter is hard-coded 0
  792. * because we always assign 32-bit PCI bus BAR ranges.
  793. */
  794. ret = gxio_trio_init_pio_region_aux(trio_context,
  795. controller->pio_mem_index,
  796. controller->mac,
  797. 0,
  798. 0);
  799. if (ret < 0) {
  800. pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
  801. "give up\n", controller->trio_index,
  802. controller->mac);
  803. continue;
  804. }
  805. #ifdef CONFIG_TILE_PCI_IO
  806. /*
  807. * Alloc a PIO region for PCI I/O space access for each RC port.
  808. */
  809. ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
  810. if (ret < 0) {
  811. pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
  812. "give up\n", controller->trio_index,
  813. controller->mac);
  814. continue;
  815. }
  816. controller->pio_io_index = ret;
  817. /*
  818. * For PIO IO, the bus_address_hi parameter is hard-coded 0
  819. * because PCI I/O address space is 32-bit.
  820. */
  821. ret = gxio_trio_init_pio_region_aux(trio_context,
  822. controller->pio_io_index,
  823. controller->mac,
  824. 0,
  825. HV_TRIO_PIO_FLAG_IO_SPACE);
  826. if (ret < 0) {
  827. pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
  828. "give up\n", controller->trio_index,
  829. controller->mac);
  830. continue;
  831. }
  832. #endif
  833. /*
  834. * Configure a Mem-Map region for each memory controller so
  835. * that Linux can map all of its PA space to the PCI bus.
  836. * Use the IOMMU to handle hash-for-home memory.
  837. */
  838. for_each_online_node(j) {
  839. unsigned long start_pfn = node_start_pfn[j];
  840. unsigned long end_pfn = node_end_pfn[j];
  841. unsigned long nr_pages = end_pfn - start_pfn;
  842. ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
  843. 0);
  844. if (ret < 0) {
  845. pr_err("PCI: Mem-Map alloc failure on TRIO %d "
  846. "mac %d for MC %d, give up\n",
  847. controller->trio_index,
  848. controller->mac, j);
  849. goto alloc_mem_map_failed;
  850. }
  851. controller->mem_maps[j] = ret;
  852. /*
  853. * Initialize the Mem-Map and the I/O MMU so that all
  854. * the physical memory can be accessed by the endpoint
  855. * devices. The base bus address is set to the base CPA
  856. * of this memory controller plus an offset (see pci.h).
  857. * The region's base VA is set to the base CPA. The
  858. * I/O MMU table essentially translates the CPA to
  859. * the real PA. Implicitly, for node 0, we create
  860. * a separate Mem-Map region that serves as the inbound
  861. * window for legacy 32-bit devices. This is a direct
  862. * map of the low 4GB CPA space.
  863. */
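/*
 * In other words (illustrative): an inbound PCI bus address of
 * CPA + TILE_PCI_MEM_MAP_BASE_OFFSET is translated back to the CPA,
 * and the I/O MMU then resolves that CPA to the real PA.
 */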
  864. ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
  865. controller->mem_maps[j],
  866. start_pfn << PAGE_SHIFT,
  867. nr_pages << PAGE_SHIFT,
  868. trio_context->asid,
  869. controller->mac,
  870. (start_pfn << PAGE_SHIFT) +
  871. TILE_PCI_MEM_MAP_BASE_OFFSET,
  872. j,
  873. GXIO_TRIO_ORDER_MODE_UNORDERED);
  874. if (ret < 0) {
  875. pr_err("PCI: Mem-Map init failure on TRIO %d "
  876. "mac %d for MC %d, give up\n",
  877. controller->trio_index,
  878. controller->mac, j);
  879. goto alloc_mem_map_failed;
  880. }
  881. continue;
  882. alloc_mem_map_failed:
  883. break;
  884. }
  885. }
  886. return 0;
  887. }
  888. subsys_initcall(pcibios_init);
  889. /* No bus fixups needed. */
  890. void pcibios_fixup_bus(struct pci_bus *bus)
  891. {
  892. }
  893. /* Process any "pci=" kernel boot arguments. */
  894. char *pcibios_setup(char *str)
  895. {
  896. if (!strcmp(str, "off")) {
  897. pci_probe = 0;
  898. return NULL;
  899. }
  900. return str;
  901. }
  902. /*
  903. * Enable memory address decoding, as appropriate, for the
  904. * device described by the 'dev' struct.
  905. *
  906. * This is called from the generic PCI layer, and can be called
  907. * for bridges or endpoints.
  908. */
  909. int pcibios_enable_device(struct pci_dev *dev, int mask)
  910. {
  911. return pci_enable_resources(dev, mask);
  912. }
  913. /* Called for each device after PCI setup is done. */
  914. static void pcibios_fixup_final(struct pci_dev *pdev)
  915. {
  916. set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
  917. set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
  918. pdev->dev.archdata.max_direct_dma_addr =
  919. TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
  920. }
  921. DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
  922. /* Map a PCI MMIO bus address into VA space. */
  923. void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
  924. {
  925. struct pci_controller *controller = NULL;
  926. resource_size_t bar_start;
  927. resource_size_t bar_end;
  928. resource_size_t offset;
  929. resource_size_t start;
  930. resource_size_t end;
  931. int trio_fd;
  932. int i;
  933. start = phys_addr;
  934. end = phys_addr + size - 1;
  935. /*
  936. * By searching phys_addr in each controller's mem_space, we can
  937. * determine the controller that should accept the PCI memory access.
  938. */
  939. for (i = 0; i < num_rc_controllers; i++) {
  940. /*
  941. * Skip controllers that are not properly initialized or
  942. * have down links.
  943. */
  944. if (pci_controllers[i].root_bus == NULL)
  945. continue;
  946. bar_start = pci_controllers[i].mem_space.start;
  947. bar_end = pci_controllers[i].mem_space.end;
  948. if ((start >= bar_start) && (end <= bar_end)) {
  949. controller = &pci_controllers[i];
  950. break;
  951. }
  952. }
  953. if (controller == NULL)
  954. return NULL;
  955. trio_fd = controller->trio->fd;
  956. /* Convert the resource start to the bus address offset. */
  957. start = phys_addr - controller->mem_offset;
  958. offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
  959. /* We need to keep the PCI bus address's in-page offset in the VA. */
  960. return iorpc_ioremap(trio_fd, offset, size) +
  961. (start & (PAGE_SIZE - 1));
  962. }
  963. EXPORT_SYMBOL(ioremap);
  964. #ifdef CONFIG_TILE_PCI_IO
  965. /* Map a PCI I/O address into VA space. */
  966. void __iomem *ioport_map(unsigned long port, unsigned int size)
  967. {
  968. struct pci_controller *controller = NULL;
  969. resource_size_t bar_start;
  970. resource_size_t bar_end;
  971. resource_size_t offset;
  972. resource_size_t start;
  973. resource_size_t end;
  974. int trio_fd;
  975. int i;
  976. start = port;
  977. end = port + size - 1;
  978. /*
  979. * By searching the port in each controller's io_space, we can
  980. * determine the controller that should accept the PCI I/O access.
  981. */
  982. for (i = 0; i < num_rc_controllers; i++) {
  983. /*
  984. * Skip controllers that are not properly initialized or
  985. * have down links.
  986. */
  987. if (pci_controllers[i].root_bus == NULL)
  988. continue;
  989. bar_start = pci_controllers[i].io_space.start;
  990. bar_end = pci_controllers[i].io_space.end;
  991. if ((start >= bar_start) && (end <= bar_end)) {
  992. controller = &pci_controllers[i];
  993. break;
  994. }
  995. }
  996. if (controller == NULL)
  997. return NULL;
  998. trio_fd = controller->trio->fd;
  999. /* Convert the resource start to the bus address offset. */
  1000. port -= controller->io_space.start;
  1001. offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;
  1002. /* We need to keep the PCI bus address's in-page offset in the VA. */
  1003. return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
  1004. }
  1005. EXPORT_SYMBOL(ioport_map);
  1006. void ioport_unmap(void __iomem *addr)
  1007. {
  1008. iounmap(addr);
  1009. }
  1010. EXPORT_SYMBOL(ioport_unmap);
  1011. #endif
  1012. void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
  1013. {
  1014. iounmap(addr);
  1015. }
  1016. EXPORT_SYMBOL(pci_iounmap);
  1017. /****************************************************************
  1018. *
  1019. * Tile PCI config space read/write routines
  1020. *
  1021. ****************************************************************/
  1022. /*
* These are the normal read and write ops.
* They are expanded with macros from pci_bus_read_config_byte() etc.
  1025. *
  1026. * devfn is the combined PCI device & function.
  1027. *
  1028. * offset is in bytes, from the start of config space for the
  1029. * specified bus & device.
  1030. */
  1031. static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
  1032. int size, u32 *val)
  1033. {
  1034. struct pci_controller *controller = bus->sysdata;
  1035. gxio_trio_context_t *trio_context = controller->trio;
  1036. int busnum = bus->number & 0xff;
  1037. int device = PCI_SLOT(devfn);
  1038. int function = PCI_FUNC(devfn);
  1039. int config_type = 1;
  1040. TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
  1041. void *mmio_addr;
  1042. /*
  1043. * Map all accesses to the local device on root bus into the
  1044. * MMIO space of the MAC. Accesses to the downstream devices
  1045. * go to the PIO space.
  1046. */
  1047. if (pci_is_root_bus(bus)) {
  1048. if (device == 0) {
  1049. /*
  1050. * This is the internal downstream P2P bridge,
  1051. * access directly.
  1052. */
  1053. unsigned int reg_offset;
  1054. reg_offset = ((offset & 0xFFF) <<
  1055. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  1056. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
  1057. << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  1058. (controller->mac <<
  1059. TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  1060. mmio_addr = trio_context->mmio_base_mac + reg_offset;
  1061. goto valid_device;
  1062. } else {
  1063. /*
  1064. * We fake an empty device for (device > 0),
  1065. * since there is only one device on bus 0.
  1066. */
  1067. goto invalid_device;
  1068. }
  1069. }
  1070. /*
  1071. * Accesses to the directly attached device have to be
  1072. * sent as type-0 configs.
  1073. */
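/*
 * (Type 0 requests address the device directly on this link; type 1
 * requests are forwarded by bridges until they reach the destination
 * bus, where they are converted to type 0.)
 */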
  1074. if (busnum == (controller->first_busno + 1)) {
  1075. /*
  1076. * There is only one device off of our built-in P2P bridge.
  1077. */
  1078. if (device != 0)
  1079. goto invalid_device;
  1080. config_type = 0;
  1081. }
  1082. cfg_addr.word = 0;
  1083. cfg_addr.reg_addr = (offset & 0xFFF);
  1084. cfg_addr.fn = function;
  1085. cfg_addr.dev = device;
  1086. cfg_addr.bus = busnum;
  1087. cfg_addr.type = config_type;
  1088. /*
  1089. * Note that we don't set the mac field in cfg_addr because the
  1090. * mapping is per port.
  1091. */
  1092. mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
  1093. cfg_addr.word;
  1094. valid_device:
  1095. switch (size) {
  1096. case 4:
  1097. *val = __gxio_mmio_read32(mmio_addr);
  1098. break;
  1099. case 2:
  1100. *val = __gxio_mmio_read16(mmio_addr);
  1101. break;
  1102. case 1:
  1103. *val = __gxio_mmio_read8(mmio_addr);
  1104. break;
  1105. default:
  1106. return PCIBIOS_FUNC_NOT_SUPPORTED;
  1107. }
  1108. TRACE_CFG_RD(size, *val, busnum, device, function, offset);
  1109. return 0;
  1110. invalid_device:
  1111. switch (size) {
  1112. case 4:
  1113. *val = 0xFFFFFFFF;
  1114. break;
  1115. case 2:
  1116. *val = 0xFFFF;
  1117. break;
  1118. case 1:
  1119. *val = 0xFF;
  1120. break;
  1121. default:
  1122. return PCIBIOS_FUNC_NOT_SUPPORTED;
  1123. }
  1124. return 0;
  1125. }
  1126. /*
* See tile_cfg_read() for relevant comments.
  1128. * Note that "val" is the value to write, not a pointer to that value.
  1129. */
  1130. static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
  1131. int size, u32 val)
  1132. {
  1133. struct pci_controller *controller = bus->sysdata;
  1134. gxio_trio_context_t *trio_context = controller->trio;
  1135. int busnum = bus->number & 0xff;
  1136. int device = PCI_SLOT(devfn);
  1137. int function = PCI_FUNC(devfn);
  1138. int config_type = 1;
  1139. TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
  1140. void *mmio_addr;
  1141. u32 val_32 = (u32)val;
  1142. u16 val_16 = (u16)val;
  1143. u8 val_8 = (u8)val;
  1144. /*
  1145. * Map all accesses to the local device on root bus into the
  1146. * MMIO space of the MAC. Accesses to the downstream devices
  1147. * go to the PIO space.
  1148. */
  1149. if (pci_is_root_bus(bus)) {
  1150. if (device == 0) {
  1151. /*
  1152. * This is the internal downstream P2P bridge,
  1153. * access directly.
  1154. */
  1155. unsigned int reg_offset;
  1156. reg_offset = ((offset & 0xFFF) <<
  1157. TRIO_CFG_REGION_ADDR__REG_SHIFT) |
  1158. (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
  1159. << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
  1160. (controller->mac <<
  1161. TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
  1162. mmio_addr = trio_context->mmio_base_mac + reg_offset;
  1163. goto valid_device;
  1164. } else {
  1165. /*
  1166. * We fake an empty device for (device > 0),
  1167. * since there is only one device on bus 0.
  1168. */
  1169. goto invalid_device;
  1170. }
  1171. }
  1172. /*
  1173. * Accesses to the directly attached device have to be
  1174. * sent as type-0 configs.
  1175. */
  1176. if (busnum == (controller->first_busno + 1)) {
  1177. /*
  1178. * There is only one device off of our built-in P2P bridge.
  1179. */
  1180. if (device != 0)
  1181. goto invalid_device;
  1182. config_type = 0;
  1183. }
  1184. cfg_addr.word = 0;
  1185. cfg_addr.reg_addr = (offset & 0xFFF);
  1186. cfg_addr.fn = function;
  1187. cfg_addr.dev = device;
  1188. cfg_addr.bus = busnum;
  1189. cfg_addr.type = config_type;
  1190. /*
  1191. * Note that we don't set the mac field in cfg_addr because the
  1192. * mapping is per port.
  1193. */
  1194. mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
  1195. cfg_addr.word;
  1196. valid_device:
  1197. switch (size) {
  1198. case 4:
  1199. __gxio_mmio_write32(mmio_addr, val_32);
  1200. TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
  1201. break;
  1202. case 2:
  1203. __gxio_mmio_write16(mmio_addr, val_16);
  1204. TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
  1205. break;
  1206. case 1:
  1207. __gxio_mmio_write8(mmio_addr, val_8);
  1208. TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
  1209. break;
  1210. default:
  1211. return PCIBIOS_FUNC_NOT_SUPPORTED;
  1212. }
  1213. invalid_device:
  1214. return 0;
  1215. }
  1216. static struct pci_ops tile_cfg_ops = {
  1217. .read = tile_cfg_read,
  1218. .write = tile_cfg_write,
  1219. };
  1220. /* MSI support starts here. */
  1221. static unsigned int tilegx_msi_startup(struct irq_data *d)
  1222. {
  1223. if (d->msi_desc)
  1224. unmask_msi_irq(d);
  1225. return 0;
  1226. }
  1227. static void tilegx_msi_ack(struct irq_data *d)
  1228. {
  1229. __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
  1230. }
  1231. static void tilegx_msi_mask(struct irq_data *d)
  1232. {
  1233. mask_msi_irq(d);
  1234. __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
  1235. }
  1236. static void tilegx_msi_unmask(struct irq_data *d)
  1237. {
  1238. __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
  1239. unmask_msi_irq(d);
  1240. }
  1241. static struct irq_chip tilegx_msi_chip = {
  1242. .name = "tilegx_msi",
  1243. .irq_startup = tilegx_msi_startup,
  1244. .irq_ack = tilegx_msi_ack,
  1245. .irq_mask = tilegx_msi_mask,
  1246. .irq_unmask = tilegx_msi_unmask,
  1247. /* TBD: support set_affinity. */
  1248. };
  1249. int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  1250. {
  1251. struct pci_controller *controller;
  1252. gxio_trio_context_t *trio_context;
  1253. struct msi_msg msg;
  1254. int default_irq;
  1255. uint64_t mem_map_base;
  1256. uint64_t mem_map_limit;
  1257. u64 msi_addr;
  1258. int mem_map;
  1259. int cpu;
  1260. int irq;
  1261. int ret;
  1262. irq = create_irq();
  1263. if (irq < 0)
  1264. return irq;
  1265. /*
  1266. * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
  1267. * devices that are not capable of generating a 64-bit message address.
  1268. * These devices will fall back to using the legacy interrupts.
  1269. * Most PCIe endpoint devices do support 64-bit message addressing.
  1270. */
  1271. if (desc->msi_attrib.is_64 == 0) {
  1272. dev_printk(KERN_INFO, &pdev->dev,
  1273. "64-bit MSI message address not supported, "
  1274. "falling back to legacy interrupts.\n");
  1275. ret = -ENOMEM;
  1276. goto is_64_failure;
  1277. }
  1278. default_irq = desc->msi_attrib.default_irq;
  1279. controller = irq_get_handler_data(default_irq);
  1280. BUG_ON(!controller);
  1281. trio_context = controller->trio;
  1282. /*
  1283. * Allocate a scatter-queue that will accept the MSI write and
  1284. * trigger the TILE-side interrupts. We use the scatter-queue regions
  1285. * before the mem map regions, because the latter are needed by more
  1286. * applications.
  1287. */
  1288. mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
  1289. if (mem_map >= 0) {
  1290. TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
  1291. .pop = 0,
  1292. .doorbell = 1,
  1293. }};
  1294. mem_map += TRIO_NUM_MAP_MEM_REGIONS;
  1295. mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
  1296. mem_map * MEM_MAP_INTR_REGION_SIZE;
  1297. mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
  1298. msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
  1299. msg.data = (unsigned int)doorbell_template.word;
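/*
 * The MSI write targets the last 8 bytes of the scatter-queue region;
 * the message data carries the doorbell word (doorbell = 1 in the
 * template above), which presumably rings the SQ doorbell and raises
 * the TILE-side interrupt.
 */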
  1300. } else {
  1301. /* SQ regions are out, allocate from map mem regions. */
  1302. mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
  1303. if (mem_map < 0) {
  1304. dev_printk(KERN_INFO, &pdev->dev,
  1305. "%s Mem-Map alloc failure. "
  1306. "Failed to initialize MSI interrupts. "
  1307. "Falling back to legacy interrupts.\n",
  1308. desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
  1309. ret = -ENOMEM;
  1310. goto msi_mem_map_alloc_failure;
  1311. }
  1312. mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
  1313. mem_map * MEM_MAP_INTR_REGION_SIZE;
  1314. mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
  1315. msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
  1316. TRIO_MAP_MEM_REG_INT0;
  1317. msg.data = mem_map;
  1318. }
  1319. /* We try to distribute different IRQs to different tiles. */
  1320. cpu = tile_irq_cpu(irq);
  1321. /*
  1322. * Now call up to the HV to configure the MSI interrupt and
  1323. * set up the IPI binding.
  1324. */
  1325. ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
  1326. KERNEL_PL, irq, controller->mac,
  1327. mem_map, mem_map_base, mem_map_limit,
  1328. trio_context->asid);
  1329. if (ret < 0) {
  1330. dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
  1331. goto hv_msi_config_failure;
  1332. }
  1333. irq_set_msi_desc(irq, desc);
  1334. msg.address_hi = msi_addr >> 32;
  1335. msg.address_lo = msi_addr & 0xffffffff;
  1336. write_msi_msg(irq, &msg);
  1337. irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
  1338. irq_set_handler_data(irq, controller);
  1339. return 0;
  1340. hv_msi_config_failure:
  1341. /* Free mem-map */
  1342. msi_mem_map_alloc_failure:
  1343. is_64_failure:
  1344. destroy_irq(irq);
  1345. return ret;
  1346. }
  1347. void arch_teardown_msi_irq(unsigned int irq)
  1348. {
  1349. destroy_irq(irq);
  1350. }