  1. /*
  2. * Versatile Express Core Tile Cortex A9x4 Support
  3. */
  4. #include <linux/init.h>
  5. #include <linux/gfp.h>
  6. #include <linux/device.h>
  7. #include <linux/dma-mapping.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/amba/bus.h>
  10. #include <linux/amba/clcd.h>
  11. #include <linux/clkdev.h>
  12. #include <asm/hardware/arm_timer.h>
  13. #include <asm/hardware/cache-l2x0.h>
  14. #include <asm/hardware/gic.h>
  15. #include <asm/pmu.h>
  16. #include <asm/smp_scu.h>
  17. #include <asm/smp_twd.h>
  18. #include <mach/ct-ca9x4.h>
  19. #include <asm/hardware/timer-sp.h>
  20. #include <asm/mach/map.h>
  21. #include <asm/mach/time.h>
  22. #include "core.h"
  23. #include <mach/motherboard.h>
  24. #include <plat/clcd.h>
/*
 * Static I/O mapping for the core tile: the 8K A9 MPCore private
 * peripheral region (SCU, GIC interfaces, TWD timers) at physical
 * CT_CA9X4_MPIC is mapped to the fixed virtual address V2T_PERIPH.
 */
static struct map_desc ct_ca9x4_io_desc[] __initdata = {
	{
		.virtual	= V2T_PERIPH,
		.pfn		= __phys_to_pfn(CT_CA9X4_MPIC),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	},
};
/* Install the core tile's static I/O mappings (called before ioremap works). */
static void __init ct_ca9x4_map_io(void)
{
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
#ifdef CONFIG_HAVE_ARM_TWD
/* Per-CPU TWD local timer in the A9 MPCore private peripheral region. */
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/* Register the TWD local timer; failure is logged but not fatal. */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
/* No TWD support configured: registration becomes a no-op. */
#define ca9x4_twd_init()	do {} while(0)
#endif
/*
 * Initialise the A9 MPCore GIC (SPIs start at hwirq 29) and then the
 * per-CPU TWD timers, which deliver their interrupt through the GIC.
 */
static void __init ct_ca9x4_init_irq(void)
{
	gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
		 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
	ca9x4_twd_init();
}
/*
 * Route the motherboard DVI output to this core tile's CLCD and set
 * the DVI mode, via the system configuration interface.
 */
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
	u32 site = v2m_get_master_site();

	/*
	 * Old firmware was using the "site" component of the command
	 * to control the DVI muxer (while it should be always 0 ie. MB).
	 * Newer firmware uses the data register. Keep both for compatibility.
	 */
	v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE(site), site);
	v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE(SYS_CFG_SITE_MB), 2);
}
  65. static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
  66. {
  67. unsigned long framesize = 1024 * 768 * 2;
  68. fb->panel = versatile_clcd_get_panel("XVGA");
  69. if (!fb->panel)
  70. return -EINVAL;
  71. return versatile_clcd_setup_dma(fb, framesize);
  72. }
/* CLCD board support: XVGA panel, 5551/565 pixel formats, DMA framebuffer. */
static struct clcd_board ct_ca9x4_clcd_data = {
	.name		= "CT-CA9X4",
	.caps		= CLCD_CAP_5551 | CLCD_CAP_565,
	.check		= clcdfb_check,
	.decode		= clcdfb_decode,
	.enable		= ct_ca9x4_clcd_enable,
	.setup		= ct_ca9x4_clcd_setup,
	.mmap		= versatile_clcd_mmap_dma,
	.remove		= versatile_clcd_remove_dma,
};
/* AMBA peripherals on the core tile, registered in ct_ca9x4_init(). */
static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);

static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
	&clcd_device,
	&dmc_device,
	&smc_device,
	&gpio_device,
};
/* OSC1 rate rounding: any requested rate is accepted as-is. */
static long ct_round(struct clk *clk, unsigned long rate)
{
	return rate;
}
/* Program OSC1 (oscillator index 1 on the master site) to the given rate. */
static int ct_set(struct clk *clk, unsigned long rate)
{
	u32 site = v2m_get_master_site();

	return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE(site) | 1, rate);
}
static const struct clk_ops osc1_clk_ops = {
	.round	= ct_round,
	.set	= ct_set,
};

/* OSC1: programmable clock feeding the CLCD, defaults to 24 MHz. */
static struct clk osc1_clk = {
	.ops	= &osc1_clk_ops,
	.rate	= 24000000,
};

/* Fixed 1 MHz reference for the SP804 dual timers. */
static struct clk ct_sp804_clk = {
	.rate	= 1000000,
};
/* clkdev table tying the clocks above to their consumer devices. */
static struct clk_lookup lookups[] = {
	{	/* CLCD */
		.dev_id		= "ct:clcd",
		.clk		= &osc1_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer0",
		.clk		= &ct_sp804_clk,
	}, {	/* SP804 timers */
		.dev_id		= "sp804",
		.con_id		= "ct-timer1",
		.clk		= &ct_sp804_clk,
	},
};
/* One PMU overflow interrupt per CPU of the A9x4 cluster. */
static struct resource pmu_resources[] = {
	[0] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU0,
		.end	= IRQ_CT_CA9X4_PMU_CPU0,
		.flags	= IORESOURCE_IRQ,
	},
	[1] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU1,
		.end	= IRQ_CT_CA9X4_PMU_CPU1,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU2,
		.end	= IRQ_CT_CA9X4_PMU_CPU2,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= IRQ_CT_CA9X4_PMU_CPU3,
		.end	= IRQ_CT_CA9X4_PMU_CPU3,
		.flags	= IORESOURCE_IRQ,
	},
};
/* Platform device bound by the ARM PMU (perf events) driver. */
static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= ARM_PMU_DEVICE_CPU,
	.num_resources	= ARRAY_SIZE(pmu_resources),
	.resource	= pmu_resources,
};
/* Early init: register the clkdev table before any driver looks up clocks. */
static void __init ct_ca9x4_init_early(void)
{
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
}
  159. static void __init ct_ca9x4_init(void)
  160. {
  161. int i;
  162. #ifdef CONFIG_CACHE_L2X0
  163. void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
  164. /* set RAM latencies to 1 cycle for this core tile. */
  165. writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
  166. writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
  167. l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
  168. #endif
  169. for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
  170. amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
  171. platform_device_register(&pmu_device);
  172. }
#ifdef CONFIG_SMP
/* SCU base mapping, set up in ct_ca9x4_init_cpu_map() and reused by smp_enable. */
static void *ct_ca9x4_scu_base __initdata;

/*
 * Read the core count from the SCU, clip it to nr_cpu_ids, mark the
 * resulting CPUs possible, and install the GIC IPI raise hook.
 */
static void __init ct_ca9x4_init_cpu_map(void)
{
	int i, ncores;

	ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
	if (WARN_ON(!ct_ca9x4_scu_base))
		return;

	ncores = scu_get_core_count(ct_ca9x4_scu_base);
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}

/* Turn on the SCU before secondary CPUs are brought up. */
static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(ct_ca9x4_scu_base);
}
#endif
/* Core tile descriptor consumed by the Versatile Express board code. */
struct ct_desc ct_ca9x4_desc __initdata = {
	.id		= V2M_CT_ID_CA9,
	.name		= "CA9x4",
	.map_io		= ct_ca9x4_map_io,
	.init_early	= ct_ca9x4_init_early,
	.init_irq	= ct_ca9x4_init_irq,
	.init_tile	= ct_ca9x4_init,
#ifdef CONFIG_SMP
	.init_cpu_map	= ct_ca9x4_init_cpu_map,
	.smp_enable	= ct_ca9x4_smp_enable,
#endif
};