common.c

/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Codes for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>

#include <asm/proc-fns.h>
#include <asm/exception.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/cacheflush.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>
#include <mach/pmu.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/spi-core.h>
#include <plat/regs-serial.h>

#include "common.h"
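
/*
 * PL310 L2 cache auxiliary control value and mask, passed to
 * l2x0_init()/l2x0_of_init() in exynos4_l2x0_cache_init() below.
 */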
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";
static const char name_exynos5440[] = "EXYNOS5440";

static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos5440_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);
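
/*
 * Table of supported SoCs: s3c_init_cpu() matches the ID read from the
 * CHIPID block against these entries and invokes the per-SoC map_io,
 * clock and init hooks for the chip that was detected.
 */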
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode = EXYNOS4210_CPU_ID,
		.idmask = EXYNOS4_CPU_MASK,
		.map_io = exynos4_map_io,
		.init_clocks = exynos4_init_clocks,
		.init_uarts = exynos4_init_uarts,
		.init = exynos_init,
		.name = name_exynos4210,
	}, {
		.idcode = EXYNOS4212_CPU_ID,
		.idmask = EXYNOS4_CPU_MASK,
		.map_io = exynos4_map_io,
		.init_clocks = exynos4_init_clocks,
		.init_uarts = exynos4_init_uarts,
		.init = exynos_init,
		.name = name_exynos4212,
	}, {
		.idcode = EXYNOS4412_CPU_ID,
		.idmask = EXYNOS4_CPU_MASK,
		.map_io = exynos4_map_io,
		.init_clocks = exynos4_init_clocks,
		.init_uarts = exynos4_init_uarts,
		.init = exynos_init,
		.name = name_exynos4412,
	}, {
		.idcode = EXYNOS5250_SOC_ID,
		.idmask = EXYNOS5_SOC_MASK,
		.map_io = exynos5_map_io,
		.init_clocks = exynos5_init_clocks,
		.init = exynos_init,
		.name = name_exynos5250,
	}, {
		.idcode = EXYNOS5440_SOC_ID,
		.idmask = EXYNOS5_SOC_MASK,
		.map_io = exynos5440_map_io,
		.init = exynos_init,
		.name = name_exynos5440,
	},
};

/* Initial IO mappings */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual = (unsigned long)S5P_VA_CHIPID,
		.pfn = __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length = SZ_4K,
		.type = MT_DEVICE,
	},
};

#ifdef CONFIG_ARCH_EXYNOS5
static struct map_desc exynos5440_iodesc[] __initdata = {
	{
		.virtual = (unsigned long)S5P_VA_CHIPID,
		.pfn = __phys_to_pfn(EXYNOS5440_PA_CHIPID),
		.length = SZ_4K,
		.type = MT_DEVICE,
	},
};
#endif

static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual = (unsigned long)S3C_VA_SYS,
		.pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_TIMER,
		.pfn = __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length = SZ_16K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_WATCHDOG,
		.pfn = __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_SROMC,
		.pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_SYSTIMER,
		.pfn = __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_PMU,
		.pfn = __phys_to_pfn(EXYNOS4_PA_PMU),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn = __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_GIC_CPU,
		.pfn = __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_GIC_DIST,
		.pfn = __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_UART,
		.pfn = __phys_to_pfn(EXYNOS4_PA_UART),
		.length = SZ_512K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_CMU,
		.pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
		.length = SZ_128K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn = __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length = SZ_8K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_L2CC,
		.pfn = __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_DMC0,
		.pfn = __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_DMC1,
		.pfn = __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_USB_HSPHY,
		.pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length = SZ_4K,
		.type = MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual = (unsigned long)S5P_VA_SYSRAM,
		.pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length = SZ_4K,
		.type = MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual = (unsigned long)S5P_VA_SYSRAM,
		.pfn = __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length = SZ_4K,
		.type = MT_DEVICE,
	},
};

static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual = (unsigned long)S3C_VA_SYS,
		.pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_TIMER,
		.pfn = __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length = SZ_16K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_WATCHDOG,
		.pfn = __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_SROMC,
		.pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_SYSTIMER,
		.pfn = __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_SYSRAM,
		.pfn = __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length = SZ_4K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_CMU,
		.pfn = __phys_to_pfn(EXYNOS5_PA_CMU),
		.length = 144 * SZ_1K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S5P_VA_PMU,
		.pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
		.length = SZ_64K,
		.type = MT_DEVICE,
	}, {
		.virtual = (unsigned long)S3C_VA_UART,
		.pfn = __phys_to_pfn(EXYNOS5_PA_UART),
		.length = SZ_512K,
		.type = MT_DEVICE,
	},
};

static struct map_desc exynos5440_iodesc0[] __initdata = {
	{
		.virtual = (unsigned long)S3C_VA_UART,
		.pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
		.length = SZ_512K,
		.type = MT_DEVICE,
	},
};

void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}

void exynos5_restart(char mode, const char *cmd)
{
	u32 val;
	void __iomem *addr;

	if (of_machine_is_compatible("samsung,exynos5250")) {
		val = 0x1;
		addr = EXYNOS_SWRESET;
	} else if (of_machine_is_compatible("samsung,exynos5440")) {
		val = (0x10 << 20) | (0x1 << 16);
		addr = EXYNOS5440_SWRESET;
	} else {
		pr_err("%s: cannot support non-DT\n", __func__);
		return;
	}

	__raw_writel(val, addr);
}

void __init exynos_init_late(void)
{
	if (of_machine_is_compatible("samsung,exynos5440"))
		/* to be supported later */
		return;

	exynos_pm_late_initcall();
}

/*
 * exynos_init_io
 *
 * register the standard CPU I/O areas
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	struct map_desc *iodesc = exynos_iodesc;
	int iodesc_sz = ARRAY_SIZE(exynos_iodesc);
#if defined(CONFIG_OF) && defined(CONFIG_ARCH_EXYNOS5)
	unsigned long root = of_get_flat_dt_root();

	/* initialize the io descriptors we need for initialization */
	if (of_flat_dt_is_compatible(root, "samsung,exynos5440")) {
		iodesc = exynos5440_iodesc;
		iodesc_sz = ARRAY_SIZE(exynos5440_iodesc);
	}
#endif

	iotable_init(iodesc, iodesc_sz);
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");

	s3c64xx_spi_setname("exynos4210-spi");
}

static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
}

static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}

static void __init exynos5440_map_io(void)
{
	iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
}

static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	/* EXYNOS5440 supports only the common clock framework */
	if (soc_is_exynos5440())
		return;

#ifdef CONFIG_SOC_EXYNOS5250
	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
#endif
}
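
/*
 * Interrupt combiner: each combiner group funnels up to
 * MAX_IRQ_IN_COMBINER sources onto one parent (GIC SPI) interrupt.
 * Four groups share a 0x10-byte register block, each owning an 8-bit
 * slice of the enable/status registers (see irq_mask below).
 */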
#define COMBINER_ENABLE_SET 0x0
#define COMBINER_ENABLE_CLEAR 0x4
#define COMBINER_INT_STATUS 0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
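
/*
 * Chained handler for a combiner group's parent interrupt: read the
 * group status, keep only this group's 8-bit slice and forward the
 * first pending source as its own Linux interrupt.
 */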
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
	.name = "COMBINER",
	.irq_mask = combiner_mask_irq,
	.irq_unmask = combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
	unsigned int max_nr;

	if (soc_is_exynos5250())
		max_nr = EXYNOS5_MAX_COMBINER_NR;
	else
		max_nr = EXYNOS4_MAX_COMBINER_NR;

	if (combiner_nr >= max_nr)
		BUG();
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init_one(unsigned int combiner_nr,
				     void __iomem *base)
{
	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);
}

#ifdef CONFIG_OF
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}
#else
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	return -EINVAL;
}
#endif

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate = combiner_irq_domain_xlate,
	.map = combiner_irq_domain_map,
};

static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int max_nr, nr_irq;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				"setting default as %d.\n",
				__func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
						EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}
}

#ifdef CONFIG_OF
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	combiner_init(combiner_base, np);

	return 0;
}

static const struct of_device_id exynos_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "arm,cortex-a15-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{},
};
#endif

void __init exynos4_init_irq(void)
{
	unsigned int gic_bank_offset;

	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos_dt_irq_match);
#endif

	if (!of_have_populated_dt())
		combiner_init(S5P_VA_COMBINER_BASE, NULL);

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
	of_irq_init(exynos_dt_irq_match);
#endif
	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS5
	 * uses GIC instead of VIC.
	 */
	if (!of_machine_is_compatible("samsung,exynos5440"))
		s5p_init_irq(NULL, 0);

	gic_arch_extn.irq_set_wake = s3c_irq_wake;
}

struct bus_type exynos_subsys = {
	.name = "exynos-core",
	.dev_name = "exynos-core",
};

static struct device exynos4_dev = {
	.bus = &exynos_subsys,
};

static int __init exynos_core_init(void)
{
	return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);

#ifdef CONFIG_CACHE_L2X0
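/*
 * Configure the PL310 L2 cache on EXYNOS4. DT-based setup through
 * l2x0_of_init() is tried first; otherwise the tag/data latency,
 * prefetch and power control registers are programmed directly before
 * l2x0_init(). The programmed values are also kept in l2x0_saved_regs
 * and cleaned to memory so the resume path can restore them.
 */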
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250() || soc_is_exynos5440())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif

static int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	return device_register(&exynos4_dev);
}

/* uart registration process */
static void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
		tcfg->has_fracval = 1;

	s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
}

static void __iomem *exynos_eint_base;

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

static inline int exynos4_irq_to_gpio(unsigned int irq)
{
	if (irq < IRQ_EINT(0))
		return -EINVAL;

	irq -= IRQ_EINT(0);
	if (irq < 8)
		return EXYNOS4_GPX0(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX1(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX2(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX3(irq);

	return -EINVAL;
}

static inline int exynos5_irq_to_gpio(unsigned int irq)
{
	if (irq < IRQ_EINT(0))
		return -EINVAL;

	irq -= IRQ_EINT(0);
	if (irq < 8)
		return EXYNOS5_GPX0(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX1(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX2(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX3(irq);

	return -EINVAL;
}

static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};

static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};

static inline void exynos_irq_eint_mask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
	mask |= EINT_OFFSET_BIT(data->irq);
	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);
}

static void exynos_irq_eint_unmask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
	mask &= ~(EINT_OFFSET_BIT(data->irq));
	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);
}

static inline void exynos_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(EINT_OFFSET_BIT(data->irq),
		     EINT_PEND(exynos_eint_base, data->irq));
}

static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}
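
/*
 * Each EINT_CON register configures eight external interrupt lines,
 * four bits of trigger type per line, hence the (offs & 0x7) * 4
 * shift below.
 */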
static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		printk(KERN_ERR "No such irq type %d", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);

	if (soc_is_exynos5250())
		s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
	else
		s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));

	return 0;
}

static struct irq_chip exynos_irq_eint = {
	.name = "exynos-eint",
	.irq_mask = exynos_irq_eint_mask,
	.irq_unmask = exynos_irq_eint_unmask,
	.irq_mask_ack = exynos_irq_eint_maskack,
	.irq_ack = exynos_irq_eint_ack,
	.irq_set_type = exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake = s3c_irqext_wake,
#endif
};

/*
 * exynos_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
	u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));

	status &= ~mask;
	status &= 0xff;

	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}

static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos_irq_demux_eint(IRQ_EINT(16));
	exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}

static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	generic_handle_irq(*irq_data);
	chained_irq_exit(chip, desc);
}

static int __init exynos_init_irq_eint(void)
{
	int irq;

#ifdef CONFIG_PINCTRL_SAMSUNG
	/*
	 * The Samsung pinctrl driver provides an integrated gpio/pinmux/pinconf
	 * functionality along with support for external gpio and wakeup
	 * interrupts. If the samsung pinctrl driver is enabled and includes
	 * the wakeup interrupt support, then setting up the external wakeup
	 * interrupts here can be skipped. This check here is temporary to
	 * allow exynos4 platforms that do not use the Samsung pinctrl driver
	 * to co-exist with platforms that do. When all of the Samsung Exynos4
	 * platforms switch over to using the pinctrl driver, the wakeup
	 * interrupt support code here can be completely removed.
	 */
	static const struct of_device_id exynos_pinctrl_ids[] = {
		{ .compatible = "samsung,pinctrl-exynos4210", },
		{ .compatible = "samsung,pinctrl-exynos4x12", },
	};
	struct device_node *pctrl_np, *wkup_np;
	const char *wkup_compat = "samsung,exynos4210-wakeup-eint";

	for_each_matching_node(pctrl_np, exynos_pinctrl_ids) {
		if (of_device_is_available(pctrl_np)) {
			wkup_np = of_find_compatible_node(pctrl_np, NULL,
							wkup_compat);
			if (wkup_np)
				return -ENODEV;
		}
	}
#endif
	if (soc_is_exynos5440())
		return 0;

	if (soc_is_exynos5250())
		exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
	else
		exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);

	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		if (soc_is_exynos5250()) {
			irq_set_handler_data(exynos5_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		} else {
			irq_set_handler_data(exynos4_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		}
	}

	return 0;
}
arch_initcall(exynos_init_irq_eint);