common.c

/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Code for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sysdev.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>

#include <asm/proc-fns.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/regs-serial.h>

#include "common.h"

unsigned int gic_bank_offset __read_mostly;

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";

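/*
 * CPU ID table: s3c_init_cpu() matches samsung_cpu_id (read from the
 * CHIPID block in exynos_init_io()) against these entries to pick the
 * map_io, clock and UART init callbacks for the detected SoC.
 */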
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos4_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	},
};

/* Initial IO mappings */
static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO2,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO2),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GPIO3,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GPIO3),
		.length		= SZ_256,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

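/*
 * SYSRAM lives at a different physical address on EXYNOS4210 Rev.0
 * (SYSRAM0) than on later revisions and the other EXYNOS4 SoCs
 * (SYSRAM1); exynos4_map_io() selects one of the two descriptors below.
 */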
static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

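/*
 * Default idle: drop into the core's low-power state via cpu_do_idle()
 * unless a reschedule is pending; installed as pm_idle by exynos_init().
 */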
static void exynos_idle(void)
{
	if (!need_resched())
		cpu_do_idle();

	local_irq_enable();
}

void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}

/*
 * exynos_init_io
 *
 * register the standard cpu IO areas
 */
void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));

	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}

void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}

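/*
 * Interrupt combiner support: each combiner group ORs up to
 * MAX_IRQ_IN_COMBINER sources onto a single GIC SPI.  The registers
 * below enable/disable individual sources and report the pending
 * status that combiner_handle_cascade_irq() demuxes.
 */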
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->irq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);
	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);

	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
				 unsigned int irq_start)
{
	unsigned int i;

	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();

	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_start;
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);

	/* Setup the Linux IRQ subsystem */
	for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
				+ MAX_IRQ_IN_COMBINER; i++) {
		irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
		irq_set_chip_data(i, &combiner_data[combiner_nr]);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}
}

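/*
 * The GIC CPU interface and distributor registers are banked per core
 * on EXYNOS4 (gic_bank_offset apart).  Re-point the GIC base addresses
 * at the current CPU's bank before mask/unmask/EOI, hooked up through
 * gic_arch_extn in exynos4_init_irq().
 */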
static void exynos4_gic_irq_fix_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	gic_data->cpu_base = S5P_VA_GIC_CPU +
			     (gic_bank_offset * smp_processor_id());

	gic_data->dist_base = S5P_VA_GIC_DIST +
			     (gic_bank_offset * smp_processor_id());
}

void __init exynos4_init_irq(void)
{
	int irq;

	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
	gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
	gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;

	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
			      COMBINER_IRQ(irq, 0));
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

struct sysdev_class exynos4_sysclass = {
	.name	= "exynos4-core",
};

static struct sys_device exynos4_sysdev = {
	.cls	= &exynos4_sysclass,
};

static int __init exynos4_core_init(void)
{
	return sysdev_class_register(&exynos4_sysclass);
}

core_initcall(exynos4_core_init);

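/*
 * L2X0 cache controller setup: program tag/data RAM latencies, prefetch
 * and power control, then hand the controller over to the common l2x0
 * driver with the chosen aux-control value and mask.
 */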
#ifdef CONFIG_CACHE_L2X0
static int __init exynos4_l2x0_cache_init(void)
{
	/* TAG, Data Latency Control: 2cycle */
	__raw_writel(0x110, S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);

	if (soc_is_exynos4210())
		__raw_writel(0x110, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		__raw_writel(0x120, S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

	/* L2X0 Prefetch Control */
	__raw_writel(0x30000007, S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

	/* L2X0 Power Control */
	__raw_writel(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN,
		     S5P_VA_L2CC + L2X0_POWER_CTRL);

	l2x0_init(S5P_VA_L2CC, 0x7C470001, 0xC200ffff);

	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif

int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	/* set idle function */
	pm_idle = exynos_idle;

	return sysdev_register(&exynos4_sysdev);
}

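/*
 * Default UART clock source (uclk1); exynos4_init_uarts() assigns this
 * when a board's uartcfg does not provide its own clock list.
 */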
static struct s3c24xx_uart_clksrc exynos4_serial_clocks[] = {
	[0] = {
		.name		= "uclk1",
		.divisor	= 1,
		.min_baud	= 0,
		.max_baud	= 0,
	},
};

/* uart registration process */
void __init exynos4_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++) {
		if (!tcfg->clocks) {
			tcfg->has_fracval = 1;
			tcfg->clocks = exynos4_serial_clocks;
			tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks);
		}
		tcfg->flags |= NO_NEED_CHECK_CLKSRC;
	}

	s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no);
}

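/*
 * External interrupt (EINT) handling.  EINT0..15 each arrive on their
 * own GIC interrupt and are re-routed to the virtual IRQ_EINT(n)
 * numbers via exynos4_irq_eint0_15(); EINT16..31 share the
 * IRQ_EINT16_31 line and are demuxed from the pend registers.
 */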
static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

static unsigned int exynos4_get_irq_nr(unsigned int number)
{
	u32 ret = 0;

	switch (number) {
	case 0 ... 3:
		ret = (number + IRQ_EINT0);
		break;
	case 4 ... 7:
		ret = (number + (IRQ_EINT4 - 4));
		break;
	case 8 ... 15:
		ret = (number + (IRQ_EINT8 - 8));
		break;
	default:
		printk(KERN_ERR "invalid EINT number: %d\n", number);
	}

	return ret;
}

static inline void exynos4_irq_eint_mask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask |= eint_irq_to_bit(data->irq);
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static void exynos4_irq_eint_unmask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	mask &= ~(eint_irq_to_bit(data->irq));
	__raw_writel(mask, S5P_EINT_MASK(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);
}

static inline void exynos4_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(eint_irq_to_bit(data->irq),
		     S5P_EINT_PEND(EINT_REG_NR(data->irq)));
}

static void exynos4_irq_eint_maskack(struct irq_data *data)
{
	exynos4_irq_eint_mask(data);
	exynos4_irq_eint_ack(data);
}

static int exynos4_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		printk(KERN_ERR "No such irq type %d", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(S5P_EINT_CON(EINT_REG_NR(data->irq)));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, S5P_EINT_CON(EINT_REG_NR(data->irq)));
	spin_unlock(&eint_lock);

	switch (offs) {
	case 0 ... 7:
		s3c_gpio_cfgpin(EINT_GPIO_0(offs & 0x7), EINT_MODE);
		break;
	case 8 ... 15:
		s3c_gpio_cfgpin(EINT_GPIO_1(offs & 0x7), EINT_MODE);
		break;
	case 16 ... 23:
		s3c_gpio_cfgpin(EINT_GPIO_2(offs & 0x7), EINT_MODE);
		break;
	case 24 ... 31:
		s3c_gpio_cfgpin(EINT_GPIO_3(offs & 0x7), EINT_MODE);
		break;
	default:
		printk(KERN_ERR "No such irq number %d", offs);
	}

	return 0;
}

static struct irq_chip exynos4_irq_eint = {
	.name		= "exynos4-eint",
	.irq_mask	= exynos4_irq_eint_mask,
	.irq_unmask	= exynos4_irq_eint_unmask,
	.irq_mask_ack	= exynos4_irq_eint_maskack,
	.irq_ack	= exynos4_irq_eint_ack,
	.irq_set_type	= exynos4_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};

/*
 * exynos4_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos4_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));

	status &= ~mask;
	status &= 0xff;

	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}

static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos4_irq_demux_eint(IRQ_EINT(16));
	exynos4_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}

static void exynos4_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}

int __init exynos4_init_irq_eint(void)
{
	int irq;

	for (irq = 0; irq <= 31; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos4_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(IRQ_EINT16_31, exynos4_irq_demux_eint16_31);

	for (irq = 0; irq <= 15; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		irq_set_handler_data(exynos4_get_irq_nr(irq),
				     &eint0_15_data[irq]);
		irq_set_chained_handler(exynos4_get_irq_nr(irq),
					exynos4_irq_eint0_15);
	}

	return 0;
}

arch_initcall(exynos4_init_irq_eint);