omap-wakeupgen.c

/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to
 * the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include <asm/hardware/gic.h>

#include <mach/omap-wakeupgen.h>
#include <mach/omap-secure.h>

#include "omap4-sar-layout.h"
#include "common.h"

#define MAX_NR_REG_BANKS	5
#define MAX_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128

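/*
 * The defaults below (MAX_NR_REG_BANKS banks, MAX_IRQS interrupts) match
 * OMAP5; omap_wakeupgen_init() switches to the smaller OMAP4 values
 * (4 banks of 32 interrupts, 128 SPIs) on OMAP4 silicon.
 */
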
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[NR_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
static unsigned int omap_secure_apis;

/*
 * Static helper functions.
 */
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
        return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
                                (cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
        __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
                                (cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
        __raw_writel(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
        unsigned int spi_irq;

        /*
         * PPIs and SGIs are not supported.
         */
        if (irq < OMAP44XX_IRQ_GIC_START)
                return -EINVAL;

        /*
         * Subtract the GIC offset.
         */
        spi_irq = irq - OMAP44XX_IRQ_GIC_START;
        if (spi_irq > MAX_IRQS) {
                pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
                return -EINVAL;
        }

        /*
         * Each WakeupGen register controls 32 interrupts,
         * i.e. 1 bit per SPI IRQ.
         */
        *reg_index = spi_irq >> 5;
        *bit_posn = spi_irq %= 32;

        return 0;
}

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
        u32 val, bit_number;
        u8 i;

        if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
                return;

        val = wakeupgen_readl(i, cpu);
        val &= ~BIT(bit_number);
        wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
        u32 val, bit_number;
        u8 i;

        if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
                return;

        val = wakeupgen_readl(i, cpu);
        val |= BIT(bit_number);
        wakeupgen_writel(val, i, cpu);
}

/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
        unsigned long flags;

        spin_lock_irqsave(&wakeupgen_lock, flags);
        _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
        spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
        unsigned long flags;

        spin_lock_irqsave(&wakeupgen_lock, flags);
        _wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
        spin_unlock_irqrestore(&wakeupgen_lock, flags);
}

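/*
 * CPU hotplug support: the WakeupGen enable bits are banked per CPU, so
 * the current masks are saved into a per-CPU buffer before a CPU goes
 * down and written back when it comes online again.
 */
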
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
        u8 i;

        for (i = 0; i < irq_banks; i++)
                per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
        u8 i;

        for (i = 0; i < irq_banks; i++)
                wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
        u8 i;

        for (i = 0; i < irq_banks; i++)
                wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on given CPU.
 *	0 = Unmask all interrupts on the 'cpu'
 *	1 = Mask all interrupts on the 'cpu'
 * The saved masks ensure that the original per-IRQ state is preserved;
 * this is faster than iterating through the GIC registers to rebuild
 * the correct masks.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
        unsigned long flags;

        spin_lock_irqsave(&wakeupgen_lock, flags);
        if (set) {
                _wakeupgen_save_masks(cpu);
                _wakeupgen_set_all(cpu, WKG_MASK_ALL);
        } else {
                _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
                _wakeupgen_restore_masks(cpu);
        }
        spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

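/*
 * CPU_PM support: the WakeupGen context is saved before the MPU cluster
 * enters a low power state. On GP devices it is written to SAR RAM and
 * restored by ROM code; HS/EMU devices save it through the secure API.
 */
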
#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
        u32 i, val;

        if (omap_rev() == OMAP4430_REV_ES1_0)
                return;

        for (i = 0; i < irq_banks; i++) {
                /* Save the CPUx interrupt mask for IRQ 0 to 127 */
                val = wakeupgen_readl(i, 0);
                sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
                val = wakeupgen_readl(i, 1);
                sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

                /*
                 * Disable the secure interrupts for CPUx. The restore
                 * code blindly restores secure and non-secure interrupt
                 * masks from SAR RAM. Secure interrupts are not supposed
                 * to be enabled from HLOS, so overwrite the SAR location
                 * so that the secure interrupt remains disabled.
                 */
                sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
                sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
        }

        /* Save AuxBoot* registers */
        val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
        __raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
        val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
        __raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);

        /* Save SyncReq generation logic */
        val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
        __raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
        val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
        __raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);

        /* Set the Backup Bit Mask status */
        val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
        val |= SAR_BACKUP_STATUS_WAKEUPGEN;
        __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
        u32 i, val;

        for (i = 0; i < irq_banks; i++) {
                /* Save the CPUx interrupt mask for IRQ 0 to 159 */
                val = wakeupgen_readl(i, 0);
                sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
                val = wakeupgen_readl(i, 1);
                sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
                sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
                sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
        }

        /* Save AuxBoot* registers */
        val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
        __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
        val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
        __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

        /* Set the Backup Bit Mask status */
        val = __raw_readl(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
        val |= SAR_BACKUP_STATUS_WAKEUPGEN;
        __raw_writel(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. The WakeupGen IP is integrated along with the GIC to manage
 * interrupt wakeups from CPU low power states. It handles the
 * masking/unmasking of shared peripheral interrupts (SPIs), so the
 * interrupt enable/disable control must be kept in sync and consistent
 * between WakeupGen and the GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
        if (!sar_base)
                sar_base = omap4_get_sar_ram_base();

        if (soc_is_omap54xx())
                omap5_irq_save_context();
        else
                omap4_irq_save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
        u32 val;
        u32 offset = SAR_BACKUP_STATUS_OFFSET;

        if (soc_is_omap54xx())
                offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

        val = __raw_readl(sar_base + offset);
        val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
        __raw_writel(val, sar_base + offset);
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
        u32 ret;

        ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
                                     FLAG_START_CRITICAL,
                                     0, 0, 0, 0, 0);
        if (ret != API_HAL_RET_VALUE_OK)
                pr_err("GIC and Wakeupgen context save failed\n");
}
#endif

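/*
 * CPU hotplug notifier: restore the saved WakeupGen masks when a CPU
 * comes online and mask everything once the CPU is dead.
 */
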
#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)hcpu;

        switch (action) {
        case CPU_ONLINE:
                wakeupgen_irqmask_all(cpu, 0);
                break;
        case CPU_DEAD:
                wakeupgen_irqmask_all(cpu, 1);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __refdata irq_hotplug_notifier = {
        .notifier_call = irq_cpu_hotplug_notify,
};

static void __init irq_hotplug_init(void)
{
        register_hotcpu_notifier(&irq_hotplug_notifier);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

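/*
 * CPU PM notifier: save the interrupt context when the MPU cluster
 * enters a low power state and, on GP devices, clear the SAR backup
 * status again on exit.
 */
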
#ifdef CONFIG_CPU_PM
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        switch (cmd) {
        case CPU_CLUSTER_PM_ENTER:
                if (omap_type() == OMAP2_DEVICE_TYPE_GP)
                        irq_save_context();
                else
                        irq_save_secure_context();
                break;
        case CPU_CLUSTER_PM_EXIT:
                if (omap_type() == OMAP2_DEVICE_TYPE_GP)
                        irq_sar_clear();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
        .notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
        /* FIXME: Remove this when MPU OSWR support is added */
        if (!soc_is_omap54xx())
                cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

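/*
 * Accessors exposed to the rest of the OMAP4/5 platform code: the
 * WakeupGen register base and whether the secure APIs are supported
 * on this SoC.
 */
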
void __iomem *omap_get_wakeupgen_base(void)
{
        return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
        return omap_secure_apis;
}

/*
 * Initialise the wakeupgen module.
 */
int __init omap_wakeupgen_init(void)
{
        int i;
        unsigned int boot_cpu = smp_processor_id();

        /* Not supported on OMAP4 ES1.0 silicon */
        if (omap_rev() == OMAP4430_REV_ES1_0) {
                WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
                return -EPERM;
        }

        /* Static mapping, never released */
        wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
        if (WARN_ON(!wakeupgen_base))
                return -ENOMEM;

        if (cpu_is_omap44xx()) {
                irq_banks = OMAP4_NR_BANKS;
                max_irqs = OMAP4_NR_IRQS;
                omap_secure_apis = 1;
        }

        /* Clear all IRQ bitmasks at wakeupGen level */
        for (i = 0; i < irq_banks; i++) {
                wakeupgen_writel(0, i, CPU0_ID);
                wakeupgen_writel(0, i, CPU1_ID);
        }

        /*
         * Override GIC architecture specific functions to add
         * OMAP WakeupGen interrupt controller along with GIC
         */
        gic_arch_extn.irq_mask = wakeupgen_mask;
        gic_arch_extn.irq_unmask = wakeupgen_unmask;
        gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;

        /*
         * FIXME: Add support to set_smp_affinity() once the core
         * GIC code has necessary hooks in place.
         */

        /* Associate all the IRQs to boot CPU like GIC init does. */
        for (i = 0; i < max_irqs; i++)
                irq_target_cpu[i] = boot_cpu;

        irq_hotplug_init();
        irq_pm_init();

        return 0;
}