/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to
 * the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  18. #include <linux/kernel.h>
  19. #include <linux/init.h>
  20. #include <linux/io.h>
  21. #include <linux/irq.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/cpu.h>
  24. #include <linux/notifier.h>
  25. #include <linux/cpu_pm.h>
  26. #include <linux/irqchip/arm-gic.h>
  27. #include "omap-wakeupgen.h"
  28. #include "omap-secure.h"
  29. #include "soc.h"
  30. #include "omap4-sar-layout.h"
  31. #include "common.h"
/* Number of 32-bit enable-register banks per CPU (OMAP5 worst case) */
#define MAX_NR_REG_BANKS 5
/* Maximum number of SPI IRQs handled by WakeupGen (OMAP5) */
#define MAX_IRQS 160
/* Writing all-zeroes to an enable bank masks all 32 of its wakeups */
#define WKG_MASK_ALL 0x00000000
/* Writing all-ones to an enable bank unmasks all 32 of its wakeups */
#define WKG_UNMASK_ALL 0xffffffff
/* Address stride between the CPU0 and CPU1 register sets */
#define CPU_ENA_OFFSET 0x400
#define CPU0_ID 0x0
#define CPU1_ID 0x1
/* OMAP4 implements only 4 banks / 128 SPIs */
#define OMAP4_NR_BANKS 4
#define OMAP4_NR_IRQS 128
static void __iomem *wakeupgen_base;	/* ioremapped WakeupGen registers */
static void __iomem *sar_base;		/* SAR RAM; mapped lazily at first context save */
/* Serializes all read-modify-write accesses to the enable banks */
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
/*
 * CPU each IRQ is routed to. Indexed by raw Linux IRQ number (GIC offset
 * included) in wakeupgen_mask()/unmask(), but only max_irqs entries are
 * initialised. NOTE(review): on OMAP5 d->irq can reach
 * OMAP44XX_IRQ_GIC_START + 160, which exceeds this array — verify sizing.
 */
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;	/* trimmed to 4 on OMAP4 */
static unsigned int max_irqs = MAX_IRQS;		/* trimmed to 128 on OMAP4 */
static unsigned int omap_secure_apis;	/* non-zero when secure APIs are available */
  48. /*
  49. * Static helper functions.
  50. */
  51. static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
  52. {
  53. return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
  54. (cpu * CPU_ENA_OFFSET) + (idx * 4));
  55. }
  56. static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
  57. {
  58. __raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
  59. (cpu * CPU_ENA_OFFSET) + (idx * 4));
  60. }
  61. static inline void sar_writel(u32 val, u32 offset, u8 idx)
  62. {
  63. __raw_writel(val, sar_base + offset + (idx * 4));
  64. }
  65. static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
  66. {
  67. unsigned int spi_irq;
  68. /*
  69. * PPIs and SGIs are not supported.
  70. */
  71. if (irq < OMAP44XX_IRQ_GIC_START)
  72. return -EINVAL;
  73. /*
  74. * Subtract the GIC offset.
  75. */
  76. spi_irq = irq - OMAP44XX_IRQ_GIC_START;
  77. if (spi_irq > MAX_IRQS) {
  78. pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
  79. return -EINVAL;
  80. }
  81. /*
  82. * Each WakeupGen register controls 32 interrupt.
  83. * i.e. 1 bit per SPI IRQ
  84. */
  85. *reg_index = spi_irq >> 5;
  86. *bit_posn = spi_irq %= 32;
  87. return 0;
  88. }
  89. static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
  90. {
  91. u32 val, bit_number;
  92. u8 i;
  93. if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
  94. return;
  95. val = wakeupgen_readl(i, cpu);
  96. val &= ~BIT(bit_number);
  97. wakeupgen_writel(val, i, cpu);
  98. }
  99. static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
  100. {
  101. u32 val, bit_number;
  102. u8 i;
  103. if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
  104. return;
  105. val = wakeupgen_readl(i, cpu);
  106. val |= BIT(bit_number);
  107. wakeupgen_writel(val, i, cpu);
  108. }
  109. /*
  110. * Architecture specific Mask extension
  111. */
  112. static void wakeupgen_mask(struct irq_data *d)
  113. {
  114. unsigned long flags;
  115. raw_spin_lock_irqsave(&wakeupgen_lock, flags);
  116. _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
  117. raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
  118. }
  119. /*
  120. * Architecture specific Unmask extension
  121. */
  122. static void wakeupgen_unmask(struct irq_data *d)
  123. {
  124. unsigned long flags;
  125. raw_spin_lock_irqsave(&wakeupgen_lock, flags);
  126. _wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
  127. raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
  128. }
  129. #ifdef CONFIG_HOTPLUG_CPU
/* Per-CPU snapshot of the enable banks, saved/restored across hotplug */
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
  131. static void _wakeupgen_save_masks(unsigned int cpu)
  132. {
  133. u8 i;
  134. for (i = 0; i < irq_banks; i++)
  135. per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
  136. }
  137. static void _wakeupgen_restore_masks(unsigned int cpu)
  138. {
  139. u8 i;
  140. for (i = 0; i < irq_banks; i++)
  141. wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
  142. }
  143. static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
  144. {
  145. u8 i;
  146. for (i = 0; i < irq_banks; i++)
  147. wakeupgen_writel(reg, i, cpu);
  148. }
  149. /*
  150. * Mask or unmask all interrupts on given CPU.
  151. * 0 = Mask all interrupts on the 'cpu'
  152. * 1 = Unmask all interrupts on the 'cpu'
  153. * Ensure that the initial mask is maintained. This is faster than
  154. * iterating through GIC registers to arrive at the correct masks.
  155. */
  156. static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
  157. {
  158. unsigned long flags;
  159. raw_spin_lock_irqsave(&wakeupgen_lock, flags);
  160. if (set) {
  161. _wakeupgen_save_masks(cpu);
  162. _wakeupgen_set_all(cpu, WKG_MASK_ALL);
  163. } else {
  164. _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
  165. _wakeupgen_restore_masks(cpu);
  166. }
  167. raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
  168. }
  169. #endif
  170. #ifdef CONFIG_CPU_PM
/*
 * Save the OMAP4 WakeupGen register context into SAR RAM so that the
 * wakeup ROM code can restore it after a low power state.
 * The write order (CPU masks, secure-mask zeroing, AuxBoot, SyncReq,
 * backup-status flag last) matches the SAR layout the ROM expects.
 */
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	/* Context save/restore is not supported on ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS. So overwrite the SAR location
		 * so that the secure interrupt remains disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	__raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	__raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	__raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	__raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status last, marking the save as valid */
	val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}
  207. static inline void omap5_irq_save_context(void)
  208. {
  209. u32 i, val;
  210. for (i = 0; i < irq_banks; i++) {
  211. /* Save the CPUx interrupt mask for IRQ 0 to 159 */
  212. val = wakeupgen_readl(i, 0);
  213. sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
  214. val = wakeupgen_readl(i, 1);
  215. sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
  216. sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
  217. sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
  218. }
  219. /* Save AuxBoot* registers */
  220. val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
  221. __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
  222. val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
  223. __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);
  224. /* Set the Backup Bit Mask status */
  225. val = __raw_readl(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
  226. val |= SAR_BACKUP_STATUS_WAKEUPGEN;
  227. __raw_writel(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
  228. }
  229. /*
  230. * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
  231. * ROM code. WakeupGen IP is integrated along with GIC to manage the
  232. * interrupt wakeups from CPU low power states. It manages
  233. * masking/unmasking of Shared peripheral interrupts(SPI). So the
  234. * interrupt enable/disable control should be in sync and consistent
  235. * at WakeupGen and GIC so that interrupts are not lost.
  236. */
static void irq_save_context(void)
{
	/* SAR RAM base is mapped lazily on the first cluster PM entry */
	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();

	/* Dispatch to the SoC-specific SAR layout */
	if (soc_is_omap54xx())
		omap5_irq_save_context();
	else
		omap4_irq_save_context();
}
  246. /*
  247. * Clear WakeupGen SAR backup status.
  248. */
  249. static void irq_sar_clear(void)
  250. {
  251. u32 val;
  252. u32 offset = SAR_BACKUP_STATUS_OFFSET;
  253. if (soc_is_omap54xx())
  254. offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
  255. val = __raw_readl(sar_base + offset);
  256. val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
  257. __raw_writel(val, sar_base + offset);
  258. }
  259. /*
  260. * Save GIC and Wakeupgen interrupt context using secure API
  261. * for HS/EMU devices.
  262. */
  263. static void irq_save_secure_context(void)
  264. {
  265. u32 ret;
  266. ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
  267. FLAG_START_CRITICAL,
  268. 0, 0, 0, 0, 0);
  269. if (ret != API_HAL_RET_VALUE_OK)
  270. pr_err("GIC and Wakeupgen context save failed\n");
  271. }
  272. #endif
  273. #ifdef CONFIG_HOTPLUG_CPU
  274. static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
  275. unsigned long action, void *hcpu)
  276. {
  277. unsigned int cpu = (unsigned int)hcpu;
  278. switch (action) {
  279. case CPU_ONLINE:
  280. wakeupgen_irqmask_all(cpu, 0);
  281. break;
  282. case CPU_DEAD:
  283. wakeupgen_irqmask_all(cpu, 1);
  284. break;
  285. }
  286. return NOTIFY_OK;
  287. }
/* Hotplug notifier; __refdata as the callback is __cpuinit */
static struct notifier_block __refdata irq_hotplug_notifier = {
	.notifier_call = irq_cpu_hotplug_notify,
};
/* Register for CPU hotplug events so wakeup masks track CPU state. */
static void __init irq_hotplug_init(void)
{
	register_hotcpu_notifier(&irq_hotplug_notifier);
}
  295. #else
/* !CONFIG_HOTPLUG_CPU: nothing to register */
static void __init irq_hotplug_init(void)
{}
  298. #endif
  299. #ifdef CONFIG_CPU_PM
  300. static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
  301. {
  302. switch (cmd) {
  303. case CPU_CLUSTER_PM_ENTER:
  304. if (omap_type() == OMAP2_DEVICE_TYPE_GP)
  305. irq_save_context();
  306. else
  307. irq_save_secure_context();
  308. break;
  309. case CPU_CLUSTER_PM_EXIT:
  310. if (omap_type() == OMAP2_DEVICE_TYPE_GP)
  311. irq_sar_clear();
  312. break;
  313. }
  314. return NOTIFY_OK;
  315. }
/* CPU PM notifier hooking the context save/clear above */
static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};
/* Hook the WakeupGen context handling into the CPU PM notifier chain. */
static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!soc_is_omap54xx())
		cpu_pm_register_notifier(&irq_notifier_block);
}
  325. #else
/* !CONFIG_CPU_PM: nothing to register */
static void __init irq_pm_init(void)
{}
  328. #endif
/*
 * omap_get_wakeupgen_base - return the ioremapped WakeupGen base.
 * NULL until omap_wakeupgen_init() has mapped the block.
 */
void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}
/*
 * omap_secure_apis_support - non-zero when secure APIs are usable
 * (set for OMAP44xx in omap_wakeupgen_init()).
 */
int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}
  337. /*
  338. * Initialise the wakeupgen module.
  339. */
/*
 * Initialise the wakeupgen module: map the register block, clear all
 * wakeup enables on both CPUs, hook the mask/unmask callbacks into
 * the GIC and route every IRQ to the boot CPU.
 *
 * Returns 0 on success, -EPERM on unsupported silicon, or -ENOMEM if
 * the ioremap fails.
 */
int __init omap_wakeupgen_init(void)
{
	int i;
	unsigned int boot_cpu = smp_processor_id();

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	/* OMAP4 has fewer banks/IRQs than the OMAP5-sized defaults */
	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	}

	/* Clear all IRQ bitmasks at wakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * Override GIC architecture specific functions to add
	 * OMAP WakeupGen interrupt controller along with GIC
	 */
	gic_arch_extn.irq_mask = wakeupgen_mask;
	gic_arch_extn.irq_unmask = wakeupgen_unmask;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}