gpio-omap.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390
  1. /*
  2. * Support functions for OMAP GPIO
  3. *
  4. * Copyright (C) 2003-2005 Nokia Corporation
  5. * Written by Juha Yrjölä <juha.yrjola@nokia.com>
  6. *
  7. * Copyright (C) 2009 Texas Instruments
  8. * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License version 2 as
  12. * published by the Free Software Foundation.
  13. */
  14. #include <linux/init.h>
  15. #include <linux/module.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/syscore_ops.h>
  18. #include <linux/err.h>
  19. #include <linux/clk.h>
  20. #include <linux/io.h>
  21. #include <linux/slab.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/pm.h>
  24. #include <mach/hardware.h>
  25. #include <asm/irq.h>
  26. #include <mach/irqs.h>
  27. #include <asm/gpio.h>
  28. #include <asm/mach/irq.h>
/* Power-mode value used with bank->power_mode when entering off mode */
#define OFF_MODE	1

/* Global list of all registered GPIO banks (linked via gpio_bank.node) */
static LIST_HEAD(omap_gpio_list);
/*
 * Snapshot of a bank's registers, saved in gpio_bank.context so state
 * can be restored after the bank loses context in low-power states.
 */
struct gpio_regs {
	u32 irqenable1;		/* first interrupt-enable register */
	u32 irqenable2;		/* second interrupt-enable register, if any */
	u32 wake_en;		/* per-line wakeup enable */
	u32 ctrl;		/* module control (clock gating bit) */
	u32 oe;			/* output-enable (direction) register */
	u32 leveldetect0;	/* low-level trigger enables */
	u32 leveldetect1;	/* high-level trigger enables */
	u32 risingdetect;	/* rising-edge trigger enables */
	u32 fallingdetect;	/* falling-edge trigger enables */
	u32 dataout;		/* output data latch */
};
/*
 * Per-bank driver state; one instance per GPIO bank, linked into
 * omap_gpio_list.  Differences in register layout between OMAP
 * variants are abstracted through the offsets table in @regs.
 */
struct gpio_bank {
	struct list_head node;		/* entry in omap_gpio_list */
	unsigned long pbase;
	void __iomem *base;		/* ioremapped register base */
	u16 irq;			/* bank's parent (chained) interrupt */
	u16 virtual_irq_start;		/* irq number of the bank's line 0 */
	u32 suspend_wakeup;		/* lines armed as wakeup sources */
	u32 saved_wakeup;		/* wakeup mask saved across suspend */
	u32 non_wakeup_gpios;		/* lines with no wakeup capability */
	u32 enabled_non_wakeup_gpios;	/* edge lines to re-check after idle */
	struct gpio_regs context;	/* register snapshot for context loss */
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;			/* lines currently level-triggered */
	u32 toggle_mask;		/* both-edge lines needing ICR flips */
	spinlock_t lock;		/* protects bank registers and masks */
	struct gpio_chip chip;		/* gpiolib interface */
	struct clk *dbck;		/* debounce functional clock */
	u32 mod_usage;			/* bitmap of gpio_request()ed lines */
	u32 dbck_enable_mask;		/* lines with debounce enabled */
	bool dbck_enabled;		/* dbck currently clocked on */
	struct device *dev;
	bool is_mpuio;			/* bank is the OMAP1 MPUIO block */
	bool dbck_flag;			/* bank supports debouncing at all */
	bool loses_context;		/* bank powers off in low-power states */
	int stride;			/* MPUIO register stride divisor */
	u32 width;			/* number of GPIO lines in the bank */
	int context_loss_count;
	u16 id;
	int power_mode;
	bool workaround_enabled;
	/* writes DATAOUT either via set/clr registers or via read-modify-write */
	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
	struct omap_gpio_reg_offs *regs;	/* per-SoC register offsets */
};
  79. #define GPIO_INDEX(bank, gpio) (gpio % bank->width)
  80. #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
  81. #define GPIO_MOD_CTRL_BIT BIT(0)
  82. static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
  83. {
  84. void __iomem *reg = bank->base;
  85. u32 l;
  86. reg += bank->regs->direction;
  87. l = __raw_readl(reg);
  88. if (is_input)
  89. l |= 1 << gpio;
  90. else
  91. l &= ~(1 << gpio);
  92. __raw_writel(l, reg);
  93. }
  94. /* set data out value using dedicate set/clear register */
  95. static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
  96. {
  97. void __iomem *reg = bank->base;
  98. u32 l = GPIO_BIT(bank, gpio);
  99. if (enable)
  100. reg += bank->regs->set_dataout;
  101. else
  102. reg += bank->regs->clr_dataout;
  103. __raw_writel(l, reg);
  104. }
  105. /* set data out value using mask register */
  106. static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
  107. {
  108. void __iomem *reg = bank->base + bank->regs->dataout;
  109. u32 gpio_bit = GPIO_BIT(bank, gpio);
  110. u32 l;
  111. l = __raw_readl(reg);
  112. if (enable)
  113. l |= gpio_bit;
  114. else
  115. l &= ~gpio_bit;
  116. __raw_writel(l, reg);
  117. }
  118. static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
  119. {
  120. void __iomem *reg = bank->base + bank->regs->datain;
  121. return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
  122. }
  123. static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
  124. {
  125. void __iomem *reg = bank->base + bank->regs->dataout;
  126. return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
  127. }
  128. static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
  129. {
  130. int l = __raw_readl(base + reg);
  131. if (set)
  132. l |= mask;
  133. else
  134. l &= ~mask;
  135. __raw_writel(l, base + reg);
  136. }
  137. static inline void _gpio_dbck_enable(struct gpio_bank *bank)
  138. {
  139. if (bank->dbck_enable_mask && !bank->dbck_enabled) {
  140. clk_enable(bank->dbck);
  141. bank->dbck_enabled = true;
  142. }
  143. }
  144. static inline void _gpio_dbck_disable(struct gpio_bank *bank)
  145. {
  146. if (bank->dbck_enable_mask && bank->dbck_enabled) {
  147. clk_disable(bank->dbck);
  148. bank->dbck_enabled = false;
  149. }
  150. }
/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
			       unsigned debounce)
{
	void __iomem *reg;
	u32 val;
	u32 l;

	/* Bank without debounce support: nothing to do */
	if (!bank->dbck_flag)
		return;

	/*
	 * Convert microseconds to register units of 31us.
	 * NOTE(review): for inputs in the 32..61 range this yields 0,
	 * which disables debounce below rather than enabling the
	 * minimum setting — confirm whether that is intended.
	 */
	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	/* Program the bank-wide debounce time register */
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	/* Then enable or disable debouncing for this one line */
	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce) {
		val |= l;
		clk_enable(bank->dbck);
	} else {
		val &= ~l;
		/*
		 * NOTE(review): clk_enable/clk_disable are not balanced
		 * per line here; repeated disable requests can decrement
		 * the clock's use count below its enable count — verify
		 * against the clk framework's reference counting.
		 */
		clk_disable(bank->dbck);
	}
	/* Cache which lines have debounce on, for _gpio_dbck_{en,dis}able() */
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
}
/*
 * Program the level/edge detect registers of one line from an
 * IRQ_TYPE_* trigger mask, and keep the wakeup-enable register and
 * the bank's cached level/edge bookkeeping in sync.
 */
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
				    int trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	/* Each detect register gets its bit set iff the trigger asks for it */
	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

	/* Wakeup-capable lines follow the trigger: any trigger arms wakeup */
	if (likely(!(bank->non_wakeup_gpios & gpio_bit)))
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	/* Re-cache which lines are level-triggered, for the irq handler */
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}
  227. #ifdef CONFIG_ARCH_OMAP1
  228. /*
  229. * This only applies to chips that can't do both rising and falling edge
  230. * detection at once. For all other chips, this function is a noop.
  231. */
  232. static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
  233. {
  234. void __iomem *reg = bank->base;
  235. u32 l = 0;
  236. if (!bank->regs->irqctrl)
  237. return;
  238. reg += bank->regs->irqctrl;
  239. l = __raw_readl(reg);
  240. if ((l >> gpio) & 1)
  241. l &= ~(1 << gpio);
  242. else
  243. l |= 1 << gpio;
  244. __raw_writel(l, reg);
  245. }
  246. #else
  247. static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
  248. #endif
/*
 * Program the trigger type for one line, dispatching on which of the
 * three hardware flavours this bank exposes:
 *  - leveldetect/wkup_en registers: delegate to set_gpio_trigger()
 *  - a single irqctrl register: one edge-select bit per line, no
 *    level triggering (returns -EINVAL for level requests)
 *  - edgectrl1/edgectrl2: a 2-bit field per line, split across two
 *    registers of 8 lines each
 */
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		/* Both edges: the irq handler must flip the bit each time */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		/* Lines 0-7 live in edgectrl1, lines 8-15 in edgectrl2 */
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));	/* clear this line's 2-bit field */
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		__raw_writel(l, reg);
	}

	return 0;
}
/* irq_chip .irq_set_type hook: validate and program a line's trigger */
static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank;
	unsigned gpio;
	int retval;
	unsigned long flags;

	/* Map the irq number back to a GPIO number (MPUIO range on OMAP1) */
	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = d->irq - IH_GPIO_BASE;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	bank = irq_data_get_irq_chip_data(d);

	/* Level triggering requires the leveldetect registers */
	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	/* Switch the flow handler to match the trigger class */
	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}
  311. static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
  312. {
  313. void __iomem *reg = bank->base;
  314. reg += bank->regs->irqstatus;
  315. __raw_writel(gpio_mask, reg);
  316. /* Workaround for clearing DSP GPIO interrupts to allow retention */
  317. if (bank->regs->irqstatus2) {
  318. reg = bank->base + bank->regs->irqstatus2;
  319. __raw_writel(gpio_mask, reg);
  320. }
  321. /* Flush posted write for the irq status to avoid spurious interrupts */
  322. __raw_readl(reg);
  323. }
/* Acknowledge the pending interrupt of a single line */
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
  328. static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
  329. {
  330. void __iomem *reg = bank->base;
  331. u32 l;
  332. u32 mask = (1 << bank->width) - 1;
  333. reg += bank->regs->irqenable;
  334. l = __raw_readl(reg);
  335. if (bank->regs->irqenable_inv)
  336. l = ~l;
  337. l &= mask;
  338. return l;
  339. }
  340. static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
  341. {
  342. void __iomem *reg = bank->base;
  343. u32 l;
  344. if (bank->regs->set_irqenable) {
  345. reg += bank->regs->set_irqenable;
  346. l = gpio_mask;
  347. } else {
  348. reg += bank->regs->irqenable;
  349. l = __raw_readl(reg);
  350. if (bank->regs->irqenable_inv)
  351. l &= ~gpio_mask;
  352. else
  353. l |= gpio_mask;
  354. }
  355. __raw_writel(l, reg);
  356. }
  357. static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
  358. {
  359. void __iomem *reg = bank->base;
  360. u32 l;
  361. if (bank->regs->clr_irqenable) {
  362. reg += bank->regs->clr_irqenable;
  363. l = gpio_mask;
  364. } else {
  365. reg += bank->regs->irqenable;
  366. l = __raw_readl(reg);
  367. if (bank->regs->irqenable_inv)
  368. l |= gpio_mask;
  369. else
  370. l &= ~gpio_mask;
  371. }
  372. __raw_writel(l, reg);
  373. }
  374. static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
  375. {
  376. _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
  377. }
  378. /*
  379. * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
  380. * 1510 does not seem to have a wake-up register. If JTAG is connected
  381. * to the target, system will wake up always on GPIO events. While
  382. * system is running all registered GPIO interrupts need to have wake-up
  383. * enabled. When system is suspended, only selected GPIO interrupts need
  384. * to have wake-up enabled.
  385. */
  386. static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
  387. {
  388. u32 gpio_bit = GPIO_BIT(bank, gpio);
  389. unsigned long flags;
  390. if (bank->non_wakeup_gpios & gpio_bit) {
  391. dev_err(bank->dev,
  392. "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
  393. return -EINVAL;
  394. }
  395. spin_lock_irqsave(&bank->lock, flags);
  396. if (enable)
  397. bank->suspend_wakeup |= gpio_bit;
  398. else
  399. bank->suspend_wakeup &= ~gpio_bit;
  400. spin_unlock_irqrestore(&bank->lock, flags);
  401. return 0;
  402. }
/* Return one line to its quiescent state: input, irq masked, no trigger */
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}
  410. /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
  411. static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
  412. {
  413. unsigned int gpio = d->irq - IH_GPIO_BASE;
  414. struct gpio_bank *bank;
  415. int retval;
  416. bank = irq_data_get_irq_chip_data(d);
  417. retval = _set_gpio_wakeup(bank, gpio, enable);
  418. return retval;
  419. }
/* gpio_chip .request hook: power the bank up and prepare the line */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
	}

	/* Record this line as in use; last free triggers module disable */
	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
/* gpio_chip .free hook: quiesce the line and power the bank down if idle */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en)
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
	}

	/* _reset_gpio() takes a global gpio number, hence chip.base + offset */
	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}
/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	/* Keep the bank powered while we poll its status register */
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	/* Loop until a status read shows no more pending lines */
	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		/* Only consider lines whose interrupt is enabled */
		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there is only edge sensitive GPIO pin interrupts
		configured, we could unmask GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		/* Dispatch each pending line to its per-line irq handler */
		gpio_irq = bank->virtual_irq_start;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));

			if (!(isr & 1))
				continue;

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);

			generic_handle_irq(gpio_irq);
		}
	}
	/* if bank has any level sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after
	handler(s) are executed in order to avoid spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}
  552. static void gpio_irq_shutdown(struct irq_data *d)
  553. {
  554. unsigned int gpio = d->irq - IH_GPIO_BASE;
  555. struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
  556. unsigned long flags;
  557. spin_lock_irqsave(&bank->lock, flags);
  558. _reset_gpio(bank, gpio);
  559. spin_unlock_irqrestore(&bank->lock, flags);
  560. }
  561. static void gpio_ack_irq(struct irq_data *d)
  562. {
  563. unsigned int gpio = d->irq - IH_GPIO_BASE;
  564. struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
  565. _clear_gpio_irqstatus(bank, gpio);
  566. }
  567. static void gpio_mask_irq(struct irq_data *d)
  568. {
  569. unsigned int gpio = d->irq - IH_GPIO_BASE;
  570. struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
  571. unsigned long flags;
  572. spin_lock_irqsave(&bank->lock, flags);
  573. _set_gpio_irqenable(bank, gpio, 0);
  574. _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
  575. spin_unlock_irqrestore(&bank->lock, flags);
  576. }
/* irq_chip .irq_unmask hook: restore the trigger and re-enable the line */
static void gpio_unmask_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	/* Re-program the trigger that gpio_mask_irq() cleared */
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}
/* irq_chip shared by the GPIO lines of every (non-MPUIO) bank */
static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};
  605. /*---------------------------------------------------------------------*/
  606. static int omap_mpuio_suspend_noirq(struct device *dev)
  607. {
  608. struct platform_device *pdev = to_platform_device(dev);
  609. struct gpio_bank *bank = platform_get_drvdata(pdev);
  610. void __iomem *mask_reg = bank->base +
  611. OMAP_MPUIO_GPIO_MASKIT / bank->stride;
  612. unsigned long flags;
  613. spin_lock_irqsave(&bank->lock, flags);
  614. bank->saved_wakeup = __raw_readl(mask_reg);
  615. __raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
  616. spin_unlock_irqrestore(&bank->lock, flags);
  617. return 0;
  618. }
  619. static int omap_mpuio_resume_noirq(struct device *dev)
  620. {
  621. struct platform_device *pdev = to_platform_device(dev);
  622. struct gpio_bank *bank = platform_get_drvdata(pdev);
  623. void __iomem *mask_reg = bank->base +
  624. OMAP_MPUIO_GPIO_MASKIT / bank->stride;
  625. unsigned long flags;
  626. spin_lock_irqsave(&bank->lock, flags);
  627. __raw_writel(bank->saved_wakeup, mask_reg);
  628. spin_unlock_irqrestore(&bank->lock, flags);
  629. return 0;
  630. }
/* noirq-phase suspend/resume hooks for the MPUIO pseudo-device */
static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq	= omap_mpuio_suspend_noirq,
	.resume_noirq	= omap_mpuio_resume_noirq,
};
/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};
/* Statically declared device matched by name against omap_mpuio_driver */
static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};
/* Register the pseudo driver/device pair that gives MPUIO PM callbacks */
static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	/* Only register the device once the driver registered cleanly */
	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}
  656. /*---------------------------------------------------------------------*/
  657. static int gpio_input(struct gpio_chip *chip, unsigned offset)
  658. {
  659. struct gpio_bank *bank;
  660. unsigned long flags;
  661. bank = container_of(chip, struct gpio_bank, chip);
  662. spin_lock_irqsave(&bank->lock, flags);
  663. _set_gpio_direction(bank, offset, 1);
  664. spin_unlock_irqrestore(&bank->lock, flags);
  665. return 0;
  666. }
  667. static int gpio_is_input(struct gpio_bank *bank, int mask)
  668. {
  669. void __iomem *reg = bank->base + bank->regs->direction;
  670. return __raw_readl(reg) & mask;
  671. }
  672. static int gpio_get(struct gpio_chip *chip, unsigned offset)
  673. {
  674. struct gpio_bank *bank;
  675. void __iomem *reg;
  676. int gpio;
  677. u32 mask;
  678. gpio = chip->base + offset;
  679. bank = container_of(chip, struct gpio_bank, chip);
  680. reg = bank->base;
  681. mask = GPIO_BIT(bank, gpio);
  682. if (gpio_is_input(bank, mask))
  683. return _get_gpio_datain(bank, gpio);
  684. else
  685. return _get_gpio_dataout(bank, gpio);
  686. }
  687. static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
  688. {
  689. struct gpio_bank *bank;
  690. unsigned long flags;
  691. bank = container_of(chip, struct gpio_bank, chip);
  692. spin_lock_irqsave(&bank->lock, flags);
  693. bank->set_dataout(bank, offset, value);
  694. _set_gpio_direction(bank, offset, 0);
  695. spin_unlock_irqrestore(&bank->lock, flags);
  696. return 0;
  697. }
  698. static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
  699. unsigned debounce)
  700. {
  701. struct gpio_bank *bank;
  702. unsigned long flags;
  703. bank = container_of(chip, struct gpio_bank, chip);
  704. if (!bank->dbck) {
  705. bank->dbck = clk_get(bank->dev, "dbclk");
  706. if (IS_ERR(bank->dbck))
  707. dev_err(bank->dev, "Could not get gpio dbck\n");
  708. }
  709. spin_lock_irqsave(&bank->lock, flags);
  710. _set_gpio_debounce(bank, offset, debounce);
  711. spin_unlock_irqrestore(&bank->lock, flags);
  712. return 0;
  713. }
  714. static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
  715. {
  716. struct gpio_bank *bank;
  717. unsigned long flags;
  718. bank = container_of(chip, struct gpio_bank, chip);
  719. spin_lock_irqsave(&bank->lock, flags);
  720. bank->set_dataout(bank, offset, value);
  721. spin_unlock_irqrestore(&bank->lock, flags);
  722. }
  723. static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
  724. {
  725. struct gpio_bank *bank;
  726. bank = container_of(chip, struct gpio_bank, chip);
  727. return bank->virtual_irq_start + offset;
  728. }
  729. /*---------------------------------------------------------------------*/
/* Print the GPIO IP revision once, for banks that expose a revision reg */
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;	/* ensures the banner is printed only once */
	u32 rev;

	/* USHRT_MAX in regs->revision marks "no revision register" */
	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}
/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;
  745. static void omap_gpio_mod_init(struct gpio_bank *bank)
  746. {
  747. void __iomem *base = bank->base;
  748. u32 l = 0xffffffff;
  749. if (bank->width == 16)
  750. l = 0xffff;
  751. if (bank->is_mpuio) {
  752. __raw_writel(l, bank->base + bank->regs->irqenable);
  753. return;
  754. }
  755. _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
  756. _gpio_rmw(base, bank->regs->irqstatus, l,
  757. bank->regs->irqenable_inv == false);
  758. _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
  759. _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
  760. if (bank->regs->debounce_en)
  761. _gpio_rmw(base, bank->regs->debounce_en, 0, 1);
  762. /* Save OE default value (0xffffffff) in the context */
  763. bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
  764. /* Initialize interface clk ungated, module enabled */
  765. if (bank->regs->ctrl)
  766. _gpio_rmw(base, bank->regs->ctrl, 0, 1);
  767. }
  768. static __init void
  769. omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
  770. unsigned int num)
  771. {
  772. struct irq_chip_generic *gc;
  773. struct irq_chip_type *ct;
  774. gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
  775. handle_simple_irq);
  776. if (!gc) {
  777. dev_err(bank->dev, "Memory alloc failed for gc\n");
  778. return;
  779. }
  780. ct = gc->chip_types;
  781. /* NOTE: No ack required, reading IRQ status clears it. */
  782. ct->chip.irq_mask = irq_gc_mask_set_bit;
  783. ct->chip.irq_unmask = irq_gc_mask_clr_bit;
  784. ct->chip.irq_set_type = gpio_irq_type;
  785. if (bank->regs->wkup_en)
  786. ct->chip.irq_set_wake = gpio_wake_enable,
  787. ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
  788. irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
  789. IRQ_NOREQUEST | IRQ_NOPROBE, 0);
  790. }
/*
 * Register this bank with gpiolib and wire up its per-GPIO interrupt
 * lines.  Regular banks receive sequential gpio numbers handed out
 * from the static 'gpio' counter (so probe order determines numbering);
 * the MPUIO bank sits at the fixed OMAP_MPUIO(0) base and gets a
 * generic irq chip via omap_mpuio_alloc_gc() instead of gpio_irq_chip.
 * Finally the bank's upstream interrupt is chained to gpio_irq_handler.
 */
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;	/* next free gpio number, shared across banks */

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	/* One virtual IRQ per GPIO line in the bank. */
	for (j = bank->virtual_irq_start;
	     j < bank->virtual_irq_start + bank->width; j++) {
		/* Separate lockdep class: GPIO irqs nest under their parent. */
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}
/*
 * Probe one GPIO bank described by platform data: fetch its IRQ and
 * memory resources, map the registers, bring the module up via runtime
 * PM, initialize the hardware and register the gpiolib chip plus its
 * interrupt lines, then park the bank on omap_gpio_list for the
 * idle/resume hooks.
 *
 * Returns 0 on success or a negative errno.  On the error paths only
 * the bank allocation is undone; on success the ioremap mapping is a
 * static mapping that is never released (see comment below).
 */
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	if (!pdev->dev.platform_data) {
		ret = -EINVAL;
		goto err_exit;
	}

	bank = kzalloc(sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n",
			pdev->id);
		ret = -ENODEV;
		goto err_free;
	}

	bank->irq = res->start;
	bank->id = pdev->id;

	/* Copy the board-specific bank description out of platform data. */
	pdata = pdev->dev.platform_data;
	bank->virtual_irq_start = pdata->virtual_irq_start;
	bank->dev = &pdev->dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;

	/* Prefer dedicated set/clear registers when the SoC has them. */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n",
			pdev->id);
		ret = -ENODEV;
		goto err_free;
	}

	bank->base = ioremap(res->start, resource_size(res));
	if (!bank->base) {
		dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n",
			pdev->id);
		ret = -ENOMEM;
		goto err_free;
	}

	platform_set_drvdata(pdev, bank);

	/* irq_safe: runtime PM callbacks may run in interrupt context. */
	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	/* Hold the bank active while touching its registers below. */
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;

err_free:
	kfree(bank);
err_exit:
	return ret;
}
  907. #ifdef CONFIG_ARCH_OMAP2PLUS
  908. #if defined(CONFIG_PM_SLEEP)
  909. static int omap_gpio_suspend(struct device *dev)
  910. {
  911. struct platform_device *pdev = to_platform_device(dev);
  912. struct gpio_bank *bank = platform_get_drvdata(pdev);
  913. void __iomem *base = bank->base;
  914. void __iomem *wakeup_enable;
  915. unsigned long flags;
  916. if (!bank->mod_usage || !bank->loses_context)
  917. return 0;
  918. if (!bank->regs->wkup_en || !bank->suspend_wakeup)
  919. return 0;
  920. wakeup_enable = bank->base + bank->regs->wkup_en;
  921. spin_lock_irqsave(&bank->lock, flags);
  922. bank->saved_wakeup = __raw_readl(wakeup_enable);
  923. _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
  924. _gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
  925. spin_unlock_irqrestore(&bank->lock, flags);
  926. return 0;
  927. }
  928. static int omap_gpio_resume(struct device *dev)
  929. {
  930. struct platform_device *pdev = to_platform_device(dev);
  931. struct gpio_bank *bank = platform_get_drvdata(pdev);
  932. void __iomem *base = bank->base;
  933. unsigned long flags;
  934. if (!bank->mod_usage || !bank->loses_context)
  935. return 0;
  936. if (!bank->regs->wkup_en || !bank->saved_wakeup)
  937. return 0;
  938. spin_lock_irqsave(&bank->lock, flags);
  939. _gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
  940. _gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
  941. spin_unlock_irqrestore(&bank->lock, flags);
  942. return 0;
  943. }
  944. #endif /* CONFIG_PM_SLEEP */
  945. #if defined(CONFIG_PM_RUNTIME)
  946. static void omap_gpio_save_context(struct gpio_bank *bank);
  947. static void omap_gpio_restore_context(struct gpio_bank *bank);
/*
 * Runtime-suspend callback.  When the bank is headed for OFF mode,
 * disable edge triggering on all enabled non-wakeup GPIOs and remember
 * their detect settings and data-in snapshot, so runtime_resume can
 * both restore the triggers and synthesize any IRQs that were missed
 * while detection was off (OMAP2420 Errata 1.101 workaround).  In all
 * cases the register context is saved and the debounce clock gated.
 * Runs with interrupts disabled under bank->lock (pm_runtime_irq_safe).
 */
static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	/* Not going to OFF: the errata workaround below is unnecessary. */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto save_gpio_context;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	if (!(bank->enabled_non_wakeup_gpios))
		goto save_gpio_context;

	/* Snapshot current input levels for edge re-detection on resume. */
	bank->saved_datain = __raw_readl(bank->base +
					 bank->regs->datain);
	l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
	l2 = __raw_readl(bank->base + bank->regs->risingdetect);

	bank->saved_fallingdetect = l1;
	bank->saved_risingdetect = l2;
	/* Strip the non-wakeup GPIOs out of the edge-detect registers. */
	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	/* Tell runtime_resume it must run the missed-edge recovery. */
	bank->workaround_enabled = true;

save_gpio_context:
	/* Record the loss counter so resume can tell if context was lost. */
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(bank->dev);

	omap_gpio_save_context(bank);
	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
/*
 * Runtime-resume callback: re-enable the debounce clock, restore the
 * register context if it was lost while off, put back the edge-detect
 * settings that runtime_suspend stripped, and synthesize IRQs for any
 * non-wakeup GPIO that changed state while edge detection was disabled
 * (by briefly programming level detection for those lines).  Runs with
 * interrupts disabled under bank->lock.
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);
	/* Nothing to recover unless runtime_suspend armed the workaround. */
	if (!bank->enabled_non_wakeup_gpios || !bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	/*
	 * Restore the register context only if the loss counter moved
	 * (or reads zero); otherwise the registers survived and we can
	 * return early.
	 */
	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count ||
						!context_lost_cnt_after) {
			omap_gpio_restore_context(bank);
		} else {
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	/* Put back the edge-detect bits stripped in runtime_suspend. */
	__raw_writel(bank->saved_fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->saved_risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->saved_fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->saved_risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		/*
		 * Momentarily enable level detection on the changed
		 * lines so the hardware latches an interrupt, then
		 * restore the original leveldetect registers below.
		 */
		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(old0 | gen, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
					bank->regs->leveldetect1);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(old0 | l, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
					bank->regs->leveldetect1);
		}

		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
  1058. #endif /* CONFIG_PM_RUNTIME */
  1059. void omap2_gpio_prepare_for_idle(int pwr_mode)
  1060. {
  1061. struct gpio_bank *bank;
  1062. list_for_each_entry(bank, &omap_gpio_list, node) {
  1063. if (!bank->mod_usage || !bank->loses_context)
  1064. continue;
  1065. bank->power_mode = pwr_mode;
  1066. pm_runtime_put_sync_suspend(bank->dev);
  1067. }
  1068. }
  1069. void omap2_gpio_resume_after_idle(void)
  1070. {
  1071. struct gpio_bank *bank;
  1072. list_for_each_entry(bank, &omap_gpio_list, node) {
  1073. if (!bank->mod_usage || !bank->loses_context)
  1074. continue;
  1075. pm_runtime_get_sync(bank->dev);
  1076. }
  1077. }
  1078. #if defined(CONFIG_PM_RUNTIME)
  1079. static void omap_gpio_save_context(struct gpio_bank *bank)
  1080. {
  1081. bank->context.irqenable1 =
  1082. __raw_readl(bank->base + bank->regs->irqenable);
  1083. bank->context.irqenable2 =
  1084. __raw_readl(bank->base + bank->regs->irqenable2);
  1085. bank->context.wake_en =
  1086. __raw_readl(bank->base + bank->regs->wkup_en);
  1087. bank->context.ctrl = __raw_readl(bank->base + bank->regs->ctrl);
  1088. bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
  1089. bank->context.leveldetect0 =
  1090. __raw_readl(bank->base + bank->regs->leveldetect0);
  1091. bank->context.leveldetect1 =
  1092. __raw_readl(bank->base + bank->regs->leveldetect1);
  1093. bank->context.risingdetect =
  1094. __raw_readl(bank->base + bank->regs->risingdetect);
  1095. bank->context.fallingdetect =
  1096. __raw_readl(bank->base + bank->regs->fallingdetect);
  1097. bank->context.dataout = __raw_readl(bank->base + bank->regs->dataout);
  1098. }
  1099. static void omap_gpio_restore_context(struct gpio_bank *bank)
  1100. {
  1101. __raw_writel(bank->context.irqenable1,
  1102. bank->base + bank->regs->irqenable);
  1103. __raw_writel(bank->context.irqenable2,
  1104. bank->base + bank->regs->irqenable2);
  1105. __raw_writel(bank->context.wake_en,
  1106. bank->base + bank->regs->wkup_en);
  1107. __raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
  1108. __raw_writel(bank->context.oe, bank->base + bank->regs->direction);
  1109. __raw_writel(bank->context.leveldetect0,
  1110. bank->base + bank->regs->leveldetect0);
  1111. __raw_writel(bank->context.leveldetect1,
  1112. bank->base + bank->regs->leveldetect1);
  1113. __raw_writel(bank->context.risingdetect,
  1114. bank->base + bank->regs->risingdetect);
  1115. __raw_writel(bank->context.fallingdetect,
  1116. bank->base + bank->regs->fallingdetect);
  1117. __raw_writel(bank->context.dataout, bank->base + bank->regs->dataout);
  1118. }
  1119. #endif /* CONFIG_PM_RUNTIME */
  1120. #else
  1121. #define omap_gpio_suspend NULL
  1122. #define omap_gpio_resume NULL
  1123. #define omap_gpio_runtime_suspend NULL
  1124. #define omap_gpio_runtime_resume NULL
  1125. #endif
/*
 * PM callbacks.  On non-OMAP2+ builds the handlers are all #defined to
 * NULL above, so these macros expand to empty slots there.
 */
static const struct dev_pm_ops gpio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};
/* No .remove: banks are never unbound (static mappings, chained IRQs). */
static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
	},
};
/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);