gpio-omap.c

/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
#include <asm/mach/irq.h>

struct gpio_bank {
	unsigned long pbase;
	void __iomem *base;
	u16 irq;
	u16 virtual_irq_start;
	int method;
#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
	u32 suspend_wakeup;
	u32 saved_wakeup;
#endif
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 dbck_enable_mask;
	struct device *dev;
	bool dbck_flag;
	int stride;
	u32 width;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);

	struct omap_gpio_reg_offs *regs;
};

#ifdef CONFIG_ARCH_OMAP3
struct omap3_gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
};

static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
#endif

/*
 * TODO: Clean up gpio_bank usage, as it currently holds information
 * related to all instances of the device.
 */
static struct gpio_bank *gpio_bank;

/* TODO: Analyze removing gpio_bank_count usage from driver code */
int gpio_bank_count;

#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
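
/*
 * Worked example (illustrative): for a 32-bit bank, global GPIO 35 gives
 * GPIO_INDEX(bank, 35) == 35 % 32 == 3 and GPIO_BIT(bank, 35) == (1 << 3),
 * i.e. bit 3 of that bank's registers.
 */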

static inline int gpio_valid(int gpio)
{
	if (gpio < 0)
		return -1;
	if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
		if (gpio >= OMAP_MAX_GPIO_LINES + 16)
			return -1;
		return 0;
	}
	if (cpu_is_omap15xx() && gpio < 16)
		return 0;
	if ((cpu_is_omap16xx()) && gpio < 64)
		return 0;
	if (cpu_is_omap7xx() && gpio < 192)
		return 0;
	if (cpu_is_omap2420() && gpio < 128)
		return 0;
	if (cpu_is_omap2430() && gpio < 160)
		return 0;
	if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
		return 0;
	return -1;
}

static int check_gpio(int gpio)
{
	if (unlikely(gpio_valid(gpio) < 0)) {
		printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
		dump_stack();
		return -1;
	}
	return 0;
}

static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
}

/* set data out value using the dedicated set/clear registers */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	if (enable)
		reg += bank->regs->set_dataout;
	else
		reg += bank->regs->clr_dataout;

	__raw_writel(l, reg);
}

/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
}

static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	if (check_gpio(gpio) < 0)
		return -EINVAL;
	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	if (check_gpio(gpio) < 0)
		return -EINVAL;
	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

#define MOD_REG_BIT(reg, bit_mask, set)	\
do {	\
	int l = __raw_readl(base + reg); \
	if (set) l |= bit_mask; \
	else l &= ~bit_mask; \
	__raw_writel(l, base + reg); \
} while (0)
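
/*
 * Note: MOD_REG_BIT() is a read-modify-write helper.  It deliberately picks
 * up a local 'void __iomem *base' from the calling scope, so it is only
 * usable inside functions that declare one (e.g. set_24xx_gpio_triggering()
 * below).
 */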

/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps, so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem *reg = bank->base;
	u32 val;
	u32 l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	if (bank->method == METHOD_GPIO_44XX)
		reg += OMAP4_GPIO_DEBOUNCINGTIME;
	else
		reg += OMAP24XX_GPIO_DEBOUNCE_VAL;

	__raw_writel(debounce, reg);

	reg = bank->base;
	if (bank->method == METHOD_GPIO_44XX)
		reg += OMAP4_GPIO_DEBOUNCENABLE;
	else
		reg += OMAP24XX_GPIO_DEBOUNCE_EN;

	val = __raw_readl(reg);

	if (debounce) {
		val |= l;
		clk_enable(bank->dbck);
	} else {
		val &= ~l;
		clk_disable(bank->dbck);
	}
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
}
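
/*
 * Worked example (sketch, assuming the 8-bit debounce field encodes
 * "steps - 1", as the -1 above suggests): a request for 100 us programs
 * (100 / 31) - 1 = 2, i.e. roughly 3 x 31 = 93 us of debouncing.  Requests
 * below 32 us or above 7936 us are clamped to the 0x01/0xff limits of the
 * field.
 */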

#ifdef CONFIG_ARCH_OMAP2PLUS
static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
						int trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	if (cpu_is_omap44xx()) {
		MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_LOW);
		MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_HIGH);
		MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_RISING);
		MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_FALLING);
	} else {
		MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_LOW);
		MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
			trigger & IRQ_TYPE_LEVEL_HIGH);
		MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_RISING);
		MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
			trigger & IRQ_TYPE_EDGE_FALLING);
	}
	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		if (cpu_is_omap44xx()) {
			MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
				trigger != 0);
		} else {
			/*
			 * GPIO wakeup request can only be generated on edge
			 * transitions
			 */
			if (trigger & IRQ_TYPE_EDGE_BOTH)
				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_SETWKUENA);
			else
				__raw_writel(1 << gpio, bank->base
					+ OMAP24XX_GPIO_CLEARWKUENA);
		}
	}
	/* This part needs to be executed always for OMAP34xx */
	if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes, to avoid
		 * losing the irq during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

	if (cpu_is_omap44xx()) {
		bank->level_mask =
			__raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT0) |
			__raw_readl(bank->base + OMAP4_GPIO_LEVELDETECT1);
	} else {
		bank->level_mask =
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0) |
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
	}
}
#endif

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	switch (bank->method) {
	case METHOD_MPUIO:
		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
		break;
#ifdef CONFIG_ARCH_OMAP15XX
	case METHOD_GPIO_1510:
		reg += OMAP1510_GPIO_INT_CONTROL;
		break;
#endif
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
	case METHOD_GPIO_7XX:
		reg += OMAP7XX_GPIO_INT_CONTROL;
		break;
#endif
	default:
		return;
	}

	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#endif

static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP1
	case METHOD_MPUIO:
		reg += OMAP_MPUIO_GPIO_INT_EDGE / bank->stride;
		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP15XX
	case METHOD_GPIO_1510:
		reg += OMAP1510_GPIO_INT_CONTROL;
		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP16XX
	case METHOD_GPIO_1610:
		if (gpio & 0x08)
			reg += OMAP1610_GPIO_EDGE_CTRL2;
		else
			reg += OMAP1610_GPIO_EDGE_CTRL1;
		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);
		if (trigger)
			/* Enable wake-up during idle for dynamic tick */
			__raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_SET_WAKEUPENA);
		else
			__raw_writel(1 << gpio, bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA);
		break;
#endif
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
	case METHOD_GPIO_7XX:
		reg += OMAP7XX_GPIO_INT_CONTROL;
		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			goto bad;
		break;
#endif
#ifdef CONFIG_ARCH_OMAP2PLUS
	case METHOD_GPIO_24XX:
	case METHOD_GPIO_44XX:
		set_24xx_gpio_triggering(bank, gpio, trigger);
		return 0;
#endif
	default:
		goto bad;
	}
	__raw_writel(l, reg);
	return 0;
bad:
	return -EINVAL;
}

static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank;
	unsigned gpio;
	int retval;
	unsigned long flags;

	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = d->irq - IH_GPIO_BASE;

	if (check_gpio(gpio) < 0)
		return -EINVAL;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* OMAP1 allows only edge triggering */
	if (!cpu_class_is_omap2()
			&& (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	bank = irq_data_get_irq_chip_data(d);
	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (1 << bank->width) - 1;

	reg += bank->regs->irqenable;
	l = __raw_readl(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}
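
/*
 * Note: regs->irqenable_inv marks banks whose "enable" register actually has
 * mask semantics (a set bit disables the line, as on some OMAP1 banks).  The
 * helpers above and below invert the raw value accordingly, so callers always
 * see "1 = interrupt enabled".
 */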

static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
	}

	__raw_writel(l, reg);
}

static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
	}

	__raw_writel(l, reg);
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

/*
 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register.  If JTAG is connected
 * to the target, the system will always wake up on GPIO events.  While
 * the system is running, all registered GPIO interrupts need to have
 * wake-up enabled.  When the system is suspended, only selected GPIO
 * interrupts need to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	unsigned long uninitialized_var(flags);

	switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
	case METHOD_MPUIO:
	case METHOD_GPIO_1610:
		spin_lock_irqsave(&bank->lock, flags);
		if (enable)
			bank->suspend_wakeup |= (1 << gpio);
		else
			bank->suspend_wakeup &= ~(1 << gpio);
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
#endif
#ifdef CONFIG_ARCH_OMAP2PLUS
	case METHOD_GPIO_24XX:
	case METHOD_GPIO_44XX:
		if (bank->non_wakeup_gpios & (1 << gpio)) {
			printk(KERN_ERR "Unable to modify wakeup on "
					"non-wakeup GPIO%d\n",
					(bank - gpio_bank) * bank->width + gpio);
			return -EINVAL;
		}
		spin_lock_irqsave(&bank->lock, flags);
		if (enable)
			bank->suspend_wakeup |= (1 << gpio);
		else
			bank->suspend_wakeup &= ~(1 << gpio);
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
#endif
	default:
		printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
				bank->method);
		return -EINVAL;
	}
}

static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank;
	int retval;

	if (check_gpio(gpio) < 0)
		return -ENODEV;
	bank = irq_data_get_irq_chip_data(d);
	retval = _set_gpio_wakeup(bank, GPIO_INDEX(bank, gpio), enable);

	return retval;
}
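
/*
 * Illustrative driver-side usage (sketch only; the name, flags and handler
 * are examples, error handling omitted):
 *
 *	int irq = gpio_to_irq(gpio);
 *	request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "my-wake-gpio", dev);
 *	enable_irq_wake(irq);		// ends up in gpio_wake_enable() above
 *
 * and disable_irq_wake(irq) once wake-up is no longer wanted.
 */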

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

#ifdef CONFIG_ARCH_OMAP15XX
	if (bank->method == METHOD_GPIO_1510) {
		void __iomem *reg;

		/* Claim the pin for MPU */
		reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}
#endif
	if (!cpu_class_is_omap1()) {
		if (!bank->mod_usage) {
			void __iomem *reg = bank->base;
			u32 ctrl;

			if (cpu_is_omap24xx() || cpu_is_omap34xx())
				reg += OMAP24XX_GPIO_CTRL;
			else if (cpu_is_omap44xx())
				reg += OMAP4_GPIO_CTRL;
			ctrl = __raw_readl(reg);
			/* Module is enabled, clocks are not gated */
			ctrl &= 0xFFFFFFFE;
			__raw_writel(ctrl, reg);
		}
		bank->mod_usage |= 1 << offset;
	}
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
#ifdef CONFIG_ARCH_OMAP16XX
	if (bank->method == METHOD_GPIO_1610) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
		__raw_writel(1 << offset, reg);
	}
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
	if (bank->method == METHOD_GPIO_24XX) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
		__raw_writel(1 << offset, reg);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (bank->method == METHOD_GPIO_44XX) {
		/* Disable wake-up during idle for dynamic tick */
		void __iomem *reg = bank->base + OMAP4_GPIO_IRQWAKEN0;
		__raw_writel(1 << offset, reg);
	}
#endif
	if (!cpu_class_is_omap1()) {
		bank->mod_usage &= ~(1 << offset);
		if (!bank->mod_usage) {
			void __iomem *reg = bank->base;
			u32 ctrl;

			if (cpu_is_omap24xx() || cpu_is_omap34xx())
				reg += OMAP24XX_GPIO_CTRL;
			else if (cpu_is_omap44xx())
				reg += OMAP4_GPIO_CTRL;
			ctrl = __raw_readl(reg);
			/* Module is disabled, clocks are gated */
			ctrl |= 1;
			__raw_writel(ctrl, reg);
		}
	}
	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (cpu_is_omap15xx() && (bank->method == METHOD_MPUIO))
			isr &= 0x0000ffff;

		if (cpu_class_is_omap2())
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before the handler(s) are
		   called so that we don't miss any interrupt that occurs
		   while executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there are only edge sensitive GPIO pin interrupts
		   configured, we can unmask the GPIO bank interrupt
		   immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		gpio_irq = bank->virtual_irq_start;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));

			if (!(isr & 1))
				continue;

#ifdef CONFIG_ARCH_OMAP1
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);
#endif

			generic_handle_irq(gpio_irq);
		}
	}
	/* if the bank has any level sensitive GPIO pin interrupt configured,
	   we must unmask the bank interrupt only after the handler(s) have
	   run, in order to avoid a spurious bank interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
}

static void gpio_irq_shutdown(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);

	_clear_gpio_irqstatus(bank, gpio);
}

static void gpio_mask_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

#ifdef CONFIG_ARCH_OMAP1

#define bank_is_mpuio(bank)	((bank)->method == METHOD_MPUIO)

#ifdef CONFIG_ARCH_OMAP16XX

#include <linux/platform_device.h>

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *mask_reg = bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void mpuio_init(void)
{
	struct gpio_bank *bank = &gpio_bank[0];
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

#else
static inline void mpuio_init(void) {}
#endif	/* 16xx */

#else

#define bank_is_mpuio(bank)	0
static inline void mpuio_init(void) {}

#endif

/*---------------------------------------------------------------------*/

/* REVISIT these are stupid implementations!  replace by ones that
 * don't switch on METHOD_* and which mostly avoid spinlocks
 */

static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_is_input(struct gpio_bank *bank, int mask)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return __raw_readl(reg) & mask;
}

static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	void __iomem *reg;
	int gpio;
	u32 mask;

	gpio = chip->base + offset;
	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base;
	mask = GPIO_BIT(bank, gpio);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, gpio);
	else
		return _get_gpio_dataout(bank, gpio);
}

static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
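
/*
 * Typical consumer-side use of the gpiolib hooks above (illustrative sketch;
 * the label and debounce value are examples, error handling omitted):
 *
 *	gpio_request(gpio, "example-button");
 *	gpio_direction_input(gpio);		// calls gpio_input() above
 *	gpio_set_debounce(gpio, 100);		// calls gpio_debounce() above
 */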

static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);
	return bank->virtual_irq_start + offset;
}
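
/*
 * gpiolib's gpio_to_irq() resolves through ->to_irq, so for these banks the
 * Linux IRQ number is simply bank->virtual_irq_start plus the offset of the
 * line within the bank.
 */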

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	u32 rev;

	if (cpu_is_omap16xx() && (bank->method == METHOD_MPUIO))
		rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
	else if (cpu_is_omap24xx() || cpu_is_omap34xx())
		rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
	else if (cpu_is_omap44xx())
		rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
	else
		return;

	printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);
}

/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

static inline int init_gpio_info(struct platform_device *pdev)
{
	/* TODO: Analyze removing gpio_bank_count usage from driver code */
	gpio_bank = kzalloc(gpio_bank_count * sizeof(struct gpio_bank),
				GFP_KERNEL);
	if (!gpio_bank) {
		dev_err(&pdev->dev, "Memory alloc failed for gpio_bank\n");
		return -ENOMEM;
	}
	return 0;
}

/* TODO: Cleanup cpu_is_* checks */
static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
{
	if (cpu_class_is_omap2()) {
		if (cpu_is_omap44xx()) {
			__raw_writel(0xffffffff, bank->base +
					OMAP4_GPIO_IRQSTATUSCLR0);
			__raw_writel(0x00000000, bank->base +
					OMAP4_GPIO_DEBOUNCENABLE);
			/* Initialize interface clk ungated, module enabled */
			__raw_writel(0, bank->base + OMAP4_GPIO_CTRL);
		} else if (cpu_is_omap34xx()) {
			__raw_writel(0x00000000, bank->base +
					OMAP24XX_GPIO_IRQENABLE1);
			__raw_writel(0xffffffff, bank->base +
					OMAP24XX_GPIO_IRQSTATUS1);
			__raw_writel(0x00000000, bank->base +
					OMAP24XX_GPIO_DEBOUNCE_EN);
			/* Initialize interface clk ungated, module enabled */
			__raw_writel(0, bank->base + OMAP24XX_GPIO_CTRL);
		} else if (cpu_is_omap24xx()) {
			static const u32 non_wakeup_gpios[] = {
				0xe203ffc0, 0x08700040
			};
			if (id < ARRAY_SIZE(non_wakeup_gpios))
				bank->non_wakeup_gpios = non_wakeup_gpios[id];
		}
	} else if (cpu_class_is_omap1()) {
		if (bank_is_mpuio(bank))
			__raw_writew(0xffff, bank->base +
				OMAP_MPUIO_GPIO_MASKIT / bank->stride);
		if (cpu_is_omap15xx() && bank->method == METHOD_GPIO_1510) {
			__raw_writew(0xffff, bank->base
						+ OMAP1510_GPIO_INT_MASK);
			__raw_writew(0x0000, bank->base
						+ OMAP1510_GPIO_INT_STATUS);
		}
		if (cpu_is_omap16xx() && bank->method == METHOD_GPIO_1610) {
			__raw_writew(0x0000, bank->base
						+ OMAP1610_GPIO_IRQENABLE1);
			__raw_writew(0xffff, bank->base
						+ OMAP1610_GPIO_IRQSTATUS1);
			__raw_writew(0x0014, bank->base
						+ OMAP1610_GPIO_SYSCONFIG);
			/*
			 * Enable system clock for GPIO module.
			 * The CAM_CLK_CTRL *is* really the right place.
			 */
			omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04,
						ULPD_CAM_CLK_CTRL);
		}
		if (cpu_is_omap7xx() && bank->method == METHOD_GPIO_7XX) {
			__raw_writel(0xffffffff, bank->base
						+ OMAP7XX_GPIO_INT_MASK);
			__raw_writel(0x00000000, bank->base
						+ OMAP7XX_GPIO_INT_STATUS);
		}
	}
}

static __init void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;
	/* REVISIT: assuming only 16xx supports MPUIO wake events */
	if (cpu_is_omap16xx())
		ct->chip.irq_set_wake = gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
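
/*
 * The generic irq chip above uses irq_gc_mask_set_bit()/irq_gc_mask_clr_bit(),
 * i.e. setting a bit in ct->regs.mask masks the line and clearing it unmasks
 * it, which matches a mask-style register where a set bit disables the
 * interrupt (as the MPUIO MASKIT register appears to be).
 */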

static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;

	bank->mod_usage = 0;
	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank_is_mpuio(bank)) {
		bank->chip.label = "mpuio";
#ifdef CONFIG_ARCH_OMAP16XX
		bank->chip.dev = &omap_mpuio_device.dev;
#endif
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	for (j = bank->virtual_irq_start;
		     j < bank->virtual_irq_start + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank_is_mpuio(bank)) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	static int gpio_init_done;
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	int id;
	struct gpio_bank *bank;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	pdata = pdev->dev.platform_data;

	if (!gpio_init_done) {
		int ret;

		ret = init_gpio_info(pdev);
		if (ret)
			return ret;
	}

	id = pdev->id;
	bank = &gpio_bank[id];

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid IRQ resource\n", id);
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->virtual_irq_start = pdata->virtual_irq_start;
	bank->method = pdata->bank_type;
	bank->dev = &pdev->dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;

	bank->regs = pdata->regs;

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(&pdev->dev, "GPIO Bank %i Invalid mem resource\n", id);
		return -ENODEV;
	}

	bank->base = ioremap(res->start, resource_size(res));
	if (!bank->base) {
		dev_err(&pdev->dev, "Could not ioremap gpio bank%i\n", id);
		return -ENOMEM;
	}

	pm_runtime_enable(bank->dev);
	pm_runtime_get_sync(bank->dev);

	omap_gpio_mod_init(bank, id);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	if (!gpio_init_done)
		gpio_init_done = 1;

	return 0;
}
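
/*
 * The probe above expects arch/platform code to register one "omap_gpio"
 * platform device per bank, with MEM and IRQ resources and platform data
 * carrying the fields consumed above.  A rough sketch (values illustrative,
 * not taken from any particular board):
 *
 *	static struct omap_gpio_platform_data bank0_pdata = {
 *		.virtual_irq_start	= IH_GPIO_BASE,
 *		.bank_type		= METHOD_GPIO_24XX,
 *		.bank_width		= 32,
 *		.dbck_flag		= true,
 *		.regs			= &some_gpio_reg_offsets,  // hypothetical
 *	};
 */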

#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
static int omap_gpio_suspend(void)
{
	int i;

	if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
		return 0;

	for (i = 0; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		void __iomem *wake_status;
		void __iomem *wake_clear;
		void __iomem *wake_set;
		unsigned long flags;

		switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
		case METHOD_GPIO_1610:
			wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
			wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
			wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
			break;
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
		case METHOD_GPIO_24XX:
			wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
			wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
			wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
			break;
#endif
#ifdef CONFIG_ARCH_OMAP4
		case METHOD_GPIO_44XX:
			wake_status = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
			break;
#endif
		default:
			continue;
		}

		spin_lock_irqsave(&bank->lock, flags);
		bank->saved_wakeup = __raw_readl(wake_status);
		__raw_writel(0xffffffff, wake_clear);
		__raw_writel(bank->suspend_wakeup, wake_set);
		spin_unlock_irqrestore(&bank->lock, flags);
	}

	return 0;
}

static void omap_gpio_resume(void)
{
	int i;

	if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
		return;

	for (i = 0; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		void __iomem *wake_clear;
		void __iomem *wake_set;
		unsigned long flags;

		switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
		case METHOD_GPIO_1610:
			wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
			wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
			break;
#endif
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
		case METHOD_GPIO_24XX:
			wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
			wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
			break;
#endif
#ifdef CONFIG_ARCH_OMAP4
		case METHOD_GPIO_44XX:
			wake_clear = bank->base + OMAP4_GPIO_IRQWAKEN0;
			wake_set = bank->base + OMAP4_GPIO_IRQWAKEN0;
			break;
#endif
		default:
			continue;
		}

		spin_lock_irqsave(&bank->lock, flags);
		__raw_writel(0xffffffff, wake_clear);
		__raw_writel(bank->saved_wakeup, wake_set);
		spin_unlock_irqrestore(&bank->lock, flags);
	}
}

static struct syscore_ops omap_gpio_syscore_ops = {
	.suspend	= omap_gpio_suspend,
	.resume		= omap_gpio_resume,
};

#endif

#ifdef CONFIG_ARCH_OMAP2PLUS

static int workaround_enabled;

void omap2_gpio_prepare_for_idle(int off_mode)
{
	int i, c = 0;
	int min = 0;

	if (cpu_is_omap34xx())
		min = 1;

	for (i = min; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		u32 l1 = 0, l2 = 0;
		int j;

		for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
			clk_disable(bank->dbck);

		if (!off_mode)
			continue;

		/* If going to OFF, remove triggering for all
		 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
		 * generated.  See OMAP2420 Errata item 1.101. */
		if (!(bank->enabled_non_wakeup_gpios))
			continue;

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			bank->saved_datain = __raw_readl(bank->base +
					OMAP24XX_GPIO_DATAIN);
			l1 = __raw_readl(bank->base +
					OMAP24XX_GPIO_FALLINGDETECT);
			l2 = __raw_readl(bank->base +
					OMAP24XX_GPIO_RISINGDETECT);
		}

		if (cpu_is_omap44xx()) {
			bank->saved_datain = __raw_readl(bank->base +
						OMAP4_GPIO_DATAIN);
			l1 = __raw_readl(bank->base +
						OMAP4_GPIO_FALLINGDETECT);
			l2 = __raw_readl(bank->base +
						OMAP4_GPIO_RISINGDETECT);
		}

		bank->saved_fallingdetect = l1;
		bank->saved_risingdetect = l2;
		l1 &= ~bank->enabled_non_wakeup_gpios;
		l2 &= ~bank->enabled_non_wakeup_gpios;

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(l1, bank->base +
					OMAP24XX_GPIO_FALLINGDETECT);
			__raw_writel(l2, bank->base +
					OMAP24XX_GPIO_RISINGDETECT);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(l1, bank->base + OMAP4_GPIO_FALLINGDETECT);
			__raw_writel(l2, bank->base + OMAP4_GPIO_RISINGDETECT);
		}

		c++;
	}
	if (!c) {
		workaround_enabled = 0;
		return;
	}
	workaround_enabled = 1;
}

void omap2_gpio_resume_after_idle(void)
{
	int i;
	int min = 0;

	if (cpu_is_omap34xx())
		min = 1;
	for (i = min; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		u32 l = 0, gen, gen0, gen1;
		int j;

		for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
			clk_enable(bank->dbck);

		if (!workaround_enabled)
			continue;

		if (!(bank->enabled_non_wakeup_gpios))
			continue;

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(bank->saved_fallingdetect,
				 bank->base + OMAP24XX_GPIO_FALLINGDETECT);
			__raw_writel(bank->saved_risingdetect,
				 bank->base + OMAP24XX_GPIO_RISINGDETECT);
			l = __raw_readl(bank->base + OMAP24XX_GPIO_DATAIN);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(bank->saved_fallingdetect,
				 bank->base + OMAP4_GPIO_FALLINGDETECT);
			__raw_writel(bank->saved_risingdetect,
				 bank->base + OMAP4_GPIO_RISINGDETECT);
			l = __raw_readl(bank->base + OMAP4_GPIO_DATAIN);
		}

		/* Check if any of the non-wakeup interrupt GPIOs have changed
		 * state.  If so, generate an IRQ by software.  This is
		 * horribly racy, but it's the best we can do to work around
		 * this silicon bug. */
		l ^= bank->saved_datain;
		l &= bank->enabled_non_wakeup_gpios;

		/*
		 * No need to generate IRQs for the rising edge for gpio IRQs
		 * configured with falling edge only; and vice versa.
		 */
		gen0 = l & bank->saved_fallingdetect;
		gen0 &= bank->saved_datain;

		gen1 = l & bank->saved_risingdetect;
		gen1 &= ~(bank->saved_datain);

		/* FIXME: Consider GPIO IRQs with level detections properly! */
		gen = l & (~(bank->saved_fallingdetect) &
				~(bank->saved_risingdetect));
		/* Consider all GPIO IRQs needed to be updated */
		gen |= gen0 | gen1;

		if (gen) {
			u32 old0, old1;

			if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
				old0 = __raw_readl(bank->base +
					OMAP24XX_GPIO_LEVELDETECT0);
				old1 = __raw_readl(bank->base +
					OMAP24XX_GPIO_LEVELDETECT1);
				__raw_writel(old0 | gen, bank->base +
					OMAP24XX_GPIO_LEVELDETECT0);
				__raw_writel(old1 | gen, bank->base +
					OMAP24XX_GPIO_LEVELDETECT1);
				__raw_writel(old0, bank->base +
					OMAP24XX_GPIO_LEVELDETECT0);
				__raw_writel(old1, bank->base +
					OMAP24XX_GPIO_LEVELDETECT1);
			}

			if (cpu_is_omap44xx()) {
				old0 = __raw_readl(bank->base +
						OMAP4_GPIO_LEVELDETECT0);
				old1 = __raw_readl(bank->base +
						OMAP4_GPIO_LEVELDETECT1);
				__raw_writel(old0 | l, bank->base +
						OMAP4_GPIO_LEVELDETECT0);
				__raw_writel(old1 | l, bank->base +
						OMAP4_GPIO_LEVELDETECT1);
				__raw_writel(old0, bank->base +
						OMAP4_GPIO_LEVELDETECT0);
				__raw_writel(old1, bank->base +
						OMAP4_GPIO_LEVELDETECT1);
			}
		}
	}
}

#endif

#ifdef CONFIG_ARCH_OMAP3
/* save the registers of banks 2-6 */
void omap_gpio_save_context(void)
{
	int i;

	/* saving banks 2-6 only, since GPIO1 is in the WKUP domain */
	for (i = 1; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		gpio_context[i].irqenable1 =
			__raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE1);
		gpio_context[i].irqenable2 =
			__raw_readl(bank->base + OMAP24XX_GPIO_IRQENABLE2);
		gpio_context[i].wake_en =
			__raw_readl(bank->base + OMAP24XX_GPIO_WAKE_EN);
		gpio_context[i].ctrl =
			__raw_readl(bank->base + OMAP24XX_GPIO_CTRL);
		gpio_context[i].oe =
			__raw_readl(bank->base + OMAP24XX_GPIO_OE);
		gpio_context[i].leveldetect0 =
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0);
		gpio_context[i].leveldetect1 =
			__raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1);
		gpio_context[i].risingdetect =
			__raw_readl(bank->base + OMAP24XX_GPIO_RISINGDETECT);
		gpio_context[i].fallingdetect =
			__raw_readl(bank->base + OMAP24XX_GPIO_FALLINGDETECT);
		gpio_context[i].dataout =
			__raw_readl(bank->base + OMAP24XX_GPIO_DATAOUT);
	}
}

/* restore the required registers of banks 2-6 */
void omap_gpio_restore_context(void)
{
	int i;

	for (i = 1; i < gpio_bank_count; i++) {
		struct gpio_bank *bank = &gpio_bank[i];
		__raw_writel(gpio_context[i].irqenable1,
				bank->base + OMAP24XX_GPIO_IRQENABLE1);
		__raw_writel(gpio_context[i].irqenable2,
				bank->base + OMAP24XX_GPIO_IRQENABLE2);
		__raw_writel(gpio_context[i].wake_en,
				bank->base + OMAP24XX_GPIO_WAKE_EN);
		__raw_writel(gpio_context[i].ctrl,
				bank->base + OMAP24XX_GPIO_CTRL);
		__raw_writel(gpio_context[i].oe,
				bank->base + OMAP24XX_GPIO_OE);
		__raw_writel(gpio_context[i].leveldetect0,
				bank->base + OMAP24XX_GPIO_LEVELDETECT0);
		__raw_writel(gpio_context[i].leveldetect1,
				bank->base + OMAP24XX_GPIO_LEVELDETECT1);
		__raw_writel(gpio_context[i].risingdetect,
				bank->base + OMAP24XX_GPIO_RISINGDETECT);
		__raw_writel(gpio_context[i].fallingdetect,
				bank->base + OMAP24XX_GPIO_FALLINGDETECT);
		__raw_writel(gpio_context[i].dataout,
				bank->base + OMAP24XX_GPIO_DATAOUT);
	}
}
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
	},
};

/*
 * gpio driver registration needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static int __init omap_gpio_sysinit(void)
{
	mpuio_init();

#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
	if (cpu_is_omap16xx() || cpu_class_is_omap2())
		register_syscore_ops(&omap_gpio_syscore_ops);
#endif

	return 0;
}
arch_initcall(omap_gpio_sysinit);