intc.c

/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
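
/*
 * Each IRQ is described by an opaque 32-bit "handle" built with _INTC_MK()
 * and unpacked with the accessor macros below:
 *
 *   bits  0.. 4: bit shift of the field within the register
 *   bits  5.. 8: field width in bits
 *   bits  9..12: index into intc_reg_fns[] (register access function)
 *   bits 13..15: MODE_*_REG mode
 *   bits 16..23: index of the enable register in d->reg[]
 *   bits 24..31: index of the disable register in d->reg[]
 */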
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h) (h & 0x1f)
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
#define _INTC_FN(h) ((h >> 9) & 0xf)
#define _INTC_MODE(h) ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)

struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};
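
/*
 * Per-controller runtime state, built from the static struct intc_desc at
 * registration time.  One instance is allocated per call to
 * register_intc_controller() and linked into intc_list.
 */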
struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        pm_message_t state;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);
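
/*
 * On SMP, each entry of d->smp[] packs the per-CPU register stride in the
 * low byte and the number of per-CPU register copies in the high byte, so
 * INTC_REG() can compute the register address for a given CPU and SMP_NR()
 * yields the number of copies to touch (at least 1).
 */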
#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif

static unsigned int intc_prio_level[NR_IRQS]; /* for now */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned long ack_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);
        return container_of(chip, struct intc_desc_int, chip);
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
        (void)__raw_readb(addr); /* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
        (void)__raw_readw(addr); /* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
        (void)__raw_readl(addr); /* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        (void)__raw_readb(addr); /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        (void)__raw_readw(addr); /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        (void)__raw_readl(addr); /* Defeat write posting */
        local_irq_restore(flags);
}

enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
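
/*
 * The register access functions are indexed by REG_FN_WRITE_BASE or
 * REG_FN_MODIFY_BASE plus (register width in bytes - 1), so the 8-, 16-
 * and 32-bit variants live at base + 0, base + 1 and base + 3.
 */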
static void (*intc_reg_fns[])(unsigned long addr,
                              unsigned long h,
                              unsigned long data) = {
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};

enum {  MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
        MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
        MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
        MODE_PRIO_REG,       /* Priority value written to enable interrupt */
        MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
                            unsigned long handle,
                            void (*fn)(unsigned long,
                                       unsigned long,
                                       unsigned long),
                            unsigned int irq)
{
        fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, intc_prio_level[irq]);
}
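
/*
 * For each MODE_*_REG these tables select what gets written when an IRQ is
 * enabled or disabled: the all-ones field value (intc_mode_field), zero
 * (intc_mode_zero), or the current priority level (intc_mode_prio).
 */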
static void (*intc_enable_fns[])(unsigned long addr,
                                 unsigned long handle,
                                 void (*fn)(unsigned long,
                                            unsigned long,
                                            unsigned long),
                                 unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
                                  unsigned long handle,
                                  void (*fn)(unsigned long,
                                             unsigned long,
                                             unsigned long),
                                  unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}
static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0: /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1: /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3: /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}
#endif
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /* this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * keeping the memory footprint low is more important.
         *
         * one rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * only set secondary masking method directly
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }

        return 0;
}
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                /* VALID() only marks table entries; mask it off before writing */
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
                                                    value & ~VALID(0));
        }

        return 0;
}
static unsigned int __init intc_get_reg(struct intc_desc_int *d,
                                        unsigned long address)
{
        unsigned int k;

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
                g = desc->groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_mask_reg *mr = desc->mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
                mr = desc->mask_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_prio_reg *pr = desc->prio_regs;
        unsigned int i, j, fn, mode, bit;
        unsigned long reg_e, reg_d;

        for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
                pr = desc->prio_regs + i;

                for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
                        if (pr->enum_ids[j] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                if (!pr->set_reg)
                                        BUG();
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - ((j + 1) * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }
        }

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
                mr = desc->ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}
#endif

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
                sr = desc->sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;

        /* Prefer single interrupt source bitmap over other combinations:
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("intc: missing unique irq mask for "
                           "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /* set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = 2;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);
#endif
}
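
/*
 * Store a register address (and its SMP packing, when enabled) at index
 * cnt in d->reg[]/d->smp[].  Returns 1 if an entry was consumed so the
 * caller can advance its running index, 0 for an unused (zero) register.
 */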
static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                d->reg[cnt] = value;
#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif
                return 1;
        }

        return 0;
}
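
/*
 * Flow handler for secondary vectors that share an enum_id with another
 * vector: simply forward to the first vector's irq, which was stashed as
 * the irq data during registration.
 */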
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
        generic_handle_irq((unsigned int)get_irq_data(irq));
}
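
/*
 * Register one interrupt controller described by an intc_desc: count and
 * record the mask/prio/sense (and ack) register addresses, wire up the
 * irq_chip callbacks, then set up every listed vector.  CPU and board
 * setup code typically builds the descriptor with DECLARE_INTC_DESC()
 * and calls this once during early boot.
 */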
void __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_desc_int *d;

        d = kzalloc(sizeof(*d), GFP_NOWAIT);

        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);

        d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
        d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
        d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
#endif

        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
#endif

        k = 0;

        if (desc->mask_regs) {
                for (i = 0; i < desc->nr_mask_regs; i++) {
                        smp = IS_SMP(desc->mask_regs[i]);
                        k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
                }
        }

        if (desc->prio_regs) {
                d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);

                for (i = 0; i < desc->nr_prio_regs; i++) {
                        smp = IS_SMP(desc->prio_regs[i]);
                        k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
                }
        }

        if (desc->sense_regs) {
                d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);

                for (i = 0; i < desc->nr_sense_regs; i++) {
                        k += save_reg(d, k, desc->sense_regs[i].reg, 0);
                }
        }

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs) {
                for (i = 0; i < desc->nr_ack_regs; i++)
                        k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }
#endif

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* register the vectors one by one */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        pr_info("can't get irq_desc for %d\n", irq);
                        continue;
                }

                intc_register_irq(desc, d, vect->enum_id, irq);

                for (k = i + 1; k < desc->nr_vectors; k++) {
                        struct intc_vect *vect2 = desc->vectors + k;
                        unsigned int irq2 = evt2irq(vect2->vect);

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        /*
                         * In the case of multi-evt handling and sparse
                         * IRQ support, each vector still needs to have
                         * its own backing irq_desc.
                         */
                        irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
                        if (unlikely(!irq_desc)) {
                                pr_info("can't get irq_desc for %d\n", irq2);
                                continue;
                        }

                        vect2->enum_id = 0;

                        /* redirect this interrupt to the first one */
                        set_irq_chip_and_handler_name(irq2, &d->chip,
                                                      intc_redirect_irq, "redirect");
                        set_irq_data(irq2, (void *)irq);
                }
        }
}
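
/*
 * sysdev suspend/resume hook: on PM_EVENT_SUSPEND the wakeup-capable irqs
 * of this controller are enabled so they can wake the system; on
 * PM_EVENT_ON following a freeze, the hardware mask state is re-applied
 * from each irq_desc's IRQ_DISABLED status.
 */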
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;
                for_each_irq_desc(irq, desc) {
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }

        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name = "intc",
        .suspend = intc_suspend,
        .resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;
        int id = 0;

        error = sysdev_class_register(&intc_sysdev_class);
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
                        if (error)
                                break;
                        id++;
                }
        }

        if (error)
                pr_warning("intc: sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);