intc.c

/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d << 24)))

#define _INTC_SHIFT(h) (h & 0x1f)
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
#define _INTC_FN(h) ((h >> 9) & 0xf)
#define _INTC_MODE(h) ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
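
/*
 * Handle bit layout implied by the _INTC_MK()/_INTC_*() macros above
 * (descriptive comment added for illustration):
 *
 *   bits  0.. 4  shift   (bit position of the field inside the register)
 *   bits  5.. 8  width   (field width in bits)
 *   bits  9..12  fn      (index into intc_reg_fns[], or REG_FN_ERR)
 *   bits 13..15  mode    (MODE_*_REG selector for enable/disable handling)
 *   bits 16..23  addr_e  (index into d->reg[] used when enabling)
 *   bits 24..31  addr_d  (index into d->reg[] used when disabling)
 */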

struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};

struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        pm_message_t state;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);

#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif
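
/*
 * Illustrative note on the d->smp[] encoding used by the macros above:
 * the low byte holds the per-CPU address stride that INTC_REG() multiplies
 * by the CPU index, and the bits above it hold the number of per-CPU
 * register copies, with zero treated as one copy by SMP_NR().
 */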

static unsigned int intc_prio_level[NR_IRQS]; /* for now */
#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned long ack_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);
        return (void *)((char *)chip - offsetof(struct intc_desc_int, chip));
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}
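
/*
 * Worked example (illustration only): for a handle encoding width 4 and
 * shift 8, set_field(0, 2, handle) clears bits 8..11 of the value and then
 * ORs in 2 << 8, returning 0x200, i.e. the field value 2 at bit position 8.
 */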

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        local_irq_restore(flags);
}

enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
                              unsigned long h,
                              unsigned long data) = {
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};
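
/*
 * Note (descriptive, added for illustration): the table index combines the
 * access method with the register width.  Callers compute it as the base
 * value plus (reg_width >> 3) - 1, so 8-bit registers add 0, 16-bit add 1
 * and 32-bit add 3, which is why the "base + 2" slots are left unused.
 */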

enum {  MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
        MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
        MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
        MODE_PRIO_REG,       /* Priority value written to enable interrupt */
        MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
                            unsigned long handle,
                            void (*fn)(unsigned long,
                                       unsigned long,
                                       unsigned long),
                            unsigned int irq)
{
        fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
                                 unsigned long handle,
                                 void (*fn)(unsigned long,
                                            unsigned long,
                                            unsigned long),
                                 unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
                                  unsigned long handle,
                                  void (*fn)(unsigned long,
                                             unsigned long,
                                             unsigned long),
                                  unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
                                                    [_INTC_FN(handle)], irq);
        }
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long) get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
                                                     [_INTC_FN(handle)], irq);
        }
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}
#endif

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /* this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * keeping the memory footprint low is more important.
         *
         * one rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}
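
/*
 * Minimal sketch of the bisect idea mentioned in the comment above, assuming
 * the handle array were kept sorted by irq number.  Illustration only; it is
 * not compiled or used anywhere in this file.
 */
#if 0
static struct intc_handle_int *intc_find_irq_bisect(struct intc_handle_int *hp,
                                                    unsigned int nr_hp,
                                                    unsigned int irq)
{
        unsigned int lo = 0, hi = nr_hp;

        while (lo < hi) {
                unsigned int mid = lo + (hi - lo) / 2;

                if (hp[mid].irq == irq)
                        return hp + mid;
                if (hp[mid].irq < irq)
                        lo = mid + 1;
                else
                        hi = mid;
        }

        return NULL;
}
#endif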

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * only set secondary masking method directly
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }
        return 0;
}

#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};
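
/*
 * Note (descriptive, added for illustration): VALID() sets bit 7 so that
 * entries for supported trigger types are non-zero even when the encoded
 * sense value itself is 0; intc_set_sense() below treats a zero table entry
 * as "unsupported type" and returns -EINVAL.
 */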

static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }
        return 0;
}

static unsigned int __init intc_get_reg(struct intc_desc_int *d,
                                        unsigned long address)
{
        unsigned int k;

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
                g = desc->groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_mask_reg *mr = desc->mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
                mr = desc->mask_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_prio_reg *pr = desc->prio_regs;
        unsigned int i, j, fn, mode, bit;
        unsigned long reg_e, reg_d;

        for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
                pr = desc->prio_regs + i;

                for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
                        if (pr->enum_ids[j] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                if (!pr->set_reg)
                                        BUG();
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - ((j + 1) * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }
        }

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
                mr = desc->ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}
#endif

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
                sr = desc->sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;

        /* Prefer single interrupt source bitmap over other combinations:
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */

        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("intc: missing unique irq mask for "
                           "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /* set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = 2;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */

                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                d->reg[cnt] = value;

#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif

                return 1;
        }

        return 0;
}

static unsigned char *intc_evt2irq_table;

unsigned int intc_evt2irq(unsigned int vector)
{
        unsigned int irq = evt2irq(vector);

        if (intc_evt2irq_table && intc_evt2irq_table[irq])
                irq = intc_evt2irq_table[irq];

        return irq;
}
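
/*
 * Descriptive note (added for illustration): when several vectors in a
 * descriptor share the same enum_id, register_intc_controller() below keeps
 * only the first vector and records the duplicates in intc_evt2irq_table,
 * so intc_evt2irq() can redirect those vectors to the irq that was actually
 * registered.
 */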

void __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_desc_int *d;

        d = alloc_bootmem(sizeof(*d));
        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);

        d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
        d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
        d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
#endif
        d->reg = alloc_bootmem(d->nr_reg * sizeof(*d->reg));
#ifdef CONFIG_SMP
        d->smp = alloc_bootmem(d->nr_reg * sizeof(*d->smp));
#endif
        k = 0;

        if (desc->mask_regs) {
                for (i = 0; i < desc->nr_mask_regs; i++) {
                        smp = IS_SMP(desc->mask_regs[i]);
                        k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
                }
        }

        if (desc->prio_regs) {
                d->prio = alloc_bootmem(desc->nr_vectors * sizeof(*d->prio));

                for (i = 0; i < desc->nr_prio_regs; i++) {
                        smp = IS_SMP(desc->prio_regs[i]);
                        k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
                }
        }

        if (desc->sense_regs) {
                d->sense = alloc_bootmem(desc->nr_vectors * sizeof(*d->sense));

                for (i = 0; i < desc->nr_sense_regs; i++) {
                        k += save_reg(d, k, desc->sense_regs[i].reg, 0);
                }
        }

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
        if (desc->ack_regs) {
                for (i = 0; i < desc->nr_ack_regs; i++)
                        k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }
#endif

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* keep the first vector only if same enum is used multiple times */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                int first_irq = evt2irq(vect->vect);

                if (!vect->enum_id)
                        continue;

                for (k = i + 1; k < desc->nr_vectors; k++) {
                        struct intc_vect *vect2 = desc->vectors + k;

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        vect2->enum_id = 0;

                        if (!intc_evt2irq_table)
                                intc_evt2irq_table = alloc_bootmem(NR_IRQS);

                        if (!intc_evt2irq_table) {
                                pr_warning("intc: cannot allocate evt2irq!\n");
                                continue;
                        }

                        intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq;
                }
        }

        /* register the vectors one by one */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                        continue;
                }

                intc_register_irq(desc, d, vect->enum_id, irq);
        }
}

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;
                for_each_irq_desc(irq, desc) {
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }
        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name = "intc",
        .suspend = intc_suspend,
        .resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;
        int id = 0;

        error = sysdev_class_register(&intc_sysdev_class);
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
                        if (error)
                                break;
                        id++;
                }
        }

        if (error)
                pr_warning("intc: sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);