intc.c
/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)  (h & 0x1f)
#define _INTC_WIDTH(h)  ((h >> 5) & 0xf)
#define _INTC_FN(h)     ((h >> 9) & 0xf)
#define _INTC_MODE(h)   ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
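
/*
 * For reference, the handle layout decoded by the accessors above:
 *
 *   bits  0..4   field shift within the register
 *   bits  5..8   field width in bits
 *   bits  9..12  register access function (REG_FN_*)
 *   bits 13..15  register mode (MODE_*_REG)
 *   bits 16..23  index of the enable register in d->reg[]
 *   bits 24..31  index of the disable register in d->reg[]
 *
 * e.g. _INTC_MK(REG_FN_MODIFY_BASE, MODE_ENABLE_REG, 2, 2, 1, 7)
 * describes a 1-bit field at shift 7 of the register held in d->reg[2].
 */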

struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};

struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        pm_message_t state;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as to tie in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x)       x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)    ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)       0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x)    1
#endif
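
/*
 * d->smp[] packing, as decoded above: the low byte holds the byte
 * stride between per-CPU copies of a register, the upper bits the
 * number of copies (0 is treated as 1). E.g. with smp = 4 | (2 << 8),
 * CPU 1's copy of d->reg[x] lives at d->reg[x] + 4 and SMP_NR()
 * returns 2.
 */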

static unsigned int intc_prio_level[NR_IRQS];   /* for now */
static unsigned long ack_handle[NR_IRQS];

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);
        return container_of(chip, struct intc_desc_int, chip);
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}
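
/*
 * Worked example: for a handle describing a 4-bit field at shift 8,
 * set_field(0, 0x3, handle) clears bits 11..8 and returns 0x300.
 */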

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
                              unsigned long h,
                              unsigned long data) = {
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};
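
/*
 * Note the gap at [BASE + 2] in the table above: indices are computed
 * later as base + (reg_width >> 3) - 1, so 8/16/32-bit registers map
 * to +0/+1/+3 and the 24-bit slot (+2) is simply never populated.
 */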

enum {  MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
        MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
        MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
        MODE_PRIO_REG,       /* Priority value written to enable interrupt */
        MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
                            unsigned long handle,
                            void (*fn)(unsigned long,
                                       unsigned long,
                                       unsigned long),
                            unsigned int irq)
{
        fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
                                 unsigned long handle,
                                 void (*fn)(unsigned long,
                                            unsigned long,
                                            unsigned long),
                                 unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
                                  unsigned long handle,
                                  void (*fn)(unsigned long,
                                             unsigned long,
                                             unsigned long),
                                  unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /*
         * This doesn't scale well, but...
         *
         * This function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * Keeping the memory footprint low is more important.
         *
         * One rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to look up the irq.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}
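
/*
 * A minimal sketch (illustrative only, not built) of the bisect lookup
 * the comment above suggests, assuming the handle array were kept
 * sorted by ascending irq:
 */
#if 0
static struct intc_handle_int *intc_bisect_irq(struct intc_handle_int *hp,
                                               unsigned int nr_hp,
                                               unsigned int irq)
{
        unsigned int lo = 0, hi = nr_hp;

        while (lo < hi) {
                unsigned int mid = lo + (hi - lo) / 2;

                if (hp[mid].irq == irq)
                        return hp + mid;        /* exact match */
                if (hp[mid].irq < irq)
                        lo = mid + 1;           /* search upper half */
                else
                        hi = mid;               /* search lower half */
        }
        return NULL;
}
#endif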

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * Only set the secondary masking method directly; the
                 * primary masking method uses intc_prio_level[irq], so
                 * its priority level will be set during the next enable().
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }
        return 0;
}
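
/*
 * Illustrative usage (hypothetical vector number): board setup code
 * could lower a peripheral's priority like this:
 *
 *      intc_set_priority(evt2irq(0x4a0), 3);
 */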

#define VALID(x) ((x) | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }
        return 0;
}
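
/*
 * intc_set_sense() is wired up as the irq_chip set_type hook below, so
 * drivers reach it through the generic API, e.g. (hypothetical IRQ):
 *
 *      set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 */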

static unsigned int __init intc_get_reg(struct intc_desc_int *d,
                                        unsigned long address)
{
        unsigned int k;

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->nr_groups; i++) {
                g = desc->groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_mask_reg *mr = desc->mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) {
                mr = desc->mask_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        struct intc_prio_reg *pr = desc->prio_regs;
        unsigned int i, j, fn, mode, bit;
        unsigned long reg_e, reg_d;

        for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) {
                pr = desc->prio_regs + i;

                for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) {
                        if (pr->enum_ids[j] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                BUG_ON(!pr->set_reg);
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - ((j + 1) * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }
        }

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) {
                mr = desc->ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) {
                sr = desc->sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;

        /*
         * Register the IRQ position with the global IRQ map
         */
        set_bit(irq, intc_irq_map);

        /*
         * Prefer single interrupt source bitmap over other combinations:
         *
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("intc: missing unique irq mask for "
                           "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /*
         * set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = 2;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }
                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

        if (desc->ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_ARM
        set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                d->reg[cnt] = value;
#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif
                return 1;
        }

        return 0;
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
        generic_handle_irq((unsigned int)get_irq_data(irq));
}

void __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_desc_int *d;

        d = kzalloc(sizeof(*d), GFP_NOWAIT);

        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);

        d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0;
        d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0;
        d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0;
        d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;

        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
#endif
        k = 0;

        if (desc->mask_regs) {
                for (i = 0; i < desc->nr_mask_regs; i++) {
                        smp = IS_SMP(desc->mask_regs[i]);
                        k += save_reg(d, k, desc->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp);
                }
        }

        if (desc->prio_regs) {
                d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT);

                for (i = 0; i < desc->nr_prio_regs; i++) {
                        smp = IS_SMP(desc->prio_regs[i]);
                        k += save_reg(d, k, desc->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp);
                }
        }

        if (desc->sense_regs) {
                d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT);

                for (i = 0; i < desc->nr_sense_regs; i++)
                        k += save_reg(d, k, desc->sense_regs[i].reg, 0);
        }

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;

        if (desc->ack_regs) {
                for (i = 0; i < desc->nr_ack_regs; i++)
                        k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* register the vectors one by one */
        for (i = 0; i < desc->nr_vectors; i++) {
                struct intc_vect *vect = desc->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        pr_info("can't get irq_desc for %d\n", irq);
                        continue;
                }

                intc_register_irq(desc, d, vect->enum_id, irq);

                for (k = i + 1; k < desc->nr_vectors; k++) {
                        struct intc_vect *vect2 = desc->vectors + k;
                        unsigned int irq2 = evt2irq(vect2->vect);

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        /*
                         * In the case of multi-evt handling and sparse
                         * IRQ support, each vector still needs to have
                         * its own backing irq_desc.
                         */
                        irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
                        if (unlikely(!irq_desc)) {
                                pr_info("can't get irq_desc for %d\n", irq2);
                                continue;
                        }

                        vect2->enum_id = 0;

                        /* redirect this interrupt to the first one */
                        set_irq_chip_and_handler_name(irq2, &d->chip,
                                        intc_redirect_irq, "redirect");
                        set_irq_data(irq2, (void *)irq);
                }
        }
}

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;
                for_each_irq_desc(irq, desc) {
                        if (desc->handle_irq == intc_redirect_irq)
                                continue;
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }

        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name = "intc",
        .suspend = intc_suspend,
        .resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;
        int id = 0;

        error = sysdev_class_register(&intc_sysdev_class);
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;
                        error = sysdev_register(&d->sysdev);
                        if (error)
                                break;
                        id++;
                }
        }

        if (error)
                pr_warning("intc: sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
static unsigned int create_irq_on_node(unsigned int irq_want, int node)
{
        unsigned int irq = 0, new;
        unsigned long flags;
        struct irq_desc *desc;

        spin_lock_irqsave(&vector_lock, flags);

        /*
         * First try the wanted IRQ, then fall back to scanning.
         */
        if (test_and_set_bit(irq_want, intc_irq_map)) {
                new = find_first_zero_bit(intc_irq_map, nr_irqs);
                if (unlikely(new == nr_irqs))
                        goto out_unlock;

                desc = irq_to_desc_alloc_node(new, node);
                if (unlikely(!desc)) {
                        pr_info("can't get irq_desc for %d\n", new);
                        goto out_unlock;
                }

                desc = move_irq_desc(desc, node);
                __set_bit(new, intc_irq_map);
                irq = new;
        } else {
                /* the wanted slot was free and is now claimed, hand it back */
                irq = irq_want;
        }

out_unlock:
        spin_unlock_irqrestore(&vector_lock, flags);

        if (irq > 0) {
                dynamic_irq_init(irq);
#ifdef CONFIG_ARM
                set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
        }

        return irq;
}

int create_irq(void)
{
        int nid = cpu_to_node(smp_processor_id());
        int irq;

        irq = create_irq_on_node(NR_IRQS_LEGACY, nid);
        if (irq == 0)
                irq = -1;

        return irq;
}

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        dynamic_irq_cleanup(irq);

        spin_lock_irqsave(&vector_lock, flags);
        __clear_bit(irq, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}
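
/*
 * Illustrative usage (not from this file): a board driver could grab
 * and later release a dynamic vector like this:
 *
 *      int irq = create_irq();
 *      if (irq < 0)
 *              return irq;
 *      ...
 *      destroy_irq(irq);
 */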

int reserve_irq_vector(unsigned int irq)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&vector_lock, flags);
        if (test_and_set_bit(irq, intc_irq_map))
                ret = -EBUSY;
        spin_unlock_irqrestore(&vector_lock, flags);

        return ret;
}

void reserve_irq_legacy(void)
{
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&vector_lock, flags);
        j = find_first_bit(intc_irq_map, nr_irqs);
        for (i = 0; i < j; i++)
                __set_bit(i, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}