intc.c
/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h) (h & 0x1f)
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
#define _INTC_FN(h) ((h >> 9) & 0xf)
#define _INTC_MODE(h) ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)
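/*
 * Layout of the 32-bit handle built by _INTC_MK() and decoded by the
 * accessors above:
 *
 *	bits 31..24	addr_d	index into d->reg[] of the disable register
 *	bits 23..16	addr_e	index into d->reg[] of the enable register
 *	bits 15..13	mode	MODE_*_REG access strategy
 *	bits 12..9	fn	REG_FN_* register access function
 *	bits  8..5	width	bit field width
 *	bits  4..0	shift	bit field position within the register
 */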
struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};

struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	pm_message_t state;
	unsigned long *reg;
#ifdef CONFIG_SMP
	unsigned long *smp;
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;
	unsigned int nr_prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct irq_chip chip;
};
static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif
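/*
 * On SMP, d->smp[] packs the per-CPU register layout: the low 8 bits
 * hold the address stride between per-CPU copies of a register, and the
 * upper bits hold the number of copies (0 meaning a single shared
 * register), as decoded by INTC_REG() and SMP_NR() above.
 */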
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned long ack_handle[NR_IRQS];

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);
	return container_of(chip, struct intc_desc_int, chip);
}

static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
	return value;
}
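/*
 * For example, with a handle encoding width = 4 and shift = 8,
 * set_field(0, 0x5, handle) yields 0x500: the four bits at position 8
 * are cleared and then loaded with the field value.
 */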
static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}
enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
			      unsigned long h,
			      unsigned long data) = {
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};
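/*
 * The register width selects the final access function: the setup code
 * computes the index as base + (reg_width >> 3) - 1, giving base + 0
 * for 8-bit, base + 1 for 16-bit and base + 3 for 32-bit registers
 * (base + 2 is intentionally left unused).
 */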
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};
static void intc_mode_field(unsigned long addr,
			    unsigned long handle,
			    void (*fn)(unsigned long,
				       unsigned long,
				       unsigned long),
			    unsigned int irq)
{
	fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
				 unsigned long handle,
				 void (*fn)(unsigned long,
					    unsigned long,
					    unsigned long),
				 unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  void (*fn)(unsigned long,
					     unsigned long,
					     unsigned long),
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};
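/*
 * MODE_DUAL_REG uses intc_mode_field() in both tables because the two
 * directions target different registers: the handle's addr_e field
 * points at the mask clear register and addr_d at the mask set register
 * (see _intc_mask_data()), so writing the field bits always does the
 * right thing.
 */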
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}

static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}
static void (*intc_enable_noprio_fns[])(unsigned long addr,
					unsigned long handle,
					void (*fn)(unsigned long,
						   unsigned long,
						   unsigned long),
					unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	void (*fn)(unsigned long, unsigned long,
		   void (*)(unsigned long, unsigned long, unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}
static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
					     unsigned int nr_hp,
					     unsigned int irq)
{
	int i;

	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to lookup the irq.
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
			continue;

		return hp + i;
	}

	return NULL;
}
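/*
 * Change the priority level of a single IRQ at run time. Levels 0 and 1
 * are rejected (2 is the default set at registration time), and the
 * level must fit in the priority field, i.e. prio < (1 << field width).
 * IRQs that were not registered with a priority handle are silently
 * accepted but unaffected.
 */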
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};
static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
	}
	return 0;
}
static unsigned int __init intc_get_reg(struct intc_desc_int *d,
					unsigned long address)
{
	unsigned int k;

	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
			return k;
	}

	BUG();
	return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
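/*
 * Walk every mask bit and priority field tied to enum_id, duplicates
 * included, and force them all on or off. Used at registration time to
 * honour the force_enable/force_disable hooks in the descriptor.
 */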
static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);
}
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}
static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;

	/*
	 * Register the IRQ position with the global IRQ map
	 */
	set_bit(irq, intc_irq_map);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("intc: missing unique irq mask for "
			   "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = 2;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}

		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);
}
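/*
 * Stash a register address (and its SMP layout under CONFIG_SMP) in the
 * next free d->reg[] slot. Returns 1 if a slot was consumed and 0 if
 * the register is absent, so callers can advance their index by the
 * return value.
 */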
static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned int cnt,
				    unsigned long value,
				    unsigned int smp)
{
	if (value) {
		d->reg[cnt] = value;
#ifdef CONFIG_SMP
		d->smp[cnt] = smp;
#endif
		return 1;
	}

	return 0;
}
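/*
 * Chained handler installed on the secondary vectors of a multi-evt
 * IRQ; it simply forwards to the primary IRQ stored via set_irq_data().
 */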
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}
void __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;

	d = kzalloc(sizeof(*d), GFP_NOWAIT);

	INIT_LIST_HEAD(&d->list);
	list_add(&d->list, &intc_list);

	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
#endif
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;
	}

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_info("can't get irq_desc for %d\n", irq);
			continue;
		}

		intc_register_irq(desc, d, vect->enum_id, irq);

		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_info("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect these interrupts to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);
}
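/*
 * A platform typically describes its controller statically and hands it
 * over early during boot, along the lines of the following sketch
 * (names illustrative, descriptor built with the DECLARE_INTC_DESC()
 * helper from <linux/sh_intc.h>):
 *
 *	static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, groups,
 *				 mask_registers, prio_registers,
 *				 sense_registers);
 *	...
 *	register_intc_controller(&intc_desc);
 */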
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;

		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}

	d->state = state;

	return 0;
}
static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
	.name = "intc",
	.suspend = intc_suspend,
	.resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;
	int id = 0;

	error = sysdev_class_register(&intc_sysdev_class);
	if (!error) {
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = id;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error)
				break;
			id++;
		}
	}

	if (error)
		pr_warning("intc: sysdev registration error\n");

	return error;
}
device_initcall(register_intc_sysdevs);
/*
 * Dynamic IRQ allocation and deallocation
 */
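/*
 * create_irq_nr() tries the wanted IRQ number first and falls back to
 * the first free slot in the global map; it returns the allocated IRQ
 * number, or 0 on failure.
 */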
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_info("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init(irq);

	return irq;
}
int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int reserve_irq_vector(unsigned int irq)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
		ret = -EBUSY;
	spin_unlock_irqrestore(&vector_lock, flags);

	return ret;
}

void reserve_irq_legacy(void)
{
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}