intc.c
/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	(h & 0x1f)
#define _INTC_WIDTH(h)	((h >> 5) & 0xf)
#define _INTC_FN(h)	((h >> 9) & 0xf)
#define _INTC_MODE(h)	((h >> 13) & 0x7)
#define _INTC_ADDR_E(h)	((h >> 16) & 0xff)
#define _INTC_ADDR_D(h)	((h >> 24) & 0xff)
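
/*
 * Handle encoding, as assembled by _INTC_MK() and decoded by the
 * accessors above (a minimal sketch of the layout, read straight off
 * the macros, for reference):
 *
 *	bits 31..24	addr_d	index into d->reg[] of the disable register
 *	bits 23..16	addr_e	index into d->reg[] of the enable register
 *	bits 15..13	mode	MODE_* register semantics
 *	bits 12..9	fn	REG_FN_* accessor base plus a width offset
 *	bits  8..5	width	bitfield width within the register
 *	bits  4..0	shift	bitfield position within the register
 */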
struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};

struct intc_window {
	phys_addr_t phys;
	void __iomem *virt;
	unsigned long size;
};

struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	pm_message_t state;
	unsigned long *reg;
#ifdef CONFIG_SMP
	unsigned long *smp;
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;
	unsigned int nr_prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct intc_window *window;
	unsigned int nr_windows;
	struct irq_chip chip;
};
static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);
#ifdef CONFIG_SMP
#define IS_SMP(x)		x.smp
#define INTC_REG(d, x, c)	(d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)		((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)		0
#define INTC_REG(d, x, c)	(d->reg[(x)])
#define SMP_NR(d, x)		1
#endif
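
/*
 * Per the macros above, each d->smp[] entry packs the per-CPU register
 * banking information: the low byte is the address stride between the
 * per-CPU copies of a register (INTC_REG() scales it by the CPU number),
 * while the upper bits hold the number of copies (SMP_NR() falls back to
 * a single copy when zero). On UP builds both collapse to the plain case.
 */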
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned long ack_handle[NR_IRQS];

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);
	return container_of(chip, struct intc_desc_int, chip);
}
static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
	return value;
}
static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

enum { REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };
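
/*
 * The REG_FN_* bases are offset by (reg_width >> 3) - 1 when a handle
 * is built, so an 8-bit register selects base + 0, a 16-bit register
 * base + 1, and a 32-bit register base + 3 (base + 2 would correspond
 * to an unused 24-bit width), matching the slots populated below.
 */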
static void (*intc_reg_fns[])(unsigned long addr,
			      unsigned long h,
			      unsigned long data) = {
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};

enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
			    unsigned long handle,
			    void (*fn)(unsigned long,
				       unsigned long,
				       unsigned long),
			    unsigned int irq)
{
	fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
				 unsigned long handle,
				 void (*fn)(unsigned long,
					    unsigned long,
					    unsigned long),
				 unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};
static void (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  void (*fn)(unsigned long,
					     unsigned long,
					     unsigned long),
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};
static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}

static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}
static void (*intc_enable_noprio_fns[])(unsigned long addr,
					unsigned long handle,
					void (*fn)(unsigned long,
						   unsigned long,
						   unsigned long),
					unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	void (*fn)(unsigned long, unsigned long,
		   void (*)(unsigned long, unsigned long, unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}
static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif
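
/*
 * The ack registers handled below behave as write-zero-to-clear: the
 * sequence reads the register and then writes a value with every bit
 * set except the one being acknowledged, so only the pending flag of
 * interest is cleared.
 */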
static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
					     unsigned int nr_hp,
					     unsigned int irq)
{
	int i;

	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to lookup the irq.
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
			continue;

		return hp + i;
	}

	return NULL;
}
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set the secondary masking method directly;
		 * the primary masking method uses intc_prio_level[irq],
		 * so the priority level will be set during the next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}
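
/*
 * Sense values are looked up below by Linux trigger type. VALID() sets
 * bit 7 so that a supported entry is never zero; a zero entry in the
 * table therefore means "trigger type unsupported" and makes
 * intc_set_sense() bail out with -EINVAL.
 */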
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggering */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};
static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		/* strip the VALID flag so only the sense value is written */
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
						    value & ~VALID(0));
	}
	return 0;
}
static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
{
	struct intc_window *window;
	int k;

	/* scan through physical windows and convert address */
	for (k = 0; k < d->nr_windows; k++) {
		window = d->window + k;

		if (address < window->phys)
			continue;

		if (address >= (window->phys + window->size))
			continue;

		address -= window->phys;
		address += (unsigned long)window->virt;

		return address;
	}

	/* no windows defined, register must be 1:1 mapped virt:phys */
	return address;
}
static unsigned int __init intc_get_reg(struct intc_desc_int *d,
					unsigned long address)
{
	unsigned int k;

	address = intc_phys_to_virt(d, address);

	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
			return k;
	}

	BUG();
	return 0;
}
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}
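
/*
 * Mask register handles are built below according to which registers the
 * platform describes: with both a set and a clear register, the hardware
 * is dual-register style (writing a bit to clr_reg enables, to set_reg
 * disables, so no read-modify-write is needed); with only one of the two,
 * the single register is read-modify-written, either as an enable
 * register (bits set enable) or as a mask register (bits set disable).
 */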
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
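
/*
 * Priority fields are packed MSB-first within their register, so the
 * field for index n (counting from zero) has its least significant bit
 * at reg_width - (n + 1) * field_width; the BUG_ON() below catches
 * descriptors whose fields would run off the end of the register.
 */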
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}
static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}
static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);
}
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}
static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;

	/*
	 * Register the IRQ position with the global IRQ map
	 */
	set_bit(irq, intc_irq_map);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("intc: missing unique irq mask for "
			   "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = 2;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}

		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
}
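
/*
 * save_reg() stores one register address (translated through the ioremap
 * windows) into the flat d->reg[] array, keeping the SMP banking info in
 * step on SMP builds. It returns 1 when a register was saved and 0 when
 * the address was empty, so callers can simply add the result to their
 * running index.
 */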
static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned int cnt,
				    unsigned long value,
				    unsigned int smp)
{
	if (value) {
		value = intc_phys_to_virt(d, value);

		d->reg[cnt] = value;
#ifdef CONFIG_SMP
		d->smp[cnt] = smp;
#endif
		return 1;
	}

	return 0;
}
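
/*
 * Chained handler for vectors that share an enum_id with an earlier
 * vector: the "real" irq is stashed with set_irq_data() at registration
 * time, so all this needs to do is hand off to it.
 */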
static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}
int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("intc: Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add(&d->list, &intc_list);

	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							    resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
	d->chip.set_affinity = intc_set_affinity;
#endif

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;
	}
	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		intc_register_irq(desc, d, vect->enum_id, irq);

		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect this interrupt to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}
static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
	struct intc_desc_int *d;

	d = container_of(dev, struct intc_desc_int, sysdev);

	return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;

		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}

	d->state = state;

	return 0;
}
static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
	.name = "intc",
	.suspend = intc_suspend,
	.resume = intc_resume,
};
/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;
	int id = 0;

	error = sysdev_class_register(&intc_sysdev_class);
	if (!error) {
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = id;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error == 0)
				error = sysdev_create_file(&d->sysdev,
							   &attr_name);
			if (error)
				break;
			id++;
		}
	}

	if (error)
		pr_err("intc: sysdev registration error\n");

	return error;
}
device_initcall(register_intc_sysdevs);
/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
	}

	return irq;
}
int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}
int reserve_irq_vector(unsigned int irq)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
		ret = -EBUSY;
	spin_unlock_irqrestore(&vector_lock, flags);

	return ret;
}

void reserve_irq_legacy(void)
{
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}