intc.c

/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <asm/sizes.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
        ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
         ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)          (h & 0x1f)
#define _INTC_WIDTH(h)          ((h >> 5) & 0xf)
#define _INTC_FN(h)             ((h >> 9) & 0xf)
#define _INTC_MODE(h)           ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h)         ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h)         ((h >> 24) & 0xff)
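
/*
 * Illustrative sketch of the handle layout implied by the macros above
 * (editorial note, not part of the original comments):
 *
 *   31       24 23      16 15   13 12    9 8     5 4      0
 *  +-----------+----------+-------+-------+-------+--------+
 *  |  addr_d   |  addr_e  | mode  |  fn   | width | shift  |
 *  +-----------+----------+-------+-------+-------+--------+
 *
 * e.g. _INTC_MK(REG_FN_MODIFY_BASE, MODE_ENABLE_REG, 2, 2, 1, 7)
 * describes a 1-bit enable field at bit 7 of the register saved at
 * index 2, so _INTC_WIDTH(h) == 1, _INTC_SHIFT(h) == 7, and both
 * _INTC_ADDR_E(h) and _INTC_ADDR_D(h) decode back to 2.
 */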

struct intc_handle_int {
        unsigned int irq;
        unsigned long handle;
};

struct intc_window {
        phys_addr_t phys;
        void __iomem *virt;
        unsigned long size;
};

struct intc_desc_int {
        struct list_head list;
        struct sys_device sysdev;
        pm_message_t state;
        unsigned long *reg;
#ifdef CONFIG_SMP
        unsigned long *smp;
#endif
        unsigned int nr_reg;
        struct intc_handle_int *prio;
        unsigned int nr_prio;
        struct intc_handle_int *sense;
        unsigned int nr_sense;
        struct intc_window *window;
        unsigned int nr_windows;
        struct irq_chip chip;
};

static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This combined with
 * sparseirq makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static DEFINE_SPINLOCK(vector_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x)               x.smp
#define INTC_REG(d, x, c)       (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)            ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)               0
#define INTC_REG(d, x, c)       (d->reg[(x)])
#define SMP_NR(d, x)            1
#endif
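
/*
 * A worked example of the SMP accessors above (assumed values, shown
 * for illustration only): if d->smp[x] == 0x0204, the low byte gives a
 * per-cpu register stride of 4 bytes and the high byte gives 2 cpu
 * copies, so SMP_NR(d, x) == 2 and INTC_REG(d, x, 1) points at the
 * second cpu's copy, d->reg[x] + 4.
 */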

static unsigned int intc_prio_level[NR_IRQS];   /* for now */
static unsigned int default_prio_level = 2;     /* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
        struct irq_chip *chip = get_irq_chip(irq);

        return container_of(chip, struct intc_desc_int, chip);
}

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
                                       unsigned long address)
{
        struct intc_window *window;
        int k;

        /* scan through physical windows and convert address */
        for (k = 0; k < d->nr_windows; k++) {
                window = d->window + k;

                if (address < window->phys)
                        continue;

                if (address >= (window->phys + window->size))
                        continue;

                address -= window->phys;
                address += (unsigned long)window->virt;

                return address;
        }

        /* no windows defined, register must be 1:1 mapped virt:phys */
        return address;
}

static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
        unsigned int k;

        address = intc_phys_to_virt(d, address);

        for (k = 0; k < d->nr_reg; k++) {
                if (d->reg[k] == address)
                        return k;
        }

        BUG();
        return 0;
}

static inline unsigned int set_field(unsigned int value,
                                     unsigned int field_value,
                                     unsigned int handle)
{
        unsigned int width = _INTC_WIDTH(handle);
        unsigned int shift = _INTC_SHIFT(handle);

        value &= ~(((1 << width) - 1) << shift);
        value |= field_value << shift;
        return value;
}
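
/*
 * set_field() sketch (hypothetical handle, shown for illustration):
 * with a handle encoding width == 2 and shift == 4, the field mask is
 * ((1 << 2) - 1) << 4 == 0x30, so set_field(0xff, 2, h) first clears
 * bits 4-5 (giving 0xcf) and then ORs in 2 << 4, returning 0xef.
 */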

static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writeb(set_field(0, data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writew(set_field(0, data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
        __raw_writel(set_field(0, data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writeb(set_field(__raw_readb(addr), data, h), addr);
        (void)__raw_readb(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writew(set_field(__raw_readw(addr), data, h), addr);
        (void)__raw_readw(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
        unsigned long flags;

        local_irq_save(flags);
        __raw_writel(set_field(__raw_readl(addr), data, h), addr);
        (void)__raw_readl(addr);        /* Defeat write posting */
        local_irq_restore(flags);
}

enum {  REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
                              unsigned long h,
                              unsigned long data) = {
        [REG_FN_WRITE_BASE + 0] = write_8,
        [REG_FN_WRITE_BASE + 1] = write_16,
        [REG_FN_WRITE_BASE + 3] = write_32,
        [REG_FN_MODIFY_BASE + 0] = modify_8,
        [REG_FN_MODIFY_BASE + 1] = modify_16,
        [REG_FN_MODIFY_BASE + 3] = modify_32,
};

enum {  MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
        MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
        MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
        MODE_PRIO_REG,       /* Priority value written to enable interrupt */
        MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
                            unsigned long handle,
                            void (*fn)(unsigned long,
                                       unsigned long,
                                       unsigned long),
                            unsigned int irq)
{
        fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
                           unsigned long handle,
                           void (*fn)(unsigned long,
                                      unsigned long,
                                      unsigned long),
                           unsigned int irq)
{
        fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
                                 unsigned long handle,
                                 void (*fn)(unsigned long,
                                            unsigned long,
                                            unsigned long),
                                 unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_prio,
        [MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
                                  unsigned long handle,
                                  void (*fn)(unsigned long,
                                             unsigned long,
                                             unsigned long),
                                  unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_zero,
        [MODE_MASK_REG] = intc_mode_field,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_zero,
        [MODE_PCLR_REG] = intc_mode_field,
};
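
/*
 * Reading the two tables above together (editorial note): for a
 * MODE_MASK_REG handle, enabling an IRQ writes 0 to its mask bit
 * (intc_mode_zero) while disabling writes the field bits set
 * (intc_mode_field); for MODE_DUAL_REG, both paths write the field
 * bits, and it is the handle's addr_e/addr_d index that selects which
 * of the two registers (clear or set) actually gets the write.
 */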

#ifdef CONFIG_INTC_BALANCING
static inline void intc_balancing_enable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = dist_handle[irq];
        unsigned long addr;

        if (irq_balancing_disabled(irq) || !handle)
                return;

        addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
        intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

static inline void intc_balancing_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = dist_handle[irq];
        unsigned long addr;

        if (irq_balancing_disabled(irq) || !handle)
                return;

        addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
        intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

static unsigned int intc_dist_data(struct intc_desc *desc,
                                   struct intc_desc_int *d,
                                   intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->hw.mask_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
                mr = desc->hw.mask_regs + i;

                /*
                 * Skip this entry if there's no auto-distribution
                 * register associated with it.
                 */
                if (!mr->dist_reg)
                        continue;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->dist_reg;
                        reg_d = mr->dist_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        /*
         * It's possible we've gotten here with no distribution options
         * available for the IRQ in question, so we just skip over those.
         */
        return 0;
}
#else
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }

        intc_balancing_enable(irq);
}

static void intc_enable(unsigned int irq)
{
        _intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)get_irq_chip_data(irq);
        unsigned long addr;
        unsigned int cpu;

        intc_balancing_disable(irq);

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

static void (*intc_enable_noprio_fns[])(unsigned long addr,
                                        unsigned long handle,
                                        void (*fn)(unsigned long,
                                                   unsigned long,
                                                   unsigned long),
                                        unsigned int irq) = {
        [MODE_ENABLE_REG] = intc_mode_field,
        [MODE_MASK_REG] = intc_mode_zero,
        [MODE_DUAL_REG] = intc_mode_field,
        [MODE_PRIO_REG] = intc_mode_field,
        [MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
                                unsigned long handle, int do_enable)
{
        unsigned long addr;
        unsigned int cpu;
        void (*fn)(unsigned long, unsigned long,
                   void (*)(unsigned long, unsigned long, unsigned long),
                   unsigned int);

        if (do_enable) {
                for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
                        addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                        fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
                        fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
                }
        } else {
                for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
                        addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                        fn = intc_disable_fns[_INTC_MODE(handle)];
                        fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
                }
        }
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
        return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask))
                return -1;

        cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

        return 0;
}
#endif

static void intc_mask_ack(unsigned int irq)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = ack_handle[irq];
        unsigned long addr;

        intc_disable(irq);

        /* read register and write zero only to the associated bit */
        if (handle) {
                addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ set_field(0, 1, handle), addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}
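
/*
 * Worked example for the ack path above (hypothetical handle): on an
 * 8-bit ack register with the handle's 1-bit field at bit 5,
 * set_field(0, 1, handle) == 0x20, so 0xff ^ 0x20 == 0xdf is written
 * back and only the associated bit is written as zero to acknowledge.
 */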

static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        int i;

        /*
         * this doesn't scale well, but...
         *
         * this function should only be used for certain uncommon
         * operations such as intc_set_priority() and intc_set_sense()
         * and in those rare cases performance doesn't matter that much.
         * keeping the memory footprint low is more important.
         *
         * one rather simple way to speed this up and still keep the
         * memory footprint down is to make sure the array is sorted
         * and then perform a bisect to lookup the irq, as sketched
         * below.
         */
        for (i = 0; i < nr_hp; i++) {
                if ((hp + i)->irq != irq)
                        continue;

                return hp + i;
        }

        return NULL;
}
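
/*
 * Editorial sketch of the bisect mentioned above, assuming the array
 * were kept sorted by ->irq (it is not today, so this is illustrative
 * only):
 *
 *      int lo = 0, hi = nr_hp - 1;
 *
 *      while (lo <= hi) {
 *              int mid = lo + (hi - lo) / 2;
 *
 *              if (hp[mid].irq == irq)
 *                      return hp + mid;
 *              if (hp[mid].irq < irq)
 *                      lo = mid + 1;
 *              else
 *                      hi = mid - 1;
 *      }
 *      return NULL;
 */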

int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct intc_handle_int *ihp;

        if (!intc_prio_level[irq] || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_prio_level[irq] = prio;

                /*
                 * only set secondary masking method directly
                 * primary masking method is using intc_prio_level[irq]
                 * priority level will be set during next enable()
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(irq, ihp->handle);
        }

        return 0;
}

#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING] = VALID(1),
        [IRQ_TYPE_LEVEL_LOW] = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_sense(unsigned int irq, unsigned int type)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }

        return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
                                    intc_enum enum_id)
{
        struct intc_group *g = desc->hw.groups;
        unsigned int i, j;

        for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
                g = desc->hw.groups + i;

                for (j = 0; g->enum_ids[j]; j++) {
                        if (g->enum_ids[j] != enum_id)
                                continue;

                        return g->enum_id;
                }
        }

        return 0;
}

static unsigned int __init _intc_mask_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id,
                                           unsigned int *reg_idx,
                                           unsigned int *fld_idx)
{
        struct intc_mask_reg *mr = desc->hw.mask_regs;
        unsigned int fn, mode;
        unsigned long reg_e, reg_d;

        while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
                mr = desc->hw.mask_regs + *reg_idx;

                for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
                        if (mr->enum_ids[*fld_idx] != enum_id)
                                continue;

                        if (mr->set_reg && mr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_DUAL_REG;
                                reg_e = mr->clr_reg;
                                reg_d = mr->set_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                if (mr->set_reg) {
                                        mode = MODE_ENABLE_REG;
                                        reg_e = mr->set_reg;
                                        reg_d = mr->set_reg;
                                } else {
                                        mode = MODE_MASK_REG;
                                        reg_e = mr->clr_reg;
                                        reg_d = mr->clr_reg;
                                }
                        }

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - *fld_idx);
                }

                *fld_idx = 0;
                (*reg_idx)++;
        }

        return 0;
}
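
/*
 * Worked example (hypothetical descriptor): an 8-bit mask register
 * with only set_reg populated and the matching enum at field index 2
 * yields fn = REG_FN_MODIFY_BASE + 0, mode = MODE_ENABLE_REG, and a
 * 1-bit field at shift (8 - 1) - 2 == 5, i.e. the resulting handle
 * drives bit 5 of that register through modify_8().
 */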

static unsigned int __init intc_mask_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int ret;

        ret = _intc_mask_data(desc, d, enum_id, &i, &j);
        if (ret)
                return ret;

        if (do_grps)
                return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static unsigned int __init _intc_prio_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id,
                                           unsigned int *reg_idx,
                                           unsigned int *fld_idx)
{
        struct intc_prio_reg *pr = desc->hw.prio_regs;
        unsigned int fn, n, mode, bit;
        unsigned long reg_e, reg_d;

        while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
                pr = desc->hw.prio_regs + *reg_idx;

                for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
                        if (pr->enum_ids[*fld_idx] != enum_id)
                                continue;

                        if (pr->set_reg && pr->clr_reg) {
                                fn = REG_FN_WRITE_BASE;
                                mode = MODE_PCLR_REG;
                                reg_e = pr->set_reg;
                                reg_d = pr->clr_reg;
                        } else {
                                fn = REG_FN_MODIFY_BASE;
                                mode = MODE_PRIO_REG;
                                if (!pr->set_reg)
                                        BUG();
                                reg_e = pr->set_reg;
                                reg_d = pr->set_reg;
                        }

                        fn += (pr->reg_width >> 3) - 1;
                        n = *fld_idx + 1;

                        BUG_ON(n * pr->field_width > pr->reg_width);

                        bit = pr->reg_width - (n * pr->field_width);

                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        pr->field_width, bit);
                }

                *fld_idx = 0;
                (*reg_idx)++;
        }

        return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
                                          struct intc_desc_int *d,
                                          intc_enum enum_id, int do_grps)
{
        unsigned int i = 0;
        unsigned int j = 0;
        unsigned int ret;

        ret = _intc_prio_data(desc, d, enum_id, &i, &j);
        if (ret)
                return ret;

        if (do_grps)
                return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

        return 0;
}

static void __init intc_enable_disable_enum(struct intc_desc *desc,
                                            struct intc_desc_int *d,
                                            intc_enum enum_id, int enable)
{
        unsigned int i, j, data;

        /* go through and enable/disable all mask bits */
        i = j = 0;
        do {
                data = _intc_mask_data(desc, d, enum_id, &i, &j);
                if (data)
                        intc_enable_disable(d, data, enable);
                j++;
        } while (data);

        /* go through and enable/disable all priority fields */
        i = j = 0;
        do {
                data = _intc_prio_data(desc, d, enum_id, &i, &j);
                if (data)
                        intc_enable_disable(d, data, enable);
                j++;
        } while (data);
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
                                         struct intc_desc_int *d,
                                         intc_enum enum_id)
{
        struct intc_mask_reg *mr = desc->hw.ack_regs;
        unsigned int i, j, fn, mode;
        unsigned long reg_e, reg_d;

        for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
                mr = desc->hw.ack_regs + i;

                for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
                        if (mr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        mode = MODE_ENABLE_REG;
                        reg_e = mr->set_reg;
                        reg_d = mr->set_reg;

                        fn += (mr->reg_width >> 3) - 1;
                        return _INTC_MK(fn, mode,
                                        intc_get_reg(d, reg_e),
                                        intc_get_reg(d, reg_d),
                                        1,
                                        (mr->reg_width - 1) - j);
                }
        }

        return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
                                           struct intc_desc_int *d,
                                           intc_enum enum_id)
{
        struct intc_sense_reg *sr = desc->hw.sense_regs;
        unsigned int i, j, fn, bit;

        for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
                sr = desc->hw.sense_regs + i;

                for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
                        if (sr->enum_ids[j] != enum_id)
                                continue;

                        fn = REG_FN_MODIFY_BASE;
                        fn += (sr->reg_width >> 3) - 1;

                        BUG_ON((j + 1) * sr->field_width > sr->reg_width);

                        bit = sr->reg_width - ((j + 1) * sr->field_width);

                        return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
                                        0, sr->field_width, bit);
                }
        }

        return 0;
}

static void __init intc_register_irq(struct intc_desc *desc,
                                     struct intc_desc_int *d,
                                     intc_enum enum_id,
                                     unsigned int irq)
{
        struct intc_handle_int *hp;
        unsigned int data[2], primary;

        /*
         * Register the IRQ position with the global IRQ map
         */
        set_bit(irq, intc_irq_map);

        /*
         * Prefer single interrupt source bitmap over other combinations:
         *
         * 1. bitmap, single interrupt source
         * 2. priority, single interrupt source
         * 3. bitmap, multiple interrupt sources (groups)
         * 4. priority, multiple interrupt sources (groups)
         */
        data[0] = intc_mask_data(desc, d, enum_id, 0);
        data[1] = intc_prio_data(desc, d, enum_id, 0);

        primary = 0;
        if (!data[0] && data[1])
                primary = 1;

        if (!data[0] && !data[1])
                pr_warning("intc: missing unique irq mask for "
                           "irq %d (vect 0x%04x)\n", irq, irq2evt(irq));

        data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
        data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

        if (!data[primary])
                primary ^= 1;

        BUG_ON(!data[primary]); /* must have primary masking method */

        disable_irq_nosync(irq);
        set_irq_chip_and_handler_name(irq, &d->chip,
                                      handle_level_irq, "level");
        set_irq_chip_data(irq, (void *)data[primary]);

        /*
         * set priority level
         * - this needs to be at least 2 for 5-bit priorities on 7780
         */
        intc_prio_level[irq] = default_prio_level;

        /* enable secondary masking method if present */
        if (data[!primary])
                _intc_enable(irq, data[!primary]);

        /* add irq to d->prio list if priority is available */
        if (data[1]) {
                hp = d->prio + d->nr_prio;
                hp->irq = irq;
                hp->handle = data[1];

                if (primary) {
                        /*
                         * only secondary priority should access registers, so
                         * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
                         */
                        hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
                        hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
                }

                d->nr_prio++;
        }

        /* add irq to d->sense list if sense is available */
        data[0] = intc_sense_data(desc, d, enum_id);
        if (data[0]) {
                (d->sense + d->nr_sense)->irq = irq;
                (d->sense + d->nr_sense)->handle = data[0];
                d->nr_sense++;
        }

        /* irq should be disabled by default */
        d->chip.mask(irq);

        if (desc->hw.ack_regs)
                ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
        if (desc->hw.mask_regs)
                dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

#ifdef CONFIG_ARM
        set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
                                    unsigned int cnt,
                                    unsigned long value,
                                    unsigned int smp)
{
        if (value) {
                value = intc_phys_to_virt(d, value);

                d->reg[cnt] = value;
#ifdef CONFIG_SMP
                d->smp[cnt] = smp;
#endif
                return 1;
        }

        return 0;
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
        generic_handle_irq((unsigned int)get_irq_data(irq));
}

int __init register_intc_controller(struct intc_desc *desc)
{
        unsigned int i, k, smp;
        struct intc_hw_desc *hw = &desc->hw;
        struct intc_desc_int *d;
        struct resource *res;

        pr_info("intc: Registered controller '%s' with %u IRQs\n",
                desc->name, hw->nr_vectors);

        d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
                goto err0;

        INIT_LIST_HEAD(&d->list);
        list_add(&d->list, &intc_list);

        if (desc->num_resources) {
                d->nr_windows = desc->num_resources;
                d->window = kzalloc(d->nr_windows * sizeof(*d->window),
                                    GFP_NOWAIT);
                if (!d->window)
                        goto err1;

                for (k = 0; k < d->nr_windows; k++) {
                        res = desc->resource + k;
                        WARN_ON(resource_type(res) != IORESOURCE_MEM);
                        d->window[k].phys = res->start;
                        d->window[k].size = resource_size(res);
                        d->window[k].virt = ioremap_nocache(res->start,
                                                            resource_size(res));
                        if (!d->window[k].virt)
                                goto err2;
                }
        }

        d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
        if (d->nr_reg)
                d->nr_reg += hw->nr_mask_regs;
#endif
        d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
        d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
        d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;

        d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
        if (!d->reg)
                goto err2;

#ifdef CONFIG_SMP
        d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
        if (!d->smp)
                goto err3;
#endif
        k = 0;

        if (hw->mask_regs) {
                for (i = 0; i < hw->nr_mask_regs; i++) {
                        smp = IS_SMP(hw->mask_regs[i]);
                        k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
                        k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
                        k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
                }
        }

        if (hw->prio_regs) {
                d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
                                  GFP_NOWAIT);
                if (!d->prio)
                        goto err4;

                for (i = 0; i < hw->nr_prio_regs; i++) {
                        smp = IS_SMP(hw->prio_regs[i]);
                        k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
                        k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
                }
        }

        if (hw->sense_regs) {
                d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
                                   GFP_NOWAIT);
                if (!d->sense)
                        goto err5;

                for (i = 0; i < hw->nr_sense_regs; i++)
                        k += save_reg(d, k, hw->sense_regs[i].reg, 0);
        }

        d->chip.name = desc->name;
        d->chip.mask = intc_disable;
        d->chip.unmask = intc_enable;
        d->chip.mask_ack = intc_disable;
        d->chip.enable = intc_enable;
        d->chip.disable = intc_disable;
        d->chip.shutdown = intc_disable;
        d->chip.set_type = intc_set_sense;
        d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
        d->chip.set_affinity = intc_set_affinity;
#endif

        if (hw->ack_regs) {
                for (i = 0; i < hw->nr_ack_regs; i++)
                        k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

                d->chip.mask_ack = intc_mask_ack;
        }

        /* disable bits matching force_disable before registering irqs */
        if (desc->force_disable)
                intc_enable_disable_enum(desc, d, desc->force_disable, 0);

        /* disable bits matching force_enable before registering irqs */
        if (desc->force_enable)
                intc_enable_disable_enum(desc, d, desc->force_enable, 0);

        BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

        /* register the vectors one by one */
        for (i = 0; i < hw->nr_vectors; i++) {
                struct intc_vect *vect = hw->vectors + i;
                unsigned int irq = evt2irq(vect->vect);
                struct irq_desc *irq_desc;

                if (!vect->enum_id)
                        continue;

                irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
                if (unlikely(!irq_desc)) {
                        pr_err("can't get irq_desc for %d\n", irq);
                        continue;
                }

                intc_register_irq(desc, d, vect->enum_id, irq);

                for (k = i + 1; k < hw->nr_vectors; k++) {
                        struct intc_vect *vect2 = hw->vectors + k;
                        unsigned int irq2 = evt2irq(vect2->vect);

                        if (vect->enum_id != vect2->enum_id)
                                continue;

                        /*
                         * In the case of multi-evt handling and sparse
                         * IRQ support, each vector still needs to have
                         * its own backing irq_desc.
                         */
                        irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
                        if (unlikely(!irq_desc)) {
                                pr_err("can't get irq_desc for %d\n", irq2);
                                continue;
                        }

                        vect2->enum_id = 0;

                        /* redirect this interrupt to the first one */
                        set_irq_chip(irq2, &dummy_irq_chip);
                        set_irq_chained_handler(irq2, intc_redirect_irq);
                        set_irq_data(irq2, (void *)irq);
                }
        }

        /* enable bits matching force_enable after registering irqs */
        if (desc->force_enable)
                intc_enable_disable_enum(desc, d, desc->force_enable, 1);

        return 0;
err5:
        kfree(d->prio);
err4:
#ifdef CONFIG_SMP
        kfree(d->smp);
err3:
#endif
        kfree(d->reg);
err2:
        for (k = 0; k < d->nr_windows; k++)
                if (d->window[k].virt)
                        iounmap(d->window[k].virt);

        kfree(d->window);
err1:
        kfree(d);
err0:
        pr_err("unable to allocate INTC memory\n");

        return -ENOMEM;
}

#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;

int register_intc_userimask(unsigned long addr)
{
        if (unlikely(uimask))
                return -EBUSY;

        uimask = ioremap_nocache(addr, SZ_4K);
        if (unlikely(!uimask))
                return -ENOMEM;

        pr_info("intc: userimask support registered for levels 0 -> %d\n",
                default_prio_level - 1);

        return 0;
}

static ssize_t
show_intc_userimask(struct sysdev_class *cls,
                    struct sysdev_class_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

static ssize_t
store_intc_userimask(struct sysdev_class *cls,
                     struct sysdev_class_attribute *attr,
                     const char *buf, size_t count)
{
        unsigned long level;

        level = simple_strtoul(buf, NULL, 10);

        /*
         * Minimal acceptable IRQ levels are in the 2 - 16 range, but
         * these are chomped so as to not interfere with normal IRQs.
         *
         * Level 1 is a special case on some CPUs in that it's not
         * directly settable, but given that USERIMASK cuts off below a
         * certain level, we don't care about this limitation here.
         * Level 0 on the other hand equates to user masking disabled.
         *
         * We use default_prio_level as a cut off so that only special
         * case opt-in IRQs can be mangled.
         */
        if (level >= default_prio_level)
                return -EINVAL;

        __raw_writel(0xa5 << 24 | level << 4, uimask);

        return count;
}

static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
                         show_intc_userimask, store_intc_userimask);
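
/*
 * Usage sketch (hypothetical base address, for illustration only): a
 * CPU setup file would map the USERIMASK block once at boot:
 *
 *      register_intc_userimask(0xfe410000);
 *
 * after which userspace can raise the cutoff, e.g.
 *
 *      echo 2 > /sys/devices/system/intc/userimask
 *
 * so that opt-in IRQs below the written level are masked, subject to
 * the default_prio_level limit checked above.
 */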

#endif

static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
        struct intc_desc_int *d;

        d = container_of(dev, struct intc_desc_int, sysdev);

        return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
        struct intc_desc_int *d;
        struct irq_desc *desc;
        int irq;

        /* get intc controller associated with this sysdev */
        d = container_of(dev, struct intc_desc_int, sysdev);

        switch (state.event) {
        case PM_EVENT_ON:
                if (d->state.event != PM_EVENT_FREEZE)
                        break;

                for_each_irq_desc(irq, desc) {
                        if (desc->handle_irq == intc_redirect_irq)
                                continue;
                        if (desc->chip != &d->chip)
                                continue;
                        if (desc->status & IRQ_DISABLED)
                                intc_disable(irq);
                        else
                                intc_enable(irq);
                }
                break;
        case PM_EVENT_FREEZE:
                /* nothing has to be done */
                break;
        case PM_EVENT_SUSPEND:
                /* enable wakeup irqs belonging to this intc controller */
                for_each_irq_desc(irq, desc) {
                        if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
                                intc_enable(irq);
                }
                break;
        }

        d->state = state;

        return 0;
}

static int intc_resume(struct sys_device *dev)
{
        return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
        .name           = "intc",
        .suspend        = intc_suspend,
        .resume         = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
        struct intc_desc_int *d;
        int error;
        int id = 0;

        error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
        if (!error && uimask)
                error = sysdev_class_create_file(&intc_sysdev_class,
                                                 &attr_userimask);
#endif
        if (!error) {
                list_for_each_entry(d, &intc_list, list) {
                        d->sysdev.id = id;
                        d->sysdev.cls = &intc_sysdev_class;

                        error = sysdev_register(&d->sysdev);
                        if (error == 0)
                                error = sysdev_create_file(&d->sysdev,
                                                           &attr_name);
                        if (error)
                                break;

                        id++;
                }
        }

        if (error)
                pr_err("intc: sysdev registration error\n");

        return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
        unsigned int irq = 0, new;
        unsigned long flags;
        struct irq_desc *desc;

        spin_lock_irqsave(&vector_lock, flags);

        /*
         * First try the wanted IRQ
         */
        if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
                new = irq_want;
        } else {
                /* .. then fall back to scanning. */
                new = find_first_zero_bit(intc_irq_map, nr_irqs);
                if (unlikely(new == nr_irqs))
                        goto out_unlock;

                __set_bit(new, intc_irq_map);
        }

        desc = irq_to_desc_alloc_node(new, node);
        if (unlikely(!desc)) {
                pr_err("can't get irq_desc for %d\n", new);
                goto out_unlock;
        }

        desc = move_irq_desc(desc, node);
        irq = new;

out_unlock:
        spin_unlock_irqrestore(&vector_lock, flags);

        if (irq > 0) {
                dynamic_irq_init(irq);
#ifdef CONFIG_ARM
                set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
#endif
        }

        return irq;
}
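
/*
 * Usage sketch (hypothetical board code): ask for a specific vector
 * first, falling back to the first free slot, then release it again:
 *
 *      unsigned int irq = create_irq_nr(evt2irq(0x200), numa_node_id());
 *
 *      if (irq)
 *              ... bind a chip and handler here ...
 *
 *      destroy_irq(irq);
 *
 * A return of 0 means the vector map was exhausted.
 */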

int create_irq(void)
{
        int nid = cpu_to_node(smp_processor_id());
        int irq;

        irq = create_irq_nr(NR_IRQS_LEGACY, nid);
        if (irq == 0)
                irq = -1;

        return irq;
}

void destroy_irq(unsigned int irq)
{
        unsigned long flags;

        dynamic_irq_cleanup(irq);

        spin_lock_irqsave(&vector_lock, flags);
        __clear_bit(irq, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}

int reserve_irq_vector(unsigned int irq)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&vector_lock, flags);
        if (test_and_set_bit(irq, intc_irq_map))
                ret = -EBUSY;
        spin_unlock_irqrestore(&vector_lock, flags);

        return ret;
}

void reserve_irq_legacy(void)
{
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&vector_lock, flags);
        j = find_first_bit(intc_irq_map, nr_irqs);
        for (i = 0; i < j; i++)
                __set_bit(i, intc_irq_map);
        spin_unlock_irqrestore(&vector_lock, flags);
}