ints-priority.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283
  1. /*
  2. * Set up the interrupt priorities
  3. *
  4. * Copyright 2004-2009 Analog Devices Inc.
  5. * 2003 Bas Vermeulen <bas@buyways.nl>
  6. * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
  7. * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
  8. * 1999 D. Jeff Dionne <jeff@uclinux.org>
  9. * 1996 Roman Zippel
  10. *
  11. * Licensed under the GPL-2
  12. */
  13. #include <linux/module.h>
  14. #include <linux/kernel_stat.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/irq.h>
  17. #ifdef CONFIG_IPIPE
  18. #include <linux/ipipe.h>
  19. #endif
  20. #ifdef CONFIG_KGDB
  21. #include <linux/kgdb.h>
  22. #endif
  23. #include <asm/traps.h>
  24. #include <asm/blackfin.h>
  25. #include <asm/gpio.h>
  26. #include <asm/irq_handler.h>
  27. #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
  28. #ifdef BF537_FAMILY
  29. # define BF537_GENERIC_ERROR_INT_DEMUX
  30. #else
  31. # undef BF537_GENERIC_ERROR_INT_DEMUX
  32. #endif
  33. /*
  34. * NOTES:
  35. * - we have separated the physical Hardware interrupt from the
  36. * levels that the LINUX kernel sees (see the description in irq.h)
  37. * -
  38. */
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise). The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif

/* The number of spurious interrupts */
atomic_t num_spurious;

#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;		/* cached VR_CTL wakeup-enable bits (see bfin_internal_set_wake) */
#endif
struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;	/* one past the last ivg_table entry for this ivg */
} ivg7_13[IVG13 - IVG7 + 1];
/*
 * Search SIC_IAR and fill tables with the irqvalues
 * and their positions in the SIC_ISR register.
 */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;

	/* For each IVG level 7..13, collect the peripheral IRQs that the
	 * IAR registers route to that level.  Entries land contiguously
	 * in ivg_table, and ivg7_13[] records the slice boundaries so the
	 * demuxer can scan only one level's entries.
	 */
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqn;

		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
			int iar_shift = (irqn & 7) * 4;	/* 4 IAR bits per irq */
			if (ivg == (0xf &
#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
	|| defined(CONFIG_BF539) || defined(CONFIG_BF51x)
			     /* these parts have a gap in the IAR register map,
			      * so index by 32-irq group */
			     bfin_read32((unsigned long *)SIC_IAR0 +
					 ((irqn % 32) >> 3) + ((irqn / 32) *
					 ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
#else
			     bfin_read32((unsigned long *)SIC_IAR0 +
					 (irqn >> 3)) >> iar_shift)) {
#endif
				ivg_table[irq_pos].irqno = IVG7 + irqn;
				ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
				ivg7_13[ivg].istop++;
				irq_pos++;
			}
		}
	}
}
  96. /*
  97. * This is for core internal IRQs
  98. */
  99. static void bfin_ack_noop(unsigned int irq)
  100. {
  101. /* Dummy function. */
  102. }
/* Mask a core interrupt: drop its bit from the cached IMASK image
 * (bfin_irq_flags) and, if interrupts are currently enabled, reload
 * the hardware mask (local_irq_enable_hw does "STI bfin_irq_flags").
 */
static void bfin_core_mask_irq(unsigned int irq)
{
	bfin_irq_flags &= ~(1 << irq);
	if (!irqs_disabled_hw())
		local_irq_enable_hw();
}
  109. static void bfin_core_unmask_irq(unsigned int irq)
  110. {
  111. bfin_irq_flags |= 1 << irq;
  112. /*
  113. * If interrupts are enabled, IMASK must contain the same value
  114. * as bfin_irq_flags. Make sure that invariant holds. If interrupts
  115. * are currently disabled we need not do anything; one of the
  116. * callers will take care of setting IMASK to the proper value
  117. * when reenabling interrupts.
  118. * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
  119. * what we need.
  120. */
  121. if (!irqs_disabled_hw())
  122. local_irq_enable_hw();
  123. return;
  124. }
/* Mask a peripheral (SIC-routed) interrupt.
 *
 * BF53x parts have a single SIC_IMASK register; other parts bank the
 * mask 32 IRQs per register, so the system IRQ is split into a bank
 * index and a bit index.  The read-modify-write of the shared mask
 * register is done with hardware interrupts off.
 */
static void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	local_irq_save_hw(flags);
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << SIC_SYSIRQ(irq)));
#else
	unsigned mask_bank, mask_bit;

	local_irq_save_hw(flags);
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#ifdef CONFIG_SMP
	/* mirror the change into the second core's SIC */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			      ~(1 << mask_bit));
#endif
#endif
	local_irq_restore_hw(flags);
}
/* Unmask a peripheral (SIC-routed) interrupt — mirror image of
 * bfin_internal_mask_irq(); see that function for the banked layout.
 */
static void bfin_internal_unmask_irq(unsigned int irq)
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	local_irq_save_hw(flags);
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			     (1 << SIC_SYSIRQ(irq)));
#else
	unsigned mask_bank, mask_bit;

	local_irq_save_hw(flags);
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
			     (1 << mask_bit));
#ifdef CONFIG_SMP
	/* mirror the change into the second core's SIC */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
			      (1 << mask_bit));
#endif
#endif
	local_irq_restore_hw(flags);
}
#ifdef CONFIG_PM
/* Record whether a peripheral interrupt may wake the system: set/clear
 * its bit in the cached per-bank SIC_IWR image (bfin_sic_iwr) and, for
 * sources that also need a regulator wakeup enable (RTC, CAN, USB,
 * keypad, rotary counter), the matching bit in vr_wakeup.  The cached
 * values are consumed by the PM code elsewhere — presumably at suspend
 * time; this function only maintains them.
 *
 * Returns 0 unconditionally.
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;

	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef IRQ_KEY
	case IRQ_KEY:
		wakeup |= KPADWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	local_irq_save_hw(flags);

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;
	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}

	local_irq_restore_hw(flags);

	return 0;
}
#endif
/* irq_chip for core interrupts (IRQs up to IRQ_CORETMR — see the
 * assignment loop in init_arch_irq).
 */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.ack = bfin_ack_noop,
	.mask = bfin_core_mask_irq,
	.unmask = bfin_core_unmask_irq,
};

/* irq_chip for peripheral interrupts routed through the SIC. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.ack = bfin_ack_noop,
	.mask = bfin_internal_mask_irq,
	.unmask = bfin_internal_unmask_irq,
	.mask_ack = bfin_internal_mask_irq,
	.disable = bfin_internal_mask_irq,
	.enable = bfin_internal_unmask_irq,
#ifdef CONFIG_PM
	.set_wake = bfin_internal_set_wake,
#endif
};
/* Deliver a demuxed interrupt: under I-pipe feed it into the interrupt
 * pipeline (with entry/exit tracing); otherwise invoke the flow handler
 * from its irq_desc directly.
 */
static void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;	/* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	struct irq_desc *desc = irq_desc + irq;
	desc->handle_irq(irq, desc);
#endif /* !CONFIG_IPIPE */
}
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
/* Bitmap of demuxed error sub-IRQs currently unmasked; bit 0 is
 * IRQ_PPI_ERROR.  The shared hardware line (IRQ_GENERIC_ERROR) stays
 * enabled while any sub-source is enabled, and is masked only once the
 * last sub-source has been masked.
 */
static int error_int_mask;

static void bfin_generic_error_mask_irq(unsigned int irq)
{
	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
	if (!error_int_mask)
		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}

static void bfin_generic_error_unmask_irq(unsigned int irq)
{
	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
}

/* irq_chip for the demuxed BF537 peripheral-error interrupts. */
static struct irq_chip bfin_generic_error_irqchip = {
	.name = "ERROR",
	.ack = bfin_ack_noop,
	.mask_ack = bfin_generic_error_mask_irq,
	.mask = bfin_generic_error_mask_irq,
	.unmask = bfin_generic_error_unmask_irq,
};
/* Chained handler for the shared BF537 peripheral-error interrupt.
 * Polls each peripheral's status register to identify the source, then
 * either delivers the demuxed IRQ (if unmasked in error_int_mask) or
 * acks the hardware status directly so the shared line deasserts.
 */
static void bfin_demux_error_irq(unsigned int int_err_irq,
				 struct irq_desc *inta_desc)
{
	int irq = 0;

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
		irq = IRQ_MAC_ERROR;
	else
#endif
	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT0_ERROR;
	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT1_ERROR;
	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
		irq = IRQ_PPI_ERROR;
	/* NOTE(review): the source test reads CAN_GIF but the ack path
	 * below writes CAN_GIS — verify this register pairing vs. HRM.
	 */
	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
		irq = IRQ_CAN_ERROR;
	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
		irq = IRQ_SPI_ERROR;
	/* NOTE(review): each UART IIR is read twice; if IIR is
	 * read-to-clear, status could be lost between reads — confirm.
	 */
	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART0_ERROR;
	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART1_ERROR;

	if (irq) {
		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
			bfin_handle_irq(irq);
		else {
			/* source is masked: clear its status so the
			 * shared line stops asserting */
			switch (irq) {
			case IRQ_PPI_ERROR:
				bfin_write_PPI_STATUS(PPI_ERR_MASK);
				break;
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
			case IRQ_MAC_ERROR:
				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
				break;
#endif
			case IRQ_SPORT0_ERROR:
				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_SPORT1_ERROR:
				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_CAN_ERROR:
				bfin_write_CAN_GIS(CAN_ERR_MASK);
				break;
			case IRQ_SPI_ERROR:
				bfin_write_SPI_STAT(SPI_ERR_MASK);
				break;
			default:
				break;
			}
			pr_debug("IRQ %d:"
				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
		       __func__, __FILE__, __LINE__);
}
  333. #endif /* BF537_GENERIC_ERROR_INT_DEMUX */
/* Install a flow handler for an IRQ without taking the descriptor lock. */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	/* NOTE(review): under I-pipe the requested handler is ignored and
	 * handle_level_irq is always installed — confirm this is intended.
	 */
	_set_irq_handler(irq, handle_level_irq);
#else
	struct irq_desc *desc = irq_desc + irq;
	/* May not call generic set_irq_handler() due to spinlock
	   recursion. */
	desc->handle_irq = handle;
#endif
}
  345. static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
  346. extern void bfin_gpio_irq_prepare(unsigned gpio);
  347. #if !defined(CONFIG_BF54x)
  348. static void bfin_gpio_ack_irq(unsigned int irq)
  349. {
  350. /* AFAIK ack_irq in case mask_ack is provided
  351. * get's only called for edge sense irqs
  352. */
  353. set_gpio_data(irq_to_gpio(irq), 0);
  354. }
/* .mask_ack: for edge IRQs clear the latched data bit, then mask. */
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 gpionr = irq_to_gpio(irq);

	if (desc->handle_irq == handle_edge_irq)
		set_gpio_data(gpionr, 0);

	set_gpio_maska(gpionr, 0);
}
  363. static void bfin_gpio_mask_irq(unsigned int irq)
  364. {
  365. set_gpio_maska(irq_to_gpio(irq), 0);
  366. }
  367. static void bfin_gpio_unmask_irq(unsigned int irq)
  368. {
  369. set_gpio_maska(irq_to_gpio(irq), 1);
  370. }
/* .startup: enable a GPIO IRQ.
 *
 * NOTE(review): bfin_gpio_irq_prepare() runs only when the enabled bit
 * was ALREADY set; intuitively a pin needs preparing when it was NOT
 * yet enabled, so the __test_and_set_bit() polarity looks suspicious —
 * confirm against bfin_gpio_irq_prepare()'s contract before changing.
 */
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(irq);

	return 0;
}
  379. static void bfin_gpio_irq_shutdown(unsigned int irq)
  380. {
  381. u32 gpionr = irq_to_gpio(irq);
  382. bfin_gpio_mask_irq(irq);
  383. __clear_bit(gpionr, gpio_enabled);
  384. bfin_gpio_irq_free(gpionr);
  385. }
/* .set_type: program a GPIO pin's interrupt sense.
 *
 * @irq:  GPIO interrupt number
 * @type: IRQ_TYPE_* bits (edge rising/falling, level high/low, probe)
 *
 * Returns 0 on success or the error from bfin_gpio_irq_request().
 * Both-edges requests enable the hardware "both" mode; otherwise the
 * polarity bit selects falling/low (1) vs rising/high (0).
 */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		/* NOTE(review): prepare only runs when the bit was already
		 * set — same questionable polarity as bfin_gpio_irq_startup.
		 */
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* reprogram sense bits with input disabled, re-enabled below */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		set_gpio_data(gpionr, 0);	/* clear any stale latched edge */
	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
  434. #ifdef CONFIG_PM
  435. int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
  436. {
  437. unsigned gpio = irq_to_gpio(irq);
  438. if (state)
  439. gpio_pm_wakeup_request(gpio, PM_WAKE_IGNORE);
  440. else
  441. gpio_pm_wakeup_free(gpio);
  442. return 0;
  443. }
  444. #endif
  445. static void bfin_demux_gpio_irq(unsigned int inta_irq,
  446. struct irq_desc *desc)
  447. {
  448. unsigned int i, gpio, mask, irq, search = 0;
  449. switch (inta_irq) {
  450. #if defined(CONFIG_BF53x)
  451. case IRQ_PROG_INTA:
  452. irq = IRQ_PF0;
  453. search = 1;
  454. break;
  455. # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
  456. case IRQ_MAC_RX:
  457. irq = IRQ_PH0;
  458. break;
  459. # endif
  460. #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
  461. case IRQ_PORTF_INTA:
  462. irq = IRQ_PF0;
  463. break;
  464. #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
  465. case IRQ_PORTF_INTA:
  466. irq = IRQ_PF0;
  467. break;
  468. case IRQ_PORTG_INTA:
  469. irq = IRQ_PG0;
  470. break;
  471. case IRQ_PORTH_INTA:
  472. irq = IRQ_PH0;
  473. break;
  474. #elif defined(CONFIG_BF561)
  475. case IRQ_PROG0_INTA:
  476. irq = IRQ_PF0;
  477. break;
  478. case IRQ_PROG1_INTA:
  479. irq = IRQ_PF16;
  480. break;
  481. case IRQ_PROG2_INTA:
  482. irq = IRQ_PF32;
  483. break;
  484. #endif
  485. default:
  486. BUG();
  487. return;
  488. }
  489. if (search) {
  490. for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
  491. irq += i;
  492. mask = get_gpiop_data(i) & get_gpiop_maska(i);
  493. while (mask) {
  494. if (mask & 1)
  495. bfin_handle_irq(irq);
  496. irq++;
  497. mask >>= 1;
  498. }
  499. }
  500. } else {
  501. gpio = irq_to_gpio(irq);
  502. mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
  503. do {
  504. if (mask & 1)
  505. bfin_handle_irq(irq);
  506. irq++;
  507. mask >>= 1;
  508. } while (mask);
  509. }
  510. }
  511. #else /* CONFIG_BF54x */
#define NR_PINT_SYS_IRQS	4	/* PINT0..PINT3 controllers */
#define NR_PINT_BITS		32	/* bits per PINT controller */
#define NR_PINTS		160
#define IRQ_NOT_AVAIL		0xFF	/* lut sentinel: pin not routed to a PINT */

#define PINT_2_BANK(x)		((x) >> 5)
#define PINT_2_BIT(x)		((x) & 0x1F)
#define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))

/* Bidirectional lookups between system GPIO IRQ numbers and (bank,bit)
 * positions in the PINT controllers; rebuilt by init_pint_lut().
 */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* Register layout of one memory-mapped PINTx block.
 * NOTE(review): fields are not declared volatile; MMIO access relies on
 * the compiler not caching/eliding these — confirm accessor/compiler
 * guarantees (see e.g. the self-assignment in bfin_pm_restore()).
 */
struct pin_int_t {
	unsigned int mask_set;
	unsigned int mask_clear;
	unsigned int request;
	unsigned int assign;
	unsigned int edge_set;
	unsigned int edge_clear;
	unsigned int invert_set;
	unsigned int invert_clear;
	unsigned int pinstate;
	unsigned int latch;
};

/* One pointer per PINT controller, aimed at its MMIO base. */
static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
	(struct pin_int_t *)PINT0_MASK_SET,
	(struct pin_int_t *)PINT1_MASK_SET,
	(struct pin_int_t *)PINT2_MASK_SET,
	(struct pin_int_t *)PINT3_MASK_SET,
};
  539. inline unsigned int get_irq_base(u32 bank, u8 bmap)
  540. {
  541. unsigned int irq_base;
  542. if (bank < 2) { /*PA-PB */
  543. irq_base = IRQ_PA0 + bmap * 16;
  544. } else { /*PC-PJ */
  545. irq_base = IRQ_PC0 + bmap * 16;
  546. }
  547. return irq_base;
  548. }
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
void init_pint_lut(void)
{
	u16 bank, bit, irq_base, bit_pos;
	u32 pint_assign;
	u8 bmap;

	/* Rebuild both direction LUTs from the live PINTx assign
	 * registers; unrouted IRQs stay at IRQ_NOT_AVAIL.
	 */
	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
		pint_assign = pint[bank]->assign;
		for (bit = 0; bit < NR_PINT_BITS; bit++) {
			/* each byte of the assign register covers 8 bits */
			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
			irq_base = get_irq_base(bank, bmap);
			/* odd byte lanes map to the upper 8 pins */
			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
			bit_pos = bit + bank * NR_PINT_BITS;
			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
		}
	}
}
/* .ack for BF54x pin interrupts: clear the latched request bit.  For
 * both-edge IRQs the invert bit is toggled each ack so the opposite
 * edge is armed next (seeded by bfin_gpio_irq_type()).
 */
static void bfin_gpio_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}

	pint[bank]->request = pintbit;
}
/* .mask_ack for BF54x: same as bfin_gpio_ack_irq() above, plus clear
 * the pin's mask bit so no further requests are delivered.
 */
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}

	pint[bank]->request = pintbit;
	pint[bank]->mask_clear = pintbit;
}
  597. static void bfin_gpio_mask_irq(unsigned int irq)
  598. {
  599. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  600. pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
  601. }
  602. static void bfin_gpio_unmask_irq(unsigned int irq)
  603. {
  604. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  605. u32 pintbit = PINT_BIT(pint_val);
  606. u32 bank = PINT_2_BANK(pint_val);
  607. pint[bank]->request = pintbit;
  608. pint[bank]->mask_set = pintbit;
  609. }
/* .startup for BF54x GPIO IRQs; fails if the pin is not routed to a
 * PINT bank.
 *
 * NOTE(review): returns -ENODEV from an unsigned-int function (the
 * irq_chip ->startup signature) — callers see a huge positive value.
 * Also the __test_and_set_bit() polarity mirrors the questionable
 * pattern in the non-BF54x startup; confirm before changing either.
 */
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];

	if (pint_val == IRQ_NOT_AVAIL) {
		printk(KERN_ERR
		       "GPIO IRQ %d :Not in PINT Assign table "
		       "Reconfigure Interrupt to Port Assignemt\n", irq);
		return -ENODEV;
	}

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(irq);

	return 0;
}
  625. static void bfin_gpio_irq_shutdown(unsigned int irq)
  626. {
  627. u32 gpionr = irq_to_gpio(irq);
  628. bfin_gpio_mask_irq(irq);
  629. __clear_bit(gpionr, gpio_enabled);
  630. bfin_gpio_irq_free(gpionr);
  631. }
/* .set_type for BF54x pin interrupts: program the PINT invert
 * (polarity) and edge bits for the requested IRQ_TYPE_* sense.
 *
 * Returns 0 on success, -ENODEV when the pin is not routed to a PINT,
 * or the error from bfin_gpio_irq_request().
 */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (pint_val == IRQ_NOT_AVAIL)
		return -ENODEV;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
	else
		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		/* both edges: seed invert from the current pin level; the
		 * ack handlers flip it on every event thereafter */
		if (gpio_get_value(gpionr))
			pint[bank]->invert_set = pintbit;
		else
			pint[bank]->invert_clear = pintbit;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		pint[bank]->edge_set = pintbit;
		bfin_set_irq_handler(irq, handle_edge_irq);
	} else {
		pint[bank]->edge_clear = pintbit;
		bfin_set_irq_handler(irq, handle_level_irq);
	}

	return 0;
}
#ifdef CONFIG_PM
/* Per-bank PINT masks saved across suspend, and the subset of pins
 * allowed to wake the system (maintained by bfin_gpio_set_wake).
 */
u32 pint_saved_masks[NR_PINT_SYS_IRQS];
u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
  683. int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
  684. {
  685. u32 pint_irq;
  686. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  687. u32 bank = PINT_2_BANK(pint_val);
  688. u32 pintbit = PINT_BIT(pint_val);
  689. switch (bank) {
  690. case 0:
  691. pint_irq = IRQ_PINT0;
  692. break;
  693. case 2:
  694. pint_irq = IRQ_PINT2;
  695. break;
  696. case 3:
  697. pint_irq = IRQ_PINT3;
  698. break;
  699. case 1:
  700. pint_irq = IRQ_PINT1;
  701. break;
  702. default:
  703. return -EINVAL;
  704. }
  705. bfin_internal_set_wake(pint_irq, state);
  706. if (state)
  707. pint_wakeup_masks[bank] |= pintbit;
  708. else
  709. pint_wakeup_masks[bank] &= ~pintbit;
  710. return 0;
  711. }
/* Suspend hook: save the current PINT masks, then swap in the wakeup
 * masks.  Reading mask_clear apparently returns the currently enabled
 * bits, and writing 1s to mask_clear/mask_set disables/enables pins
 * (write-1-to-act) — so writing "val" back clears all enabled bits
 * before the wakeup set is installed.  Always returns 0.
 */
u32 bfin_pm_setup(void)
{
	u32 val, i;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint[i]->mask_clear;
		pint_saved_masks[i] = val;
		if (val ^ pint_wakeup_masks[i]) {
			pint[i]->mask_clear = val;
			pint[i]->mask_set = pint_wakeup_masks[i];
		}
	}

	return 0;
}
/* Resume hook: undo bfin_pm_setup() — drop the wakeup-only masks and
 * reinstall the saved ones.  The self-assignment below reads the
 * enabled bits and writes them back to mask_clear, which with
 * write-1-to-clear semantics disables them all — presumably deliberate.
 * NOTE(review): since pin_int_t fields are not volatile, the compiler
 * could legally elide this self-assignment — verify.
 */
void bfin_pm_restore(void)
{
	u32 i, val;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint_saved_masks[i];
		if (val ^ pint_wakeup_masks[i]) {
			pint[i]->mask_clear = pint[i]->mask_clear;
			pint[i]->mask_set = val;
		}
	}
}
  736. #endif
  737. static void bfin_demux_gpio_irq(unsigned int inta_irq,
  738. struct irq_desc *desc)
  739. {
  740. u32 bank, pint_val;
  741. u32 request, irq;
  742. switch (inta_irq) {
  743. case IRQ_PINT0:
  744. bank = 0;
  745. break;
  746. case IRQ_PINT2:
  747. bank = 2;
  748. break;
  749. case IRQ_PINT3:
  750. bank = 3;
  751. break;
  752. case IRQ_PINT1:
  753. bank = 1;
  754. break;
  755. default:
  756. return;
  757. }
  758. pint_val = bank * NR_PINT_BITS;
  759. request = pint[bank]->request;
  760. while (request) {
  761. if (request & 1) {
  762. irq = pint2irq_lut[pint_val] + SYS_IRQS;
  763. bfin_handle_irq(irq);
  764. }
  765. pint_val++;
  766. request >>= 1;
  767. }
  768. }
  769. #endif
/* irq_chip shared by both GPIO interrupt implementations above (the
 * BF54x PINT-based one and the legacy port-based one).
 */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.ack = bfin_gpio_ack_irq,
	.mask = bfin_gpio_mask_irq,
	.mask_ack = bfin_gpio_mask_ack_irq,
	.unmask = bfin_gpio_unmask_irq,
	.disable = bfin_gpio_mask_irq,
	.enable = bfin_gpio_unmask_irq,
	.set_type = bfin_gpio_irq_type,
	.startup = bfin_gpio_irq_startup,
	.shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
	.set_wake = bfin_gpio_set_wake,
#endif
};
/* Program the core event vector table (EVT2..EVT15); EVT4 is not
 * written here.
 */
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();	/* ensure the vector writes complete before returning */
}
  806. /*
  807. * This function should be called during kernel startup to initialize
  808. * the BFin IRQ handling routines.
  809. */
  810. int __init init_arch_irq(void)
  811. {
  812. int irq;
  813. unsigned long ilat = 0;
  814. /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
  815. #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
  816. || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
  817. bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
  818. bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
  819. # ifdef CONFIG_BF54x
  820. bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
  821. # endif
  822. # ifdef CONFIG_SMP
  823. bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
  824. bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
  825. # endif
  826. #else
  827. bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
  828. #endif
  829. local_irq_disable();
  830. #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
  831. /* Clear EMAC Interrupt Status bits so we can demux it later */
  832. bfin_write_EMAC_SYSTAT(-1);
  833. #endif
  834. #ifdef CONFIG_BF54x
  835. # ifdef CONFIG_PINTx_REASSIGN
  836. pint[0]->assign = CONFIG_PINT0_ASSIGN;
  837. pint[1]->assign = CONFIG_PINT1_ASSIGN;
  838. pint[2]->assign = CONFIG_PINT2_ASSIGN;
  839. pint[3]->assign = CONFIG_PINT3_ASSIGN;
  840. # endif
  841. /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
  842. init_pint_lut();
  843. #endif
  844. for (irq = 0; irq <= SYS_IRQS; irq++) {
  845. if (irq <= IRQ_CORETMR)
  846. set_irq_chip(irq, &bfin_core_irqchip);
  847. else
  848. set_irq_chip(irq, &bfin_internal_irqchip);
  849. switch (irq) {
  850. #if defined(CONFIG_BF53x)
  851. case IRQ_PROG_INTA:
  852. # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
  853. case IRQ_MAC_RX:
  854. # endif
  855. #elif defined(CONFIG_BF54x)
  856. case IRQ_PINT0:
  857. case IRQ_PINT1:
  858. case IRQ_PINT2:
  859. case IRQ_PINT3:
  860. #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
  861. case IRQ_PORTF_INTA:
  862. case IRQ_PORTG_INTA:
  863. case IRQ_PORTH_INTA:
  864. #elif defined(CONFIG_BF561)
  865. case IRQ_PROG0_INTA:
  866. case IRQ_PROG1_INTA:
  867. case IRQ_PROG2_INTA:
  868. #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
  869. case IRQ_PORTF_INTA:
  870. #endif
  871. set_irq_chained_handler(irq,
  872. bfin_demux_gpio_irq);
  873. break;
  874. #ifdef BF537_GENERIC_ERROR_INT_DEMUX
  875. case IRQ_GENERIC_ERROR:
  876. set_irq_chained_handler(irq, bfin_demux_error_irq);
  877. break;
  878. #endif
  879. #ifdef CONFIG_SMP
  880. #ifdef CONFIG_TICKSOURCE_GPTMR0
  881. case IRQ_TIMER0:
  882. #endif
  883. #ifdef CONFIG_TICKSOURCE_CORETMR
  884. case IRQ_CORETMR:
  885. #endif
  886. case IRQ_SUPPLE_0:
  887. case IRQ_SUPPLE_1:
  888. set_irq_handler(irq, handle_percpu_irq);
  889. break;
  890. #endif
  891. #ifdef CONFIG_IPIPE
  892. #ifndef CONFIG_TICKSOURCE_CORETMR
  893. case IRQ_TIMER0:
  894. set_irq_handler(irq, handle_simple_irq);
  895. break;
  896. #endif
  897. case IRQ_CORETMR:
  898. set_irq_handler(irq, handle_simple_irq);
  899. break;
  900. default:
  901. set_irq_handler(irq, handle_level_irq);
  902. break;
  903. #else /* !CONFIG_IPIPE */
  904. default:
  905. set_irq_handler(irq, handle_simple_irq);
  906. break;
  907. #endif /* !CONFIG_IPIPE */
  908. }
  909. }
  910. #ifdef BF537_GENERIC_ERROR_INT_DEMUX
  911. for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
  912. set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
  913. handle_level_irq);
  914. #endif
  915. /* if configured as edge, then will be changed to do_edge_IRQ */
  916. for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
  917. set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
  918. handle_level_irq);
  919. bfin_write_IMASK(0);
  920. CSYNC();
  921. ilat = bfin_read_ILAT();
  922. CSYNC();
  923. bfin_write_ILAT(ilat);
  924. CSYNC();
  925. printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
  926. /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
  927. * local_irq_enable()
  928. */
  929. program_IAR();
  930. /* Therefore it's better to setup IARs before interrupts enabled */
  931. search_IAR();
  932. /* Enable interrupts IVG7-15 */
  933. bfin_irq_flags |= IMASK_IVG15 |
  934. IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
  935. IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
  936. /* This implicitly covers ANOMALY_05000171
  937. * Boot-ROM code modifies SICA_IWRx wakeup registers
  938. */
  939. #ifdef SIC_IWR0
  940. bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
  941. # ifdef SIC_IWR1
  942. /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
  943. * will screw up the bootrom as it relies on MDMA0/1 waking it
  944. * up from IDLE instructions. See this report for more info:
  945. * http://blackfin.uclinux.org/gf/tracker/4323
  946. */
  947. if (ANOMALY_05000435)
  948. bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
  949. else
  950. bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
  951. # endif
  952. # ifdef SIC_IWR2
  953. bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
  954. # endif
  955. #else
  956. bfin_write_SIC_IWR(IWR_DISABLE_ALL);
  957. #endif
  958. return 0;
  959. }
/*
 * do_irq - low-level hardware interrupt dispatcher.
 * @vec: core event vector that fired (EVT_IVTMR_P for the core timer,
 *       otherwise an IVG7..IVG13 system priority level)
 * @fp:  register frame saved at interrupt entry
 *
 * Core timer events map directly to IRQ_CORETMR.  For system events the
 * pending peripheral source is demuxed by intersecting the SIC interrupt
 * status registers with the corresponding mask registers, then scanning
 * the priority-ordered ivg table slice for this level.  The decoded IRQ
 * number is handed to the generic layer via asm_do_IRQ().
 */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
void do_irq(int vec, struct pt_regs *fp)
{
	if (vec == EVT_IVTMR_P) {
		vec = IRQ_CORETMR;
	} else {
		/* Candidate entries for this priority level, set up by
		 * search_IAR(): [ifirst, istop) within the ivg table.
		 */
		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
#if defined(SIC_ISR0) || defined(SICA_ISR0)
		/* Parts with split SIC registers: gather up to 3 words of
		 * "pending AND unmasked" status.
		 */
		unsigned long sic_status[3];

		if (smp_processor_id()) {
# ifdef SICB_ISR0
			/* This will be optimized out in UP mode. */
			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
		} else {
			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
		}
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Scan this level's table slice for a pending source; if we
		 * fall off the end the event was spurious.
		 */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			}
			/* irqno - IVG7 indexes the 32-bit status word holding
			 * this source's bit; isrflag is its bit mask.
			 */
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		/* Single-register SIC parts. */
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		/* Translate the matched table entry to its system IRQ. */
		vec = ivg->irqno;
	}
	asm_do_IRQ(vec, fp);
}
  1008. #ifdef CONFIG_IPIPE
  1009. int __ipipe_get_irq_priority(unsigned irq)
  1010. {
  1011. int ient, prio;
  1012. if (irq <= IRQ_CORETMR)
  1013. return irq;
  1014. for (ient = 0; ient < NR_PERI_INTS; ient++) {
  1015. struct ivgx *ivg = ivg_table + ient;
  1016. if (ivg->irqno == irq) {
  1017. for (prio = 0; prio <= IVG13-IVG7; prio++) {
  1018. if (ivg7_13[prio].ifirst <= ivg &&
  1019. ivg7_13[prio].istop > ivg)
  1020. return IVG7 + prio;
  1021. }
  1022. }
  1023. }
  1024. return IVG15;
  1025. }
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * __ipipe_grab_irq - I-pipe low-level interrupt entry point.
 * @vec:  core event vector that fired
 * @regs: register frame saved at interrupt entry
 *
 * Demuxes the pending system IRQ exactly like do_irq(), then feeds it
 * into the interrupt pipeline instead of the generic handler.  Returns
 * non-zero when the root domain is allowed to synchronize its interrupt
 * log immediately on return, 0 otherwise.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	/* Candidate ivg table slice for this priority level. */
	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
	int irq, s;

	if (likely(vec == EVT_IVTMR_P))
		irq = IRQ_CORETMR;
	else {
#if defined(SIC_ISR0) || defined(SICA_ISR0)
		/* Split-register SIC: collect "pending AND unmasked" words. */
		unsigned long sic_status[3];

		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Scan the slice; falling off the end means spurious. */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		/* Single-register SIC parts. */
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		irq = ivg->irqno;
	}

	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
		/* NOTE(review): 0x10 looks like IPEND bit 4 — the snapshot is
		 * forced to show it set for the root domain and cleared for
		 * others; confirm against the I-pipe timer handling.
		 */
		if (this_domain != ipipe_root_domain)
			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
		else
			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
	}

	if (this_domain == ipipe_root_domain) {
		/* Defer root-domain log syncing while the IRQ is handled;
		 * remember whether it was already deferred.
		 */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			/* We set the defer bit above: undo it and report
			 * whether the root domain is unstalled and may sync
			 * its interrupt log now.
			 */
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}
  1096. #endif /* CONFIG_IPIPE */