/* ints-priority.c */
  1. /*
  2. * Set up the interrupt priorities
  3. *
  4. * Copyright 2004-2009 Analog Devices Inc.
  5. * 2003 Bas Vermeulen <bas@buyways.nl>
  6. * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
  7. * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
  8. * 1999 D. Jeff Dionne <jeff@uclinux.org>
  9. * 1996 Roman Zippel
  10. *
  11. * Licensed under the GPL-2
  12. */
  13. #include <linux/module.h>
  14. #include <linux/kernel_stat.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/irq.h>
  17. #ifdef CONFIG_IPIPE
  18. #include <linux/ipipe.h>
  19. #endif
  20. #ifdef CONFIG_KGDB
  21. #include <linux/kgdb.h>
  22. #endif
  23. #include <asm/traps.h>
  24. #include <asm/blackfin.h>
  25. #include <asm/gpio.h>
  26. #include <asm/irq_handler.h>
  27. #include <asm/dpmc.h>
  28. #include <asm/bfin5xx_spi.h>
  29. #include <asm/bfin_sport.h>
  30. #include <asm/bfin_can.h>
/* Map a system IRQ number to its bit position in the SIC registers. */
#define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))

#ifdef BF537_FAMILY
/* BF537-family parts funnel all peripheral error interrupts through a
 * single SIC line, so they must be demultiplexed in software.  These
 * masks pick out the error bits of each peripheral's status register.
 */
# define BF537_GENERIC_ERROR_INT_DEMUX
# define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE)	/* SPI_STAT */
# define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF)	/* SPORT_STAT */
# define PPI_ERR_MASK (0xFFFF & ~FLD)	/* PPI_STATUS */
# define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE)	/* EMAC_SYSTAT */
# define UART_ERR_MASK (0x6)	/* UART_IIR */
# define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF)	/* CAN_GIF */
#else
# undef BF537_GENERIC_ERROR_INT_DEMUX
#endif
  43. /*
  44. * NOTES:
  45. * - we have separated the physical Hardware interrupt from the
  46. * levels that the LINUX kernel sees (see the description in irq.h)
  47. * -
  48. */
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise). The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif

/* The number of spurious interrupts */
atomic_t num_spurious;

#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
/* Voltage-regulator wakeup bits accumulated by bfin_internal_set_wake() */
unsigned vr_wakeup;
#endif
/* One entry per peripheral interrupt, filled by search_IAR() in
 * priority order.
 */
struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Per-priority-level (IVG7..IVG13) slice of ivg_table. */
struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	/* one past the last ivg_table entry for this ivg */
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
  76. /*
  77. * Search SIC_IAR and fill tables with the irqvalues
  78. * and their positions in the SIC_ISR register.
  79. */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;

	/* For each system priority level (IVG7..IVG13), collect the
	 * peripheral IRQs assigned to it, in SIC_ISR bit order.
	 */
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		/* Each 32-bit SIC_IARx register holds 4-bit priority
		 * assignments for 8 peripheral IRQs; fetch the register
		 * covering the next 4 IRQs and decode the nibbles below.
		 */
		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
			/* IAR registers are banked per 32 IRQs on these parts */
			((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
			(irqN >> 3)
#endif
			);

			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
  108. /*
  109. * This is for core internal IRQs
  110. */
/* No-op acknowledge for irq_chips that need no explicit ack. */
static void bfin_ack_noop(unsigned int irq)
{
	/* Dummy function. */
}
  115. static void bfin_core_mask_irq(unsigned int irq)
  116. {
  117. bfin_irq_flags &= ~(1 << irq);
  118. if (!hard_irqs_disabled())
  119. hard_local_irq_enable();
  120. }
  121. static void bfin_core_unmask_irq(unsigned int irq)
  122. {
  123. bfin_irq_flags |= 1 << irq;
  124. /*
  125. * If interrupts are enabled, IMASK must contain the same value
  126. * as bfin_irq_flags. Make sure that invariant holds. If interrupts
  127. * are currently disabled we need not do anything; one of the
  128. * callers will take care of setting IMASK to the proper value
  129. * when reenabling interrupts.
  130. * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
  131. * what we need.
  132. */
  133. if (!hard_irqs_disabled())
  134. hard_local_irq_enable();
  135. return;
  136. }
/* Mask a peripheral interrupt in the system interrupt controller. */
static void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	flags = hard_local_irq_save();
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << SIC_SYSIRQ(irq)));
#else
	/* Parts with more than 32 peripheral IRQs spread IMASK across
	 * banked registers.
	 */
	unsigned mask_bank, mask_bit;

	flags = hard_local_irq_save();
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#ifdef CONFIG_SMP
	/* Keep core B's SIC mask in sync. */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			      ~(1 << mask_bit));
#endif
#endif
	hard_local_irq_restore(flags);
}
/* Unmask a peripheral interrupt; on SMP the affinity mask selects which
 * core(s) receive it.
 */
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
					      const struct cpumask *affinity)
#else
static void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	flags = hard_local_irq_save();
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			     (1 << SIC_SYSIRQ(irq)));
#else
	unsigned mask_bank, mask_bit;

	flags = hard_local_irq_save();
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
#ifdef CONFIG_SMP
	/* Route to core A only if CPU 0 is in the affinity mask. */
	if (cpumask_test_cpu(0, affinity))
#endif
		bfin_write_SIC_IMASK(mask_bank,
				     bfin_read_SIC_IMASK(mask_bank) |
				     (1 << mask_bit));
#ifdef CONFIG_SMP
	/* And to core B only if CPU 1 is in the affinity mask. */
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
				      bfin_read_SICB_IMASK(mask_bank) |
				      (1 << mask_bit));
#endif
#endif
	hard_local_irq_restore(flags);
}
#ifdef CONFIG_SMP
/* Unmask using the affinity already recorded in the descriptor. */
static void bfin_internal_unmask_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	bfin_internal_unmask_irq_affinity(irq, desc->affinity);
}

/* irq_chip .set_affinity: re-route by masking everywhere, then
 * unmasking with the new cpumask.
 */
static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	bfin_internal_mask_irq(irq);
	bfin_internal_unmask_irq_affinity(irq, mask);

	return 0;
}
#endif
#ifdef CONFIG_PM
/* irq_chip .set_wake: mark/unmark a peripheral IRQ as a wakeup source.
 * Records the IRQ's bit for the SIC_IWRx registers and, for a few
 * special sources, the matching voltage-regulator wakeup enable.
 * Always succeeds (returns 0).
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;

	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	/* Some wakeup sources also need a VR_CTL wakeup enable bit. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;
	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}
#endif
/* irq_chip for the core (CEC) interrupts. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.ack = bfin_ack_noop,
	.mask = bfin_core_mask_irq,
	.unmask = bfin_core_unmask_irq,
};
/* irq_chip for peripheral (SIC) interrupts. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.ack = bfin_ack_noop,
	.mask = bfin_internal_mask_irq,
	.unmask = bfin_internal_unmask_irq,
	.mask_ack = bfin_internal_mask_irq,
	.disable = bfin_internal_mask_irq,
	.enable = bfin_internal_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity = bfin_internal_set_affinity,
#endif
#ifdef CONFIG_PM
	.set_wake = bfin_internal_set_wake,
#endif
};
/* Dispatch one demultiplexed IRQ, via the I-pipe when it is enabled. */
static void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;	/* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif /* !CONFIG_IPIPE */
}
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
/* Bitmap of demuxed error IRQs currently unmasked; bit N stands for
 * IRQ (IRQ_PPI_ERROR + N).
 */
static int error_int_mask;

static void bfin_generic_error_mask_irq(unsigned int irq)
{
	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
	/* Mask the shared hardware line only once every demuxed source
	 * is masked.
	 */
	if (!error_int_mask)
		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}

static void bfin_generic_error_unmask_irq(unsigned int irq)
{
	/* Any unmasked source keeps the shared line enabled. */
	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
}

/* irq_chip for the software-demuxed generic error interrupts. */
static struct irq_chip bfin_generic_error_irqchip = {
	.name = "ERROR",
	.ack = bfin_ack_noop,
	.mask_ack = bfin_generic_error_mask_irq,
	.mask = bfin_generic_error_mask_irq,
	.unmask = bfin_generic_error_unmask_irq,
};
/* Demultiplex the shared peripheral-error interrupt: poll each
 * peripheral's status register to find the source, then either hand
 * the per-source IRQ on (if unmasked) or ack the hardware directly so
 * the line deasserts.
 */
static void bfin_demux_error_irq(unsigned int int_err_irq,
				 struct irq_desc *inta_desc)
{
	int irq = 0;

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
		irq = IRQ_MAC_ERROR;
	else
#endif
	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT0_ERROR;
	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT1_ERROR;
	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
		irq = IRQ_PPI_ERROR;
	/* NOTE(review): the source check reads CAN_GIF but the masked-ack
	 * path below writes CAN_GIS -- confirm against the hardware
	 * reference that this asymmetry is intended.
	 */
	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
		irq = IRQ_CAN_ERROR;
	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
		irq = IRQ_SPI_ERROR;
	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
		irq = IRQ_UART0_ERROR;
	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
		irq = IRQ_UART1_ERROR;

	if (irq) {
		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
			bfin_handle_irq(irq);
		else {
			/* Source is masked at the demux level: clear its
			 * status bits directly so the shared line drops.
			 */
			switch (irq) {
			case IRQ_PPI_ERROR:
				bfin_write_PPI_STATUS(PPI_ERR_MASK);
				break;
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
			case IRQ_MAC_ERROR:
				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
				break;
#endif
			case IRQ_SPORT0_ERROR:
				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_SPORT1_ERROR:
				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_CAN_ERROR:
				bfin_write_CAN_GIS(CAN_ERR_MASK);
				break;
			case IRQ_SPI_ERROR:
				bfin_write_SPI_STAT(SPI_ERR_MASK);
				break;
			default:
				break;
			}

			pr_debug("IRQ %d:"
				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
		       __func__, __FILE__, __LINE__);
}
#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* Bitmap of demuxed MAC status IRQs currently unmasked; bit N stands
 * for IRQ (IRQ_MAC_PHYINT + N).
 */
static int mac_stat_int_mask;

/* Clear the hardware condition that raised the given MAC status IRQ. */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		/* Clear only the enabled-and-pending MMC counter IRQs. */
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
static void bfin_mac_status_mask_irq(unsigned int irq)
{
	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	/* With the generic-error demux only PHYINT has its own enable. */
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	/* Mask the shared line once no demuxed source remains unmasked. */
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}
static void bfin_mac_status_unmask_irq(unsigned int irq)
{
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	/* With the generic-error demux only PHYINT has its own enable. */
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	/* The first unmasked source re-enables the shared line. */
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
#ifdef CONFIG_PM
/* Delegate wakeup control to whichever parent line carries the MAC
 * status interrupt on this part.
 */
int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
{
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#endif
/* irq_chip for the software-demuxed MAC status interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.ack = bfin_ack_noop,
	.mask_ack = bfin_mac_status_mask_irq,
	.mask = bfin_mac_status_mask_irq,
	.unmask = bfin_mac_status_unmask_irq,
#ifdef CONFIG_PM
	.set_wake = bfin_mac_status_set_wake,
#endif
};
/* Demultiplex the shared MAC status interrupt: find the first set
 * EMAC_SYSTAT bit and dispatch (or just ack) the matching IRQ.
 */
static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
				      struct irq_desc *inta_desc)
{
	int i, irq = 0;
	u32 status = bfin_read_EMAC_SYSTAT();

	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
		if (status & (1L << i)) {
			irq = IRQ_MAC_PHYINT + i;
			break;
		}

	if (irq) {
		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
			bfin_handle_irq(irq);
		} else {
			/* Masked at the demux level: ack it directly so the
			 * shared line deasserts.
			 */
			bfin_mac_status_ack_irq(irq);
			pr_debug("IRQ %d:"
				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
		       "(EMAC_SYSTAT=0x%X)\n",
		       __func__, __FILE__, __LINE__, status);
}
#endif
/* Install the flow handler for an IRQ.  NOTE: under CONFIG_IPIPE the
 * requested handler is ignored and handle_level_irq is always used.
 */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	_set_irq_handler(irq, handle_level_irq);
#else
	__set_irq_handler_unlocked(irq, handle);
#endif
}
/* One bit per GPIO that is currently configured as an interrupt. */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
#if !defined(CONFIG_BF54x)
static void bfin_gpio_ack_irq(unsigned int irq)
{
	/* AFAIK ack_irq in case mask_ack is provided
	 * gets only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(irq), 0);
}
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	u32 gpionr = irq_to_gpio(irq);

	/* Edge-triggered lines latch into the data register; clear the
	 * latch before masking.
	 */
	if (desc->handle_irq == handle_edge_irq)
		set_gpio_data(gpionr, 0);

	set_gpio_maska(gpionr, 0);
}
  504. static void bfin_gpio_mask_irq(unsigned int irq)
  505. {
  506. set_gpio_maska(irq_to_gpio(irq), 0);
  507. }
  508. static void bfin_gpio_unmask_irq(unsigned int irq)
  509. {
  510. set_gpio_maska(irq_to_gpio(irq), 1);
  511. }
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);

	/* NOTE(review): __test_and_set_bit() returns the *previous* bit
	 * state, so prepare runs only when the GPIO was already marked
	 * enabled -- this looks inverted; confirm intended behaviour.
	 */
	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(irq);

	return 0;
}
  520. static void bfin_gpio_irq_shutdown(unsigned int irq)
  521. {
  522. u32 gpionr = irq_to_gpio(irq);
  523. bfin_gpio_mask_irq(irq);
  524. __clear_bit(gpionr, gpio_enabled);
  525. bfin_gpio_irq_free(gpionr);
  526. }
/* Configure a GPIO interrupt's trigger type (edge/level, polarity). */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		/* Claim the pin for interrupt use. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		/* NOTE(review): prepare only runs when the bit was already
		 * set (see bfin_gpio_irq_startup) -- confirm intended.
		 */
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Set as input; trigger bits are programmed before re-enabling. */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* Clear any stale latched edge before use. */
		set_gpio_data(gpionr, 0);
	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
  575. #ifdef CONFIG_PM
  576. int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
  577. {
  578. return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
  579. }
  580. #endif
/* Demultiplex a banked GPIO interrupt into per-pin IRQs. */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
				struct irq_desc *desc)
{
	unsigned int i, gpio, mask, irq, search = 0;

	switch (inta_irq) {
#if defined(CONFIG_BF53x)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		search = 1;	/* INTA may carry any bank; scan them all */
		break;
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
	case IRQ_MAC_RX:
		irq = IRQ_PH0;
		break;
# endif
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	if (search) {
		/* NOTE(review): irq is advanced by the inner loop and then
		 * bumped by "irq += i" without being reset per bank, so the
		 * pin-to-IRQ mapping can drift once any bank's scan stops
		 * early -- confirm against the upstream driver.
		 */
		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
			irq += i;

			mask = get_gpiop_data(i) & get_gpiop_maska(i);

			while (mask) {
				if (mask & 1)
					bfin_handle_irq(irq);
				irq++;
				mask >>= 1;
			}
		}
	} else {
		gpio = irq_to_gpio(irq);
		mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

		do {
			if (mask & 1)
				bfin_handle_irq(irq);
			irq++;
			mask >>= 1;
		} while (mask);
	}
}
#else /* CONFIG_BF54x */
#define NR_PINT_SYS_IRQS	4
#define NR_PINT_BITS		32
#define NR_PINTS		160
#define IRQ_NOT_AVAIL		0xFF

/* Decompose a PINT lookup value into bank (upper bits) and bit (4:0). */
#define PINT_2_BANK(x)		((x) >> 5)
#define PINT_2_BIT(x)		((x) & 0x1F)
#define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))

/* Forward/reverse maps between GPIO IRQ numbers and PINT bank/bit,
 * rebuilt by init_pint_lut().
 */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* Memory-mapped register layout of one PINTx block. */
struct pin_int_t {
	unsigned int mask_set;
	unsigned int mask_clear;
	unsigned int request;
	unsigned int assign;
	unsigned int edge_set;
	unsigned int edge_clear;
	unsigned int invert_set;
	unsigned int invert_clear;
	unsigned int pinstate;
	unsigned int latch;
};

static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
	(struct pin_int_t *)PINT0_MASK_SET,
	(struct pin_int_t *)PINT1_MASK_SET,
	(struct pin_int_t *)PINT2_MASK_SET,
	(struct pin_int_t *)PINT3_MASK_SET,
};
  675. inline unsigned int get_irq_base(u32 bank, u8 bmap)
  676. {
  677. unsigned int irq_base;
  678. if (bank < 2) { /*PA-PB */
  679. irq_base = IRQ_PA0 + bmap * 16;
  680. } else { /*PC-PJ */
  681. irq_base = IRQ_PC0 + bmap * 16;
  682. }
  683. return irq_base;
  684. }
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
void init_pint_lut(void)
{
	u16 bank, bit, irq_base, bit_pos;
	u32 pint_assign;
	u8 bmap;

	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
		pint_assign = pint[bank]->assign;

		for (bit = 0; bit < NR_PINT_BITS; bit++) {
			/* Each byte of PINTx_ASSIGN maps 8 PINT bits to a
			 * port half; decode it into a GPIO IRQ base.
			 */
			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;

			irq_base = get_irq_base(bank, bmap);
			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
			bit_pos = bit + bank * NR_PINT_BITS;

			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
		}
	}
}
static void bfin_gpio_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	/* For both-edge lines, flip the invert bit so the next opposite
	 * edge is also detected.
	 */
	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}
	/* Clear the latched request for this pin. */
	pint[bank]->request = pintbit;
}
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	/* As in bfin_gpio_ack_irq: re-arm the opposite edge for
	 * both-edge lines, clear the latched request, then mask.
	 */
	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}

	pint[bank]->request = pintbit;
	pint[bank]->mask_clear = pintbit;
}
  733. static void bfin_gpio_mask_irq(unsigned int irq)
  734. {
  735. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  736. pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
  737. }
  738. static void bfin_gpio_unmask_irq(unsigned int irq)
  739. {
  740. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  741. u32 pintbit = PINT_BIT(pint_val);
  742. u32 bank = PINT_2_BANK(pint_val);
  743. pint[bank]->mask_set = pintbit;
  744. }
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];

	if (pint_val == IRQ_NOT_AVAIL) {
		printk(KERN_ERR
		       "GPIO IRQ %d :Not in PINT Assign table "
		       "Reconfigure Interrupt to Port Assignemt\n", irq);
		/* NOTE(review): negative errno returned from an unsigned
		 * function -- callers see a large positive value; confirm.
		 */
		return -ENODEV;
	}

	/* NOTE(review): __test_and_set_bit() returns the previous state,
	 * so prepare runs only when the bit was already set -- looks
	 * inverted; confirm intended behaviour.
	 */
	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(irq);

	return 0;
}
  760. static void bfin_gpio_irq_shutdown(unsigned int irq)
  761. {
  762. u32 gpionr = irq_to_gpio(irq);
  763. bfin_gpio_mask_irq(irq);
  764. __clear_bit(gpionr, gpio_enabled);
  765. bfin_gpio_irq_free(gpionr);
  766. }
/* Configure a PINT-routed GPIO interrupt's trigger type. */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (pint_val == IRQ_NOT_AVAIL)
		return -ENODEV;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		/* Claim the pin for interrupt use. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		/* NOTE(review): as in bfin_gpio_irq_startup, prepare only
		 * runs when the bit was already set -- confirm intended.
		 */
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
	else
		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */

	/* Both edges: start with the invert bit matching the pin's current
	 * level so the next transition in either direction is caught (the
	 * ack path keeps flipping it afterwards).
	 */
	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		if (gpio_get_value(gpionr))
			pint[bank]->invert_set = pintbit;
		else
			pint[bank]->invert_clear = pintbit;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		pint[bank]->edge_set = pintbit;
		bfin_set_irq_handler(irq, handle_edge_irq);
	} else {
		pint[bank]->edge_clear = pintbit;
		bfin_set_irq_handler(irq, handle_level_irq);
	}

	return 0;
}
  815. #ifdef CONFIG_PM
  816. u32 pint_saved_masks[NR_PINT_SYS_IRQS];
  817. u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
  818. int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
  819. {
  820. u32 pint_irq;
  821. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  822. u32 bank = PINT_2_BANK(pint_val);
  823. u32 pintbit = PINT_BIT(pint_val);
  824. switch (bank) {
  825. case 0:
  826. pint_irq = IRQ_PINT0;
  827. break;
  828. case 2:
  829. pint_irq = IRQ_PINT2;
  830. break;
  831. case 3:
  832. pint_irq = IRQ_PINT3;
  833. break;
  834. case 1:
  835. pint_irq = IRQ_PINT1;
  836. break;
  837. default:
  838. return -EINVAL;
  839. }
  840. bfin_internal_set_wake(pint_irq, state);
  841. if (state)
  842. pint_wakeup_masks[bank] |= pintbit;
  843. else
  844. pint_wakeup_masks[bank] &= ~pintbit;
  845. return 0;
  846. }
/*
 * Save the current PINT interrupt masks and program the wakeup masks in
 * their place before entering a low-power state.  Counterpart of
 * bfin_pm_restore().  Always returns 0.
 */
u32 bfin_pm_setup(void)
{
	u32 val, i;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		/* Reading mask_clear yields the currently enabled bits. */
		val = pint[i]->mask_clear;
		pint_saved_masks[i] = val;
		if (val ^ pint_wakeup_masks[i]) {
			/* NOTE(review): mask_clear is presumably a
			 * write-one-to-clear register, so writing back the
			 * enabled bits disables them before the wakeup set is
			 * enabled via mask_set — confirm against the PINT
			 * register description in the hardware reference. */
			pint[i]->mask_clear = val;
			pint[i]->mask_set = pint_wakeup_masks[i];
		}
	}
	return 0;
}
/*
 * Restore the PINT interrupt masks saved by bfin_pm_setup() after
 * resuming from a low-power state.
 */
void bfin_pm_restore(void)
{
	u32 i, val;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint_saved_masks[i];
		if (val ^ pint_wakeup_masks[i]) {
			/* NOTE(review): reads mask_clear and writes the same
			 * value back; this looks like a no-op but presumably
			 * mask_clear is write-one-to-clear, so it disables
			 * everything currently enabled (the wakeup set)
			 * before the saved mask is re-enabled via mask_set —
			 * confirm against the hardware reference. */
			pint[i]->mask_clear = pint[i]->mask_clear;
			pint[i]->mask_set = val;
		}
	}
}
#endif
  872. static void bfin_demux_gpio_irq(unsigned int inta_irq,
  873. struct irq_desc *desc)
  874. {
  875. u32 bank, pint_val;
  876. u32 request, irq;
  877. switch (inta_irq) {
  878. case IRQ_PINT0:
  879. bank = 0;
  880. break;
  881. case IRQ_PINT2:
  882. bank = 2;
  883. break;
  884. case IRQ_PINT3:
  885. bank = 3;
  886. break;
  887. case IRQ_PINT1:
  888. bank = 1;
  889. break;
  890. default:
  891. return;
  892. }
  893. pint_val = bank * NR_PINT_BITS;
  894. request = pint[bank]->request;
  895. while (request) {
  896. if (request & 1) {
  897. irq = pint2irq_lut[pint_val] + SYS_IRQS;
  898. bfin_handle_irq(irq);
  899. }
  900. pint_val++;
  901. request >>= 1;
  902. }
  903. }
  904. #endif
/*
 * irq_chip for the GPIO pin interrupts that are demultiplexed from the
 * port/PINT system interrupts by bfin_demux_gpio_irq().  enable/disable
 * simply alias unmask/mask.
 */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.ack = bfin_gpio_ack_irq,
	.mask = bfin_gpio_mask_irq,
	.mask_ack = bfin_gpio_mask_ack_irq,
	.unmask = bfin_gpio_unmask_irq,
	.disable = bfin_gpio_mask_irq,
	.enable = bfin_gpio_unmask_irq,
	.set_type = bfin_gpio_irq_type,
	.startup = bfin_gpio_irq_startup,
	.shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
	.set_wake = bfin_gpio_set_wake,
#endif
};
/*
 * Program the core event vector table (EVT2..EVT15) with the kernel's
 * exception/interrupt entry points, then CSYNC to make sure the writes
 * have landed before any event can be taken.
 *
 * NOTE(review): EVT4 is not written here — presumably reserved/unused
 * on this core; confirm against the Blackfin core event table.
 */
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}
  941. /*
  942. * This function should be called during kernel startup to initialize
  943. * the BFin IRQ handling routines.
  944. */
  945. int __init init_arch_irq(void)
  946. {
  947. int irq;
  948. unsigned long ilat = 0;
  949. /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
  950. #if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
  951. || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
  952. bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
  953. bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
  954. # ifdef CONFIG_BF54x
  955. bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
  956. # endif
  957. # ifdef CONFIG_SMP
  958. bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
  959. bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
  960. # endif
  961. #else
  962. bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
  963. #endif
  964. local_irq_disable();
  965. #if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
  966. /* Clear EMAC Interrupt Status bits so we can demux it later */
  967. bfin_write_EMAC_SYSTAT(-1);
  968. #endif
  969. #ifdef CONFIG_BF54x
  970. # ifdef CONFIG_PINTx_REASSIGN
  971. pint[0]->assign = CONFIG_PINT0_ASSIGN;
  972. pint[1]->assign = CONFIG_PINT1_ASSIGN;
  973. pint[2]->assign = CONFIG_PINT2_ASSIGN;
  974. pint[3]->assign = CONFIG_PINT3_ASSIGN;
  975. # endif
  976. /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
  977. init_pint_lut();
  978. #endif
  979. for (irq = 0; irq <= SYS_IRQS; irq++) {
  980. if (irq <= IRQ_CORETMR)
  981. set_irq_chip(irq, &bfin_core_irqchip);
  982. else
  983. set_irq_chip(irq, &bfin_internal_irqchip);
  984. switch (irq) {
  985. #if defined(CONFIG_BF53x)
  986. case IRQ_PROG_INTA:
  987. # if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
  988. case IRQ_MAC_RX:
  989. # endif
  990. #elif defined(CONFIG_BF54x)
  991. case IRQ_PINT0:
  992. case IRQ_PINT1:
  993. case IRQ_PINT2:
  994. case IRQ_PINT3:
  995. #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
  996. case IRQ_PORTF_INTA:
  997. case IRQ_PORTG_INTA:
  998. case IRQ_PORTH_INTA:
  999. #elif defined(CONFIG_BF561)
  1000. case IRQ_PROG0_INTA:
  1001. case IRQ_PROG1_INTA:
  1002. case IRQ_PROG2_INTA:
  1003. #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
  1004. case IRQ_PORTF_INTA:
  1005. #endif
  1006. set_irq_chained_handler(irq,
  1007. bfin_demux_gpio_irq);
  1008. break;
  1009. #ifdef BF537_GENERIC_ERROR_INT_DEMUX
  1010. case IRQ_GENERIC_ERROR:
  1011. set_irq_chained_handler(irq, bfin_demux_error_irq);
  1012. break;
  1013. #endif
  1014. #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
  1015. case IRQ_MAC_ERROR:
  1016. set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
  1017. break;
  1018. #endif
  1019. #ifdef CONFIG_SMP
  1020. case IRQ_SUPPLE_0:
  1021. case IRQ_SUPPLE_1:
  1022. set_irq_handler(irq, handle_percpu_irq);
  1023. break;
  1024. #endif
  1025. #ifdef CONFIG_TICKSOURCE_CORETMR
  1026. case IRQ_CORETMR:
  1027. # ifdef CONFIG_SMP
  1028. set_irq_handler(irq, handle_percpu_irq);
  1029. break;
  1030. # else
  1031. set_irq_handler(irq, handle_simple_irq);
  1032. break;
  1033. # endif
  1034. #endif
  1035. #ifdef CONFIG_TICKSOURCE_GPTMR0
  1036. case IRQ_TIMER0:
  1037. set_irq_handler(irq, handle_simple_irq);
  1038. break;
  1039. #endif
  1040. #ifdef CONFIG_IPIPE
  1041. default:
  1042. set_irq_handler(irq, handle_level_irq);
  1043. break;
  1044. #else /* !CONFIG_IPIPE */
  1045. default:
  1046. set_irq_handler(irq, handle_simple_irq);
  1047. break;
  1048. #endif /* !CONFIG_IPIPE */
  1049. }
  1050. }
  1051. #ifdef BF537_GENERIC_ERROR_INT_DEMUX
  1052. for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
  1053. set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
  1054. handle_level_irq);
  1055. #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
  1056. set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
  1057. #endif
  1058. #endif
  1059. #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
  1060. for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
  1061. set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
  1062. handle_level_irq);
  1063. #endif
  1064. /* if configured as edge, then will be changed to do_edge_IRQ */
  1065. for (irq = GPIO_IRQ_BASE;
  1066. irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
  1067. set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
  1068. handle_level_irq);
  1069. bfin_write_IMASK(0);
  1070. CSYNC();
  1071. ilat = bfin_read_ILAT();
  1072. CSYNC();
  1073. bfin_write_ILAT(ilat);
  1074. CSYNC();
  1075. printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
  1076. /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
  1077. * local_irq_enable()
  1078. */
  1079. program_IAR();
  1080. /* Therefore it's better to setup IARs before interrupts enabled */
  1081. search_IAR();
  1082. /* Enable interrupts IVG7-15 */
  1083. bfin_irq_flags |= IMASK_IVG15 |
  1084. IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
  1085. IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
  1086. /* This implicitly covers ANOMALY_05000171
  1087. * Boot-ROM code modifies SICA_IWRx wakeup registers
  1088. */
  1089. #ifdef SIC_IWR0
  1090. bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
  1091. # ifdef SIC_IWR1
  1092. /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
  1093. * will screw up the bootrom as it relies on MDMA0/1 waking it
  1094. * up from IDLE instructions. See this report for more info:
  1095. * http://blackfin.uclinux.org/gf/tracker/4323
  1096. */
  1097. if (ANOMALY_05000435)
  1098. bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
  1099. else
  1100. bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
  1101. # endif
  1102. # ifdef SIC_IWR2
  1103. bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
  1104. # endif
  1105. #else
  1106. bfin_write_SIC_IWR(IWR_DISABLE_ALL);
  1107. #endif
  1108. return 0;
  1109. }
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * Core interrupt entry: translate the hardware event vector @vec into
 * the specific system IRQ that is asserted (by scanning the SIC status
 * registers in priority order) and hand it to asm_do_IRQ().  Spurious
 * events (no pending source found) only bump the num_spurious counter.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	if (vec == EVT_IVTMR_P) {
		/* Core timer needs no demux — it has its own event. */
		vec = IRQ_CORETMR;
	} else {
		/* ifirst..istop bound the ivg entries sharing this priority. */
		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
#if defined(SIC_ISR0)
		unsigned long sic_status[3];
		/* Pick this core's SIC bank: core B uses the SICB copies. */
		if (smp_processor_id()) {
# ifdef SICB_ISR0
			/* This will be optimized out in UP mode. */
			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
		} else {
			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
		}
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Scan this priority level for the first pending+unmasked
		 * source; bail out as spurious if none is found. */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		/* Single-register SIC variant of the same scan. */
		unsigned long sic_status;
		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		vec = ivg->irqno;
	}
	asm_do_IRQ(vec, fp);
}
  1158. #ifdef CONFIG_IPIPE
  1159. int __ipipe_get_irq_priority(unsigned irq)
  1160. {
  1161. int ient, prio;
  1162. if (irq <= IRQ_CORETMR)
  1163. return irq;
  1164. for (ient = 0; ient < NR_PERI_INTS; ient++) {
  1165. struct ivgx *ivg = ivg_table + ient;
  1166. if (ivg->irqno == irq) {
  1167. for (prio = 0; prio <= IVG13-IVG7; prio++) {
  1168. if (ivg7_13[prio].ifirst <= ivg &&
  1169. ivg7_13[prio].istop > ivg)
  1170. return IVG7 + prio;
  1171. }
  1172. }
  1173. }
  1174. return IVG15;
  1175. }
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * I-pipe interrupt entry: demux the hardware vector @vec into a system
 * IRQ (same SIC scan as do_irq), snapshot timer-tick register state,
 * and feed the IRQ into the I-pipe.  Returns nonzero when the root
 * domain should synchronize its interrupt log on exit, 0 otherwise.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
	int irq, s;

	if (likely(vec == EVT_IVTMR_P))
		irq = IRQ_CORETMR;
	else {
#if defined(SIC_ISR0)
		unsigned long sic_status[3];

		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Find the first pending+unmasked source at this priority;
		 * count and drop the event as spurious if none. */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		irq = ivg->irqno;
	}
	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
		/* Fake the IPEND root-domain-stalled bit (0x10) depending on
		 * which domain the tick interrupted. */
		if (this_domain != ipipe_root_domain)
			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
		else
			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
	}
	/* Defer root-domain log syncing while the pipeline runs; 's'
	 * records whether the flag was already set.  It is only read on
	 * the matching root-domain branch below, so it is never used
	 * uninitialized. */
	if (this_domain == ipipe_root_domain) {
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			/* Sync now unless the root domain is stalled. */
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}
#endif /* CONFIG_IPIPE */