/*
 * File:         arch/blackfin/mach-common/ints-priority-sc.c
 * Based on:
 * Author:
 *
 * Created:      ?
 * Description:  Set up the interrupt priorities
 *
 * Modified:
 *               1996 Roman Zippel
 *               1999 D. Jeff Dionne <jeff@uclinux.org>
 *               2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
 *               2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
 *               2003 Metrowerks/Motorola
 *               2003 Bas Vermeulen <bas@buyways.nl>
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#ifdef CONFIG_KGDB
#include <linux/kgdb.h>
#endif
#include <asm/traps.h>
#include <asm/blackfin.h>
#include <asm/gpio.h>
#include <asm/irq_handler.h>

#ifdef BF537_FAMILY
# define BF537_GENERIC_ERROR_INT_DEMUX
#else
# undef BF537_GENERIC_ERROR_INT_DEMUX
#endif
/*
 * NOTES:
 * - we have separated the physical Hardware interrupt from the
 *   levels that the LINUX kernel sees (see the description in irq.h)
 * -
 */

unsigned long irq_flags = 0;

/* The number of spurious interrupts */
atomic_t num_spurious;

struct ivgx {
	/* irq number for request_irq, available in mach-bf533/irq.h */
	int irqno;
	/* corresponding bit in the SIC_ISR register */
	int isrflag;
} ivg_table[NR_PERI_INTS];

struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
static void search_IAR(void);

/*
 * Search SIC_IAR and fill tables with the irq values
 * and their positions in the SIC_ISR register.
 */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;

	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqn;

		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
			int iar_shift = (irqn & 7) * 4;

			if (ivg == (0xf &
				    bfin_read32((unsigned long *)SIC_IAR0 +
						(irqn >> 3)) >> iar_shift)) {
				ivg_table[irq_pos].irqno = IVG7 + irqn;
				ivg_table[irq_pos].isrflag = 1 << irqn;
				ivg7_13[ivg].istop++;
				irq_pos++;
			}
		}
	}
}
/*
 * This is for BF533 internal IRQs
 */
static void ack_noop(unsigned int irq)
{
	/* Dummy function. */
}
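
/*
 * Core event interrupts (IRQ 0 .. IRQ_CORETMR) are masked by clearing the
 * corresponding bit in the cached irq_flags copy of IMASK; if interrupts
 * are currently enabled, local_irq_enable() ("STI irq_flags") writes the
 * updated mask back to the hardware.
 */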
static void bfin_core_mask_irq(unsigned int irq)
{
	irq_flags &= ~(1 << irq);
	if (!irqs_disabled())
		local_irq_enable();
}

static void bfin_core_unmask_irq(unsigned int irq)
{
	irq_flags |= 1 << irq;
	/*
	 * If interrupts are enabled, IMASK must contain the same value
	 * as irq_flags.  Make sure that invariant holds.  If interrupts
	 * are currently disabled we need not do anything; one of the
	 * callers will take care of setting IMASK to the proper value
	 * when reenabling interrupts.
	 * local_irq_enable just does "STI irq_flags", so it's exactly
	 * what we need.
	 */
	if (!irqs_disabled())
		local_irq_enable();
	return;
}
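
/*
 * System (peripheral) interrupts are masked in the SIC_IMASK register.
 * Peripheral IRQ numbers start immediately after the core timer IRQ,
 * hence the (irq - (IRQ_CORETMR + 1)) bit position.
 */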
static void bfin_internal_mask_irq(unsigned int irq)
{
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << (irq - (IRQ_CORETMR + 1))));
	SSYNC();
}

static void bfin_internal_unmask_irq(unsigned int irq)
{
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			     (1 << (irq - (IRQ_CORETMR + 1))));
	SSYNC();
}
static struct irq_chip bfin_core_irqchip = {
	.ack = ack_noop,
	.mask = bfin_core_mask_irq,
	.unmask = bfin_core_unmask_irq,
};

static struct irq_chip bfin_internal_irqchip = {
	.ack = ack_noop,
	.mask = bfin_internal_mask_irq,
	.unmask = bfin_internal_unmask_irq,
};
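
/*
 * On the BF537 family all peripheral error sources share a single SIC
 * input (IRQ_GENERIC_ERROR).  The code below demultiplexes that line into
 * per-peripheral error IRQs (IRQ_PPI_ERROR .. IRQ_UART1_ERROR);
 * error_int_mask tracks which of those virtual error IRQs are currently
 * unmasked.
 */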
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
static int error_int_mask;

static void bfin_generic_error_ack_irq(unsigned int irq)
{
}

static void bfin_generic_error_mask_irq(unsigned int irq)
{
	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));

	if (!error_int_mask) {
		local_irq_disable();
		bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
				     ~(1 << (IRQ_GENERIC_ERROR -
					     (IRQ_CORETMR + 1))));
		SSYNC();
		local_irq_enable();
	}
}

static void bfin_generic_error_unmask_irq(unsigned int irq)
{
	local_irq_disable();
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 <<
			     (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1)));
	SSYNC();
	local_irq_enable();

	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
}

static struct irq_chip bfin_generic_error_irqchip = {
	.ack = bfin_generic_error_ack_irq,
	.mask = bfin_generic_error_mask_irq,
	.unmask = bfin_generic_error_unmask_irq,
};
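
/*
 * Demultiplexing handler for IRQ_GENERIC_ERROR: poll each peripheral's
 * status register to find the error source, then either dispatch the
 * matching per-peripheral error IRQ (if it is unmasked in error_int_mask)
 * or clear the condition at the peripheral so the error line deasserts.
 */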
static void bfin_demux_error_irq(unsigned int int_err_irq,
				 struct irq_desc *intb_desc)
{
	int irq = 0;

	SSYNC();

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
		irq = IRQ_MAC_ERROR;
	else
#endif
	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT0_ERROR;
	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT1_ERROR;
	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
		irq = IRQ_PPI_ERROR;
	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
		irq = IRQ_CAN_ERROR;
	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
		irq = IRQ_SPI_ERROR;
	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART0_ERROR;
	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART1_ERROR;

	if (irq) {
		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
			struct irq_desc *desc = irq_desc + irq;
			desc->handle_irq(irq, desc);
		} else {
			switch (irq) {
			case IRQ_PPI_ERROR:
				bfin_write_PPI_STATUS(PPI_ERR_MASK);
				break;
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
			case IRQ_MAC_ERROR:
				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
				break;
#endif
			case IRQ_SPORT0_ERROR:
				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_SPORT1_ERROR:
				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_CAN_ERROR:
				bfin_write_CAN_GIS(CAN_ERR_MASK);
				break;
			case IRQ_SPI_ERROR:
				bfin_write_SPI_STAT(SPI_ERR_MASK);
				break;
			default:
				break;
			}
			pr_debug("IRQ %d:"
				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
		       __FUNCTION__, __FILE__, __LINE__);
}
#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
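
/*
 * GPIO interrupt demultiplexing: individual GPIO IRQs (IRQ_PF0..) are
 * funneled through shared port interrupt lines (see the chained handler
 * setup in init_arch_irq) and fanned out again by bfin_demux_gpio_irq.
 * gpio_enabled and gpio_edge_triggered hold per-bank bitmaps of the pins
 * configured as interrupts and of those set up for edge triggering.
 */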
#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)];
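
/*
 * Acknowledge an edge-triggered GPIO interrupt by writing 0 to the pin's
 * data register, which clears the latched edge condition; level-sensitive
 * pins need no explicit ack here.
 */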
static void bfin_gpio_ack_irq(unsigned int irq)
{
	u16 gpionr = irq - IRQ_PF0;

	if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
		set_gpio_data(gpionr, 0);
		SSYNC();
	}
}

static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	u16 gpionr = irq - IRQ_PF0;

	if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
		set_gpio_data(gpionr, 0);
		SSYNC();
	}

	set_gpio_maska(gpionr, 0);
	SSYNC();
}

static void bfin_gpio_mask_irq(unsigned int irq)
{
	set_gpio_maska(irq - IRQ_PF0, 0);
	SSYNC();
}

static void bfin_gpio_unmask_irq(unsigned int irq)
{
	set_gpio_maska(irq - IRQ_PF0, 1);
	SSYNC();
}
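
/*
 * startup/shutdown: reserve the pin with gpio_request() the first time it
 * is used as an interrupt, note it in gpio_enabled, and unmask it; on
 * shutdown mask the interrupt and release the pin again.
 */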
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	unsigned int ret = 0;
	u16 gpionr = irq - IRQ_PF0;

	if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
		ret = gpio_request(gpionr, NULL);
		if (ret)
			return ret;
	}

	gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
	bfin_gpio_unmask_irq(irq);

	return ret;
}

static void bfin_gpio_irq_shutdown(unsigned int irq)
{
	bfin_gpio_mask_irq(irq);
	gpio_free(irq - IRQ_PF0);
	gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0);
}
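
/*
 * Configure a GPIO pin for the requested IRQ trigger type: make it an
 * input, select edge vs. level sensitivity, program the both-edges and
 * polarity bits, and switch the flow handler between handle_edge_irq and
 * handle_level_irq accordingly.
 */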
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	unsigned int ret;
	u16 gpionr = irq - IRQ_PF0;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
			ret = gpio_request(gpionr, NULL);
			if (ret)
				return ret;
		}

		gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
	} else {
		gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
		return 0;
	}

	set_gpio_dir(gpionr, 0);
	set_gpio_inen(gpionr, 1);

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr);
		set_gpio_edge(gpionr, 1);
	} else {
		set_gpio_edge(gpionr, 0);
		gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
	}

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	SSYNC();

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_irq_handler(irq, handle_edge_irq);
	else
		set_irq_handler(irq, handle_level_irq);

	return 0;
}
static struct irq_chip bfin_gpio_irqchip = {
	.ack = bfin_gpio_ack_irq,
	.mask = bfin_gpio_mask_irq,
	.mask_ack = bfin_gpio_mask_ack_irq,
	.unmask = bfin_gpio_unmask_irq,
	.set_type = bfin_gpio_irq_type,
	.startup = bfin_gpio_irq_startup,
	.shutdown = bfin_gpio_irq_shutdown
};
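
/*
 * Chained handler for the shared GPIO interrupt lines: walk the GPIO
 * banks 16 pins at a time, AND the latched pin data with the pins that
 * are both enabled and unmasked, and invoke the flow handler of every
 * pending pin.
 */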
static void bfin_demux_gpio_irq(unsigned int intb_irq,
				struct irq_desc *intb_desc)
{
	u16 i;

	for (i = 0; i < MAX_BLACKFIN_GPIOS; i += 16) {
		int irq = IRQ_PF0 + i;
		int flag_d = get_gpiop_data(i);
		int mask = flag_d & (gpio_enabled[gpio_bank(i)] &
				     get_gpiop_maska(i));

		while (mask) {
			if (mask & 1) {
				struct irq_desc *desc = irq_desc + irq;
				desc->handle_irq(irq, desc);
			}
			irq++;
			mask >>= 1;
		}
	}
}
#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */
/*
 * This function should be called during kernel startup to initialize
 * the BFin IRQ handling routines.
 */
int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
	SSYNC();

	local_irq_disable();

#ifndef CONFIG_KGDB
	bfin_write_EVT0(evt_emulation);
#endif
	bfin_write_EVT2(evt_evt2);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt14_softirq);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
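
	/*
	 * Assign the core irqchip to the core events (up to IRQ_CORETMR)
	 * and the SIC irqchip to the system interrupts.  Most IRQs get
	 * handle_simple_irq; IRQ_GENERIC_ERROR and the shared GPIO port
	 * interrupts instead get the demux handlers defined above.
	 */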
	for (irq = 0; irq < SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			set_irq_chip(irq, &bfin_core_irqchip);
		else
			set_irq_chip(irq, &bfin_internal_irqchip);
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
		if (irq != IRQ_GENERIC_ERROR) {
#endif
#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
			if ((irq != IRQ_PROG_INTA) /* PORT F & G MASK_A Interrupt */
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
			    && (irq != IRQ_MAC_RX) /* PORT H MASK_A Interrupt */
# endif
			    ) {
#endif
				set_irq_handler(irq, handle_simple_irq);
#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
			} else {
				set_irq_chained_handler(irq,
							bfin_demux_gpio_irq);
			}
#endif
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
		} else {
			set_irq_handler(irq, bfin_demux_error_irq);
		}
#endif
	}

#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) {
		set_irq_chip(irq, &bfin_generic_error_irqchip);
		set_irq_handler(irq, handle_level_irq);
	}
#endif

#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
	for (irq = IRQ_PF0; irq < NR_IRQS; irq++) {
		set_irq_chip(irq, &bfin_gpio_irqchip);
		/* if configured as edge triggered, the handler is switched
		 * to handle_edge_irq in bfin_gpio_irq_type() */
		set_irq_handler(irq, handle_level_irq);
	}
#endif

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");

	/* IMASK=xxx is equivalent to STI xx or irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to set up the IARs before interrupts are
	 * enabled.
	 */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	irq_flags = irq_flags | IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 |
		IMASK_IVGHW;

	return 0;
}
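
/*
 * do_irq(): translate a core event vector into a Linux IRQ number and
 * dispatch it.  The core timer vector maps directly to IRQ_CORETMR; for
 * IVG7-IVG13 the precomputed ivg_table slice for that vector is scanned
 * against SIC_IMASK & SIC_ISR to find the asserting peripheral, counting
 * the event as spurious if none matches.
 */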
#ifdef CONFIG_DO_IRQ_L1
void do_irq(int vec, struct pt_regs *fp) __attribute__((l1_text));
#endif

void do_irq(int vec, struct pt_regs *fp)
{
	if (vec == EVT_IVTMR_P) {
		vec = IRQ_CORETMR;
	} else {
		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
		unsigned long sic_status;

		SSYNC();
		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			} else if (sic_status & ivg->isrflag)
				break;
		}
		vec = ivg->irqno;
	}
	asm_do_IRQ(vec, fp);

#ifdef CONFIG_KGDB
	kgdb_process_breakpoint();
#endif
}