  1. /*
  2. * File: arch/blackfin/mach-common/ints-priority.c
  3. *
  4. * Description: Set up the interrupt priorities
  5. *
  6. * Modified:
  7. * 1996 Roman Zippel
  8. * 1999 D. Jeff Dionne <jeff@uclinux.org>
  9. * 2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
  10. * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
  11. * 2003 Metrowerks/Motorola
  12. * 2003 Bas Vermeulen <bas@buyways.nl>
  13. * Copyright 2004-2008 Analog Devices Inc.
  14. *
  15. * Bugs: Enter bugs at http://blackfin.uclinux.org/
  16. *
  17. * This program is free software; you can redistribute it and/or modify
  18. * it under the terms of the GNU General Public License as published by
  19. * the Free Software Foundation; either version 2 of the License, or
  20. * (at your option) any later version.
  21. *
  22. * This program is distributed in the hope that it will be useful,
  23. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  24. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  25. * GNU General Public License for more details.
  26. *
  27. * You should have received a copy of the GNU General Public License
  28. * along with this program; if not, see the file COPYING, or write
  29. * to the Free Software Foundation, Inc.,
  30. * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  31. */
  32. #include <linux/module.h>
  33. #include <linux/kernel_stat.h>
  34. #include <linux/seq_file.h>
  35. #include <linux/irq.h>
  36. #ifdef CONFIG_IPIPE
  37. #include <linux/ipipe.h>
  38. #endif
  39. #ifdef CONFIG_KGDB
  40. #include <linux/kgdb.h>
  41. #endif
  42. #include <asm/traps.h>
  43. #include <asm/blackfin.h>
  44. #include <asm/gpio.h>
  45. #include <asm/irq_handler.h>
  46. #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
  47. #ifdef BF537_FAMILY
  48. # define BF537_GENERIC_ERROR_INT_DEMUX
  49. #else
  50. # undef BF537_GENERIC_ERROR_INT_DEMUX
  51. #endif
  52. /*
  53. * NOTES:
  54. * - we have separated the physical Hardware interrupt from the
  55. * levels that the LINUX kernel sees (see the description in irq.h)
  56. * -
  57. */
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise). The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif

/* The number of spurious interrupts */
atomic_t num_spurious;

#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;		/* accumulated VR_CTL wakeup-enable bits */
#endif
/* One entry per peripheral interrupt, filled in by search_IAR(). */
struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Per-IVG (priority level 7..13) slice of ivg_table. */
struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;	/* one past the last entry for this ivg */
} ivg7_13[IVG13 - IVG7 + 1];
/*
 * Search SIC_IAR and fill tables with the irqvalues
 * and their positions in the SIC_ISR register.
 */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	/* Walk priority levels IVG7..IVG13 in order so that, for each
	 * level, its entries end up contiguous in ivg_table. */
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqn;
		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
		/* Scan every peripheral irq and collect those whose 4-bit
		 * IAR assignment field selects this ivg. */
		for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
			int iar_shift = (irqn & 7) * 4;	/* 8 irqs per IAR register */
			if (ivg == (0xf &
#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
	|| defined(CONFIG_BF539) || defined(CONFIG_BF51x)
			/* These parts have a gap between IAR3 and IAR4, so
			 * address the second group of registers separately. */
			    bfin_read32((unsigned long *)SIC_IAR0 +
					((irqn % 32) >> 3) + ((irqn / 32) *
					((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
#else
			    bfin_read32((unsigned long *)SIC_IAR0 +
					(irqn >> 3)) >> iar_shift)) {
#endif
				ivg_table[irq_pos].irqno = IVG7 + irqn;
				ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
				ivg7_13[ivg].istop++;
				irq_pos++;
			}
		}
	}
}
/*
 * This is for core internal IRQs
 */

/* No-op .ack callback for irqs that need no explicit acknowledge. */
static void bfin_ack_noop(unsigned int irq)
{
	/* Dummy function. */
}
/*
 * Mask a core (CEC) interrupt: drop its bit from the bfin_irq_flags
 * shadow and, if hardware interrupts are currently enabled, re-load
 * IMASK from the shadow (local_irq_enable just does "STI
 * bfin_irq_flags" -- see bfin_core_unmask_irq).
 */
static void bfin_core_mask_irq(unsigned int irq)
{
	bfin_irq_flags &= ~(1 << irq);
	if (!irqs_disabled_hw())
		local_irq_enable_hw();
}
  128. static void bfin_core_unmask_irq(unsigned int irq)
  129. {
  130. bfin_irq_flags |= 1 << irq;
  131. /*
  132. * If interrupts are enabled, IMASK must contain the same value
  133. * as bfin_irq_flags. Make sure that invariant holds. If interrupts
  134. * are currently disabled we need not do anything; one of the
  135. * callers will take care of setting IMASK to the proper value
  136. * when reenabling interrupts.
  137. * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
  138. * what we need.
  139. */
  140. if (!irqs_disabled_hw())
  141. local_irq_enable_hw();
  142. return;
  143. }
/*
 * Mask a peripheral interrupt by clearing its bit in the SIC_IMASK
 * register (bank) that holds it.
 */
static void bfin_internal_mask_irq(unsigned int irq)
{
#ifdef CONFIG_BF53x
	/* BF53x parts have a single SIC_IMASK register. */
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << SIC_SYSIRQ(irq)));
#else
	/* Other parts spread the mask across several 32-bit banks. */
	unsigned mask_bank, mask_bit;
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#ifdef CONFIG_SMP
	/* Mirror the change on the second core's SIC. */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			      ~(1 << mask_bit));
#endif
#endif
}
/*
 * Unmask a peripheral interrupt by setting its bit in the SIC_IMASK
 * register (bank) that holds it.
 */
static void bfin_internal_unmask_irq(unsigned int irq)
{
#ifdef CONFIG_BF53x
	/* BF53x parts have a single SIC_IMASK register. */
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			     (1 << SIC_SYSIRQ(irq)));
#else
	/* Other parts spread the mask across several 32-bit banks. */
	unsigned mask_bank, mask_bit;
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
			     (1 << mask_bit));
#ifdef CONFIG_SMP
	/* Mirror the change on the second core's SIC. */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
			      (1 << mask_bit));
#endif
#endif
}
#ifdef CONFIG_PM
/*
 * Record whether @irq may wake the system from suspend.
 *
 * Updates the cached SIC_IWRx wakeup masks (bfin_sic_iwr) and, for the
 * handful of sources that also need a VR_CTL wakeup-enable bit (RTC,
 * CAN, USB, keypad, rotary counter), accumulates that bit in vr_wakeup.
 * The caches are applied to hardware elsewhere (PM suspend path).
 * Returns 0.
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	/* Select the matching VR_CTL wakeup bit, if this source has one. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
		wakeup |= WAKE;
		break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
		wakeup |= CANWE;
		break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
		wakeup |= USBWE;
		break;
#endif
#ifdef IRQ_KEY
	case IRQ_KEY:
		wakeup |= KPADWE;
		break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
		wakeup |= ROTWE;
		break;
#endif
	default:
		break;
	}

	/* Protect the shared caches against concurrent updates. */
	local_irq_save_hw(flags);
	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup |= wakeup;
	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup &= ~wakeup;
	}
	local_irq_restore_hw(flags);
	return 0;
}
#endif
/* irq_chip for the core (CEC) interrupts, IVG levels. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.ack = bfin_ack_noop,
	.mask = bfin_core_mask_irq,
	.unmask = bfin_core_unmask_irq,
};
/* irq_chip for peripheral interrupts routed through the SIC. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.ack = bfin_ack_noop,
	.mask = bfin_internal_mask_irq,
	.unmask = bfin_internal_unmask_irq,
	.mask_ack = bfin_internal_mask_irq,
	.disable = bfin_internal_mask_irq,
	.enable = bfin_internal_unmask_irq,
#ifdef CONFIG_PM
	.set_wake = bfin_internal_set_wake,
#endif
};
/*
 * Deliver a demuxed irq to its handler.  Under I-pipe the irq is fed
 * through the interrupt pipeline (with tracing around it); otherwise
 * the flow handler attached to the descriptor is invoked directly.
 */
static void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;	/* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	struct irq_desc *desc = irq_desc + irq;
	desc->handle_irq(irq, desc);
#endif /* !CONFIG_IPIPE */
}
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
/* Bitmap of demuxed error sources currently unmasked;
 * bit 0 corresponds to IRQ_PPI_ERROR. */
static int error_int_mask;

/* Mask one demuxed error source; once none are left, also mask the
 * shared IRQ_GENERIC_ERROR line at the SIC. */
static void bfin_generic_error_mask_irq(unsigned int irq)
{
	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
	if (!error_int_mask)
		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}

/* Unmask a demuxed error source, making sure the shared line is open. */
static void bfin_generic_error_unmask_irq(unsigned int irq)
{
	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
}

/* irq_chip for the error irqs demuxed out of IRQ_GENERIC_ERROR. */
static struct irq_chip bfin_generic_error_irqchip = {
	.name = "ERROR",
	.ack = bfin_ack_noop,
	.mask_ack = bfin_generic_error_mask_irq,
	.mask = bfin_generic_error_mask_irq,
	.unmask = bfin_generic_error_unmask_irq,
};
/*
 * Chained handler for the shared IRQ_GENERIC_ERROR line: poll each
 * peripheral's error/status register to find the asserting source,
 * then either deliver that source's virtual irq (if unmasked in
 * error_int_mask) or just clear the hardware status so the line
 * deasserts.
 */
static void bfin_demux_error_irq(unsigned int int_err_irq,
				 struct irq_desc *inta_desc)
{
	int irq = 0;

	/* Identify the first asserting source, in fixed priority order. */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
		irq = IRQ_MAC_ERROR;
	else
#endif
	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT0_ERROR;
	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT1_ERROR;
	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
		irq = IRQ_PPI_ERROR;
	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
		irq = IRQ_CAN_ERROR;
	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
		irq = IRQ_SPI_ERROR;
	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART0_ERROR;
	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
		 (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
		irq = IRQ_UART1_ERROR;

	if (irq) {
		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
			bfin_handle_irq(irq);
		else {
			/* Source is masked: clear its status register so the
			 * shared line stops asserting, and log at debug level. */
			switch (irq) {
			case IRQ_PPI_ERROR:
				bfin_write_PPI_STATUS(PPI_ERR_MASK);
				break;
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
			case IRQ_MAC_ERROR:
				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
				break;
#endif
			case IRQ_SPORT0_ERROR:
				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_SPORT1_ERROR:
				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
				break;
			case IRQ_CAN_ERROR:
				bfin_write_CAN_GIS(CAN_ERR_MASK);
				break;
			case IRQ_SPI_ERROR:
				bfin_write_SPI_STAT(SPI_ERR_MASK);
				break;
			default:
				break;
			}
			pr_debug("IRQ %d:"
				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
		       __func__, __FILE__, __LINE__);
}
#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
/*
 * Install a flow handler for @irq without taking the descriptor lock.
 *
 * NOTE(review): under CONFIG_IPIPE the @handle argument is ignored and
 * handle_edge_irq is always installed -- presumably deliberate for the
 * I-pipe case, but worth confirming against the I-pipe patches.
 */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	_set_irq_handler(irq, handle_edge_irq);
#else
	struct irq_desc *desc = irq_desc + irq;
	/* May not call generic set_irq_handler() due to spinlock
	   recursion. */
	desc->handle_irq = handle;
#endif
}
/* One bit per GPIO: set while the pin is configured as an irq source. */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
#if !defined(CONFIG_BF54x)
/* Acknowledge a GPIO irq by clearing the pin's latched data bit. */
static void bfin_gpio_ack_irq(unsigned int irq)
{
	/* AFAIK ack_irq in case mask_ack is provided
	 * gets only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(irq), 0);
}
/*
 * Combined mask+ack: clear the latched data bit only for edge-sensed
 * irqs (level irqs must not have their status cleared here), then mask
 * the pin in the port's MASKA register.
 */
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 gpionr = irq_to_gpio(irq);
	if (desc->handle_irq == handle_edge_irq)
		set_gpio_data(gpionr, 0);
	set_gpio_maska(gpionr, 0);
}
  374. static void bfin_gpio_mask_irq(unsigned int irq)
  375. {
  376. set_gpio_maska(irq_to_gpio(irq), 0);
  377. }
  378. static void bfin_gpio_unmask_irq(unsigned int irq)
  379. {
  380. set_gpio_maska(irq_to_gpio(irq), 1);
  381. }
/*
 * Start up a GPIO irq: mark the pin enabled and unmask it.
 *
 * NOTE(review): the un-negated __test_and_set_bit() means
 * bfin_gpio_irq_prepare() only runs when the bit was ALREADY set;
 * preparing a pin on its first enable would want the inverted test.
 * The same pattern appears in bfin_gpio_irq_type() -- confirm the
 * intended polarity before changing either.
 */
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);
	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);
	bfin_gpio_unmask_irq(irq);
	return 0;
}
  390. static void bfin_gpio_irq_shutdown(unsigned int irq)
  391. {
  392. u32 gpionr = irq_to_gpio(irq);
  393. bfin_gpio_mask_irq(irq);
  394. __clear_bit(gpionr, gpio_enabled);
  395. bfin_gpio_irq_free(gpionr);
  396. }
/*
 * Configure the trigger type of a GPIO irq: claims the pin from the
 * GPIO layer, programs polarity/edge/both-edge registers, and installs
 * the matching flow handler.  Returns 0 on success or the error from
 * bfin_gpio_irq_request().
 *
 * NOTE(review): `__test_bit` is unusual (kernel API is test_bit) -- it
 * must be provided by an arch header; verify it exists on all configs.
 * Also see the polarity note on bfin_gpio_irq_startup() regarding the
 * un-negated __test_and_set_bit() below.
 */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (__test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		/* IRQ_TYPE_NONE: release the pin from irq duty. */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Disable input and make the pin an input while reprogramming. */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	/* Both-edge detection uses the dedicated BOTH register. */
	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		set_gpio_data(gpionr, 0);	/* clear any stale latched edge */
	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	/* Edge irqs need the edge flow handler, level irqs the level one. */
	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);
	return 0;
}
  445. #ifdef CONFIG_PM
  446. int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
  447. {
  448. unsigned gpio = irq_to_gpio(irq);
  449. if (state)
  450. gpio_pm_wakeup_request(gpio, PM_WAKE_IGNORE);
  451. else
  452. gpio_pm_wakeup_free(gpio);
  453. return 0;
  454. }
  455. #endif
/*
 * Chained handler for the GPIO port interrupt lines: translate the
 * incoming INTA irq into the base GPIO irq of the port(s) it covers,
 * then walk the pending-and-unmasked bits and deliver one virtual irq
 * per set bit.  The `search` path (BF53x IRQ_PROG_INTA) scans every
 * GPIO bank because that line is shared across banks.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
				struct irq_desc *desc)
{
	unsigned int i, gpio, mask, irq, search = 0;

	switch (inta_irq) {
#if defined(CONFIG_BF53x)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		search = 1;
		break;
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
	case IRQ_MAC_RX:
		irq = IRQ_PH0;
		break;
# endif
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	if (search) {
		/* NOTE(review): `irq += i` adds the bank offset on top of the
		 * increments done by the inner loop below, so irq only tracks
		 * the right pin if the inner loop stopped exactly at the bank
		 * boundary -- verify on parts with multiple banks. */
		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
			irq += i;
			mask = get_gpiop_data(i) & get_gpiop_maska(i);
			while (mask) {
				if (mask & 1)
					bfin_handle_irq(irq);
				irq++;
				mask >>= 1;
			}
		}
	} else {
		gpio = irq_to_gpio(irq);
		mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
		do {
			if (mask & 1)
				bfin_handle_irq(irq);
			irq++;
			mask >>= 1;
		} while (mask);
	}
}
#else /* CONFIG_BF54x */
#define NR_PINT_SYS_IRQS 4	/* PINT0..PINT3 pin-interrupt controllers */
#define NR_PINT_BITS 32		/* pins handled per PINT bank */
#define NR_PINTS 160		/* total pin irqs covered by the LUTs */
#define IRQ_NOT_AVAIL 0xFF	/* LUT sentinel: irq not routed to a PINT */

#define PINT_2_BANK(x) ((x) >> 5)	/* LUT value -> PINT bank number */
#define PINT_2_BIT(x) ((x) & 0x1F)	/* LUT value -> bit within the bank */
#define PINT_BIT(x) (1 << (PINT_2_BIT(x)))	/* LUT value -> register bit mask */

/* Translation tables built by init_pint_lut(), indexed by irq - SYS_IRQS
 * and by (bank * NR_PINT_BITS + bit) respectively. */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* Register layout of one PINTx controller block; overlays the MMIO
 * region starting at PINTx_MASK_SET (see the pint[] casts below). */
struct pin_int_t {
	unsigned int mask_set;
	unsigned int mask_clear;
	unsigned int request;
	unsigned int assign;
	unsigned int edge_set;
	unsigned int edge_clear;
	unsigned int invert_set;
	unsigned int invert_clear;
	unsigned int pinstate;
	unsigned int latch;
};

/* One pointer per PINT controller, aimed at its MMIO block. */
static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
	(struct pin_int_t *)PINT0_MASK_SET,
	(struct pin_int_t *)PINT1_MASK_SET,
	(struct pin_int_t *)PINT2_MASK_SET,
	(struct pin_int_t *)PINT3_MASK_SET,
};
  550. inline unsigned int get_irq_base(u32 bank, u8 bmap)
  551. {
  552. unsigned int irq_base;
  553. if (bank < 2) { /*PA-PB */
  554. irq_base = IRQ_PA0 + bmap * 16;
  555. } else { /*PC-PJ */
  556. irq_base = IRQ_PC0 + bmap * 16;
  557. }
  558. return irq_base;
  559. }
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
/*
 * Rebuild the irq<->PINT translation tables from the current
 * PINTx_ASSIGN settings.  Each PINT bank maps its 32 bits onto
 * half-ports chosen byte-wise by the assign register.
 */
void init_pint_lut(void)
{
	u16 bank, bit, irq_base, bit_pos;
	u32 pint_assign;
	u8 bmap;

	/* Default every slot to "not routed" before rebuilding. */
	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
		pint_assign = pint[bank]->assign;
		for (bit = 0; bit < NR_PINT_BITS; bit++) {
			/* Each byte of ASSIGN picks the half-port for 8 bits. */
			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
			irq_base = get_irq_base(bank, bmap);
			/* Odd byte groups map to the upper 8 pins of the half-port. */
			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
			bit_pos = bit + bank * NR_PINT_BITS;
			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
		}
	}
}
/*
 * Acknowledge a BF54x pin irq.  For both-edge irqs the invert bit is
 * toggled so the PINT emulates both-edge detection, then the latched
 * request is cleared (request is write-one-to-clear, as used here).
 */
static void bfin_gpio_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}
	pint[bank]->request = pintbit;
}
/*
 * Combined mask+ack for a BF54x pin irq: same invert toggling and
 * request clear as bfin_gpio_ack_irq(), plus masking the pin via the
 * write-one-to-clear mask_clear register.
 */
static void bfin_gpio_mask_ack_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}
	pint[bank]->request = pintbit;
	pint[bank]->mask_clear = pintbit;
}
  608. static void bfin_gpio_mask_irq(unsigned int irq)
  609. {
  610. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  611. pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
  612. }
  613. static void bfin_gpio_unmask_irq(unsigned int irq)
  614. {
  615. u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
  616. u32 pintbit = PINT_BIT(pint_val);
  617. u32 bank = PINT_2_BANK(pint_val);
  618. pint[bank]->request = pintbit;
  619. pint[bank]->mask_set = pintbit;
  620. }
/*
 * Start up a BF54x pin irq: verify the pin is routed to a PINT bank,
 * mark it enabled and unmask it.
 *
 * NOTE(review): returns -ENODEV from an unsigned-int-returning
 * callback (the irq_chip .startup signature of this era) -- callers
 * see a large positive value; confirm this is handled upstream.
 * Also see the __test_and_set_bit polarity note on the non-BF54x
 * bfin_gpio_irq_startup().
 */
static unsigned int bfin_gpio_irq_startup(unsigned int irq)
{
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];

	if (pint_val == IRQ_NOT_AVAIL) {
		printk(KERN_ERR
		       "GPIO IRQ %d :Not in PINT Assign table "
		       "Reconfigure Interrupt to Port Assignemt\n", irq);
		return -ENODEV;
	}
	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);
	bfin_gpio_unmask_irq(irq);
	return 0;
}
  636. static void bfin_gpio_irq_shutdown(unsigned int irq)
  637. {
  638. u32 gpionr = irq_to_gpio(irq);
  639. bfin_gpio_mask_irq(irq);
  640. __clear_bit(gpionr, gpio_enabled);
  641. bfin_gpio_irq_free(gpionr);
  642. }
/*
 * Configure the trigger type of a BF54x pin irq via the PINT invert
 * and edge registers, claiming the pin from the GPIO layer first.
 * Returns 0 on success, -ENODEV if the pin is not routed to a PINT,
 * or the error from bfin_gpio_irq_request().
 *
 * NOTE(review): see the `__test_bit` and __test_and_set_bit polarity
 * notes on the non-BF54x variants -- same patterns used here.
 */
static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
{
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (pint_val == IRQ_NOT_AVAIL)
		return -ENODEV;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (__test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;
		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);
	} else {
		/* IRQ_TYPE_NONE: release the pin from irq duty. */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
	else
		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */

	/* Both-edge is emulated: start with invert set opposite to the
	 * current pin level, then toggle it on every ack. */
	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		if (gpio_get_value(gpionr))
			pint[bank]->invert_set = pintbit;
		else
			pint[bank]->invert_clear = pintbit;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		pint[bank]->edge_set = pintbit;
		bfin_set_irq_handler(irq, handle_edge_irq);
	} else {
		pint[bank]->edge_clear = pintbit;
		bfin_set_irq_handler(irq, handle_level_irq);
	}
	return 0;
}
#ifdef CONFIG_PM
/* Per-bank PINT mask snapshots used across suspend/resume. */
u32 pint_saved_masks[NR_PINT_SYS_IRQS];
u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];

/*
 * Enable/disable wakeup on a BF54x pin irq: propagate the state to
 * the bank's SIC wake setting and record the pin in the per-bank
 * wakeup mask applied by bfin_pm_setup().  Returns 0 or -EINVAL for
 * an unknown bank.
 */
int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
{
	u32 pint_irq;
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 bank = PINT_2_BANK(pint_val);
	u32 pintbit = PINT_BIT(pint_val);

	switch (bank) {
	case 0:
		pint_irq = IRQ_PINT0;
		break;
	case 2:
		pint_irq = IRQ_PINT2;
		break;
	case 3:
		pint_irq = IRQ_PINT3;
		break;
	case 1:
		pint_irq = IRQ_PINT1;
		break;
	default:
		return -EINVAL;
	}

	bfin_internal_set_wake(pint_irq, state);

	if (state)
		pint_wakeup_masks[bank] |= pintbit;
	else
		pint_wakeup_masks[bank] &= ~pintbit;
	return 0;
}
/*
 * Suspend-entry hook: save each PINT bank's current mask, then swap
 * in the wakeup mask.  Reading mask_clear returns the current mask;
 * writing it back clears those bits (write-one-to-clear -- presumably,
 * matching how mask_clear is used by the mask paths above), after
 * which mask_set enables only the wakeup pins.  Returns 0.
 */
u32 bfin_pm_setup(void)
{
	u32 val, i;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint[i]->mask_clear;
		pint_saved_masks[i] = val;
		if (val ^ pint_wakeup_masks[i]) {
			pint[i]->mask_clear = val;
			pint[i]->mask_set = pint_wakeup_masks[i];
		}
	}
	return 0;
}
/*
 * Resume hook: restore the PINT masks saved by bfin_pm_setup().
 * The apparent self-assignment is a deliberate MMIO read-modify-write:
 * reading mask_clear yields the currently-enabled mask and writing it
 * back clears exactly those bits, leaving the bank empty before the
 * saved mask is re-applied via mask_set.  Not a bug -- do not "fix".
 */
void bfin_pm_restore(void)
{
	u32 i, val;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint_saved_masks[i];
		if (val ^ pint_wakeup_masks[i]) {
			pint[i]->mask_clear = pint[i]->mask_clear;
			pint[i]->mask_set = val;
		}
	}
}
#endif
/*
 * Chained handler for the BF54x PINT0..PINT3 lines: read the bank's
 * latched request register and deliver one virtual irq per pending
 * bit, translating bit positions back to irqs via pint2irq_lut.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
				struct irq_desc *desc)
{
	u32 bank, pint_val;
	u32 request, irq;

	switch (inta_irq) {
	case IRQ_PINT0:
		bank = 0;
		break;
	case IRQ_PINT2:
		bank = 2;
		break;
	case IRQ_PINT3:
		bank = 3;
		break;
	case IRQ_PINT1:
		bank = 1;
		break;
	default:
		return;
	}

	pint_val = bank * NR_PINT_BITS;
	request = pint[bank]->request;
	while (request) {
		if (request & 1) {
			irq = pint2irq_lut[pint_val] + SYS_IRQS;
			bfin_handle_irq(irq);
		}
		pint_val++;
		request >>= 1;
	}
}
#endif
/* irq_chip for GPIO pin interrupts; the callbacks resolve to the
 * BF54x PINT or the generic port implementations chosen above. */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.ack = bfin_gpio_ack_irq,
	.mask = bfin_gpio_mask_irq,
	.mask_ack = bfin_gpio_mask_ack_irq,
	.unmask = bfin_gpio_unmask_irq,
	.disable = bfin_gpio_mask_irq,
	.enable = bfin_gpio_unmask_irq,
	.set_type = bfin_gpio_irq_type,
	.startup = bfin_gpio_irq_startup,
	.shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
	.set_wake = bfin_gpio_set_wake,
#endif
};
/*
 * Program the CEC event vector table (EVT2..EVT15) with the assembly
 * entry points for NMI, exceptions, hardware error, core timer, the
 * peripheral IVG levels and the two software interrupts, then CSYNC
 * to make sure the writes have landed before interrupts are used.
 */
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt14_softirq);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}
  817. /*
  818. * This function should be called during kernel startup to initialize
  819. * the BFin IRQ handling routines.
  820. */
int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef CONFIG_BF54x
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# ifdef CONFIG_SMP
	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

	local_irq_disable();

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	/* Clear EMAC Interrupt Status bits so we can demux it later */
	bfin_write_EMAC_SYSTAT(-1);
#endif

#ifdef CONFIG_BF54x
# ifdef CONFIG_PINTx_REASSIGN
	/* Apply the board-configured GPIO-bank-to-PINT routing. */
	pint[0]->assign = CONFIG_PINT0_ASSIGN;
	pint[1]->assign = CONFIG_PINT1_ASSIGN;
	pint[2]->assign = CONFIG_PINT2_ASSIGN;
	pint[3]->assign = CONFIG_PINT3_ASSIGN;
# endif
	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
	init_pint_lut();
#endif

	/*
	 * Install an irq_chip and handler for every system interrupt.
	 * Core events (up to IRQ_CORETMR) use the core chip; the rest
	 * go through the SIC (bfin_internal_irqchip).
	 *
	 * NOTE(review): the loop bound is `<=` SYS_IRQS, i.e. SYS_IRQS + 1
	 * iterations — confirm SYS_IRQS is the last valid IRQ number and
	 * not a count.
	 */
	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			set_irq_chip(irq, &bfin_core_irqchip);
		else
			set_irq_chip(irq, &bfin_internal_irqchip);

		switch (irq) {
		/* GPIO bank interrupts are demuxed into per-pin IRQs. */
#if defined(CONFIG_BF53x)
		case IRQ_PROG_INTA:
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
		case IRQ_MAC_RX:
# endif
#elif defined(CONFIG_BF54x)
		case IRQ_PINT0:
		case IRQ_PINT1:
		case IRQ_PINT2:
		case IRQ_PINT3:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
		case IRQ_PORTF_INTA:
		case IRQ_PORTG_INTA:
		case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
		case IRQ_PROG0_INTA:
		case IRQ_PROG1_INTA:
		case IRQ_PROG2_INTA:
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
		case IRQ_PORTF_INTA:
#endif
			set_irq_chained_handler(irq,
						bfin_demux_gpio_irq);
			break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
		/* The shared "generic error" line is likewise demuxed. */
		case IRQ_GENERIC_ERROR:
			set_irq_chained_handler(irq, bfin_demux_error_irq);
			break;
#endif
#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
		case IRQ_TIMER0:
			set_irq_handler(irq, handle_percpu_irq);
			break;
#endif
#ifdef CONFIG_SMP
		case IRQ_SUPPLE_0:
		case IRQ_SUPPLE_1:
			set_irq_handler(irq, handle_percpu_irq);
			break;
#endif
		default:
#ifdef CONFIG_IPIPE
			/*
			 * We want internal interrupt sources to be masked, because
			 * ISRs may trigger interrupts recursively (e.g. DMA), but
			 * interrupts are _not_ masked at CPU level. So let's handle
			 * them as level interrupts.
			 */
			set_irq_handler(irq, handle_level_irq);
#else /* !CONFIG_IPIPE */
			set_irq_handler(irq, handle_simple_irq);
#endif /* !CONFIG_IPIPE */
			break;
		}
	}

#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	/* The demuxed per-peripheral error IRQs get their own chip. */
	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
		set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
					 handle_level_irq);
#endif

	/* if configured as edge, then will be changed to do_edge_IRQ */
	for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
		set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
					 handle_level_irq);

	/* Mask everything at the core, then clear any latched events. */
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to setup IARs before interrupts enabled */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions. See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif

#ifdef CONFIG_IPIPE
	/* Seed per-descriptor priorities for the interrupt pipeline. */
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_desc + irq;
		desc->ic_prio = __ipipe_get_irq_priority(irq);
		desc->thr_prio = __ipipe_get_irqthread_priority(irq);
	}
#endif /* CONFIG_IPIPE */

	return 0;
}
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * Top-level interrupt dispatch: map the core event vector @vec to a
 * system IRQ number and hand it to asm_do_IRQ().
 *
 * The core timer (EVT_IVTMR_P) maps directly to IRQ_CORETMR.  For
 * peripheral vectors IVG7..IVG13, walk the ivg table slice assigned to
 * this vector and pick the first entry whose bit is pending-and-unmasked
 * in the SIC status registers; if none is, count it as spurious and
 * return without dispatching.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	if (vec == EVT_IVTMR_P) {
		vec = IRQ_CORETMR;
	} else {
		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
		unsigned long sic_status[3];

		/* Secondary core reads the SICB (core B) status banks. */
		if (smp_processor_id()) {
#ifdef CONFIG_SMP
			/* This will be optimized out in UP mode. */
			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
#endif
		} else {
			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
		}
#ifdef CONFIG_BF54x
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			}
			/* 32 IRQs per status word; isrflag is this IRQ's bit. */
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		/* Single-SIC parts: one combined status word. */
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		vec = ivg->irqno;
	}
	asm_do_IRQ(vec, fp);
}
  1019. #ifdef CONFIG_IPIPE
  1020. int __ipipe_get_irq_priority(unsigned irq)
  1021. {
  1022. int ient, prio;
  1023. if (irq <= IRQ_CORETMR)
  1024. return irq;
  1025. for (ient = 0; ient < NR_PERI_INTS; ient++) {
  1026. struct ivgx *ivg = ivg_table + ient;
  1027. if (ivg->irqno == irq) {
  1028. for (prio = 0; prio <= IVG13-IVG7; prio++) {
  1029. if (ivg7_13[prio].ifirst <= ivg &&
  1030. ivg7_13[prio].istop > ivg)
  1031. return IVG7 + prio;
  1032. }
  1033. }
  1034. }
  1035. return IVG15;
  1036. }
int __ipipe_get_irqthread_priority(unsigned irq)
{
	int ient, prio;
	int demux_irq;

	/* The returned priority value is rescaled to [0..IVG13+1]
	 * with 0 being the lowest effective priority level. */

	if (irq <= IRQ_CORETMR)
		return IVG13 - irq + 1;

	/* GPIO IRQs are given the priority of the demux
	 * interrupt. */
	if (IS_GPIOIRQ(irq)) {
#if defined(CONFIG_BF54x)
		/* Map the pin IRQ back to its PINT bank's demux IRQ. */
		u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
		demux_irq = (bank == 0 ? IRQ_PINT0 :
			     bank == 1 ? IRQ_PINT1 :
			     bank == 2 ? IRQ_PINT2 :
			     IRQ_PINT3);
#elif defined(CONFIG_BF561)
		demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
			     irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
			     IRQ_PROG0_INTA);
#elif defined(CONFIG_BF52x)
		demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
			     irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
			     IRQ_PORTF_INTA);
#else
		demux_irq = irq;
#endif
		/* "+ 1": decoded GPIO IRQs rank just above their demux. */
		return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
	}

	/* The GPIO demux interrupt is given a lower priority
	 * than the GPIO IRQs, so that its threaded handler
	 * unmasks the interrupt line after the decoded IRQs
	 * have been processed. */
	prio = PRIO_GPIODEMUX(irq);
	/* demux irq? */
	if (prio != -1)
		return IVG13 - prio;

	/* Plain peripheral IRQ: find its IVG level in the table. */
	for (ient = 0; ient < NR_PERI_INTS; ient++) {
		struct ivgx *ivg = ivg_table + ient;
		if (ivg->irqno == irq) {
			for (prio = 0; prio <= IVG13-IVG7; prio++) {
				if (ivg7_13[prio].ifirst <= ivg &&
				    ivg7_13[prio].istop > ivg)
					/* NOTE(review): this returns IVG7 - prio,
					 * while __ipipe_get_irq_priority() returns
					 * IVG7 + prio for the same lookup.  The
					 * asymmetry looks suspicious against the
					 * [0..IVG13+1] rescaling described above —
					 * confirm the intended formula. */
					return IVG7 - prio;
			}
		}
	}

	/* Unknown IRQ: lowest effective priority. */
	return 0;
}
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * I-pipe entry point: demux core vector @vec to an IRQ (same scheme as
 * do_irq()) and feed it into the interrupt pipeline.  Returns non-zero
 * only when the root domain is unstalled, i.e. pending root-domain IRQs
 * may be synchronized by the caller; 0 otherwise (including spurious).
 *
 * NOTE(review): unlike do_irq(), the #if below does not cover
 * BF538_FAMILY/CONFIG_BF51x and always reads the core-A SIC registers
 * (no smp_processor_id() switch to SICB) — confirm IPIPE is not meant
 * to run on those configurations/the second core.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
	int irq;

	if (likely(vec == EVT_IVTMR_P)) {
		irq = IRQ_CORETMR;
		goto handle_irq;
	}

	/* System sync before sampling the SIC status registers. */
	SSYNC();

#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
	{
		unsigned long sic_status[3];

		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
#ifdef CONFIG_BF54x
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
	}
#else
	{
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			} else if (sic_status & ivg->isrflag)
				break;
		}
	}
#endif
	irq = ivg->irqno;

	if (irq == IRQ_SYSTMR) {
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
		/* This is basically what we need from the register frame. */
		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
		/* Fake the IRQ-disable bit when a non-root domain was
		 * preempted, clear it otherwise. */
		if (!ipipe_root_domain_p)
			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
		else
			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
	}

handle_irq:

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (ipipe_root_domain_p)
		return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));

	return 0;
}
  1150. #endif /* CONFIG_IPIPE */