/* octeon-irq.c */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
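
/*
 * The CIU registers are indexed by physical core number.  Under SMP the
 * logical CPU number Linux uses can differ from the hardware core
 * number, so cpu_logical_map() translates; on UP kernels the only core
 * is the one we are running on.
 */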

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
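
/*
 * The "core" irq helpers below manipulate the CP0 Status/Cause IM and
 * IP fields directly: mask bit n is (0x100 << n).  IP0 and IP1 are the
 * MIPS software interrupts, which stay latched in the Cause register
 * until software clears them, hence the extra clear_c0_cause() for
 * bits 0 and 1.
 */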

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it, the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled; don't re-enable it in that case.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
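
/*
 * In the SMP path below, octeon_irq_core_disable_local() is cast so the
 * irq number can travel through on_each_cpu()'s void * argument; every
 * CPU then masks the line in its own Status register, since the IM bits
 * are strictly per core.
 */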

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
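
/*
 * The CIU0 ack/eoi below rely on a trick: instead of touching the
 * shared CIU enable registers, ->ack masks the whole IP2 line in the
 * per-core Status register and ->eoi unmasks it again.  With a percpu
 * flow handler (handle_percpu_irq() is installed for all of these in
 * arch_init_irq() below), ->ack runs before and ->eoi after the
 * handler, so CIU interrupts simply cannot nest on a core and no
 * cross-core locking is needed on the hot path.
 */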

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
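
/*
 * CIU register indexing, as used throughout this file: the enable
 * registers come in per-core pairs, with index (coreid * 2) driving a
 * core's IP2 line and (coreid * 2 + 1) driving its IP3 line.  SUM0
 * sources (EN0) are routed to IP2 and SUM1 sources (EN1) to IP3.
 */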

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
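
/*
 * The _v2 helpers below are for chips whose CIU has write-1-to-set
 * (W1S) and write-1-to-clear (W1C) companion registers.  A sketch of
 * the idea (illustrative only, bit 5 chosen arbitrarily):
 *
 *	index = cvmx_get_core_num() * 2;
 *	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << 5);
 *	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), 1ull << 5);
 *
 * Each store affects only the bits written as 1 and leaves the rest of
 * the register alone, so a read-modify-write cycle under a spinlock is
 * no longer required.
 */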

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Acknowledge the irq by disabling it on the current core, for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' to
 * their SUM0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}
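
/*
 * Note the two-step timer ack: clearing the latched bit in SUM0 stops
 * the source from immediately re-asserting, and the chained v1/v2 ack
 * then masks delivery exactly as for any other CIU0 interrupt until
 * the handler finishes and ->eoi runs.
 */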

/*
 * Re-enable the irq on the current core after it has been acked, for
 * chips that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}
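
/*
 * Affinity on these chips is expressed purely through the per-core
 * enable bits: the set_affinity helpers below turn a source on for
 * every core in the destination mask and off everywhere else, and any
 * enabled core may then take the interrupt.
 */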

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));

		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
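
/*
 * The "CIU0-T" timer chips differ from the plain CIU0 chips only in
 * their ->ack, which must also clear the latched SUM0 bit.  Everything
 * from here down is the CIU1 mirror image of the CIU0 code above: same
 * structure, but keyed off OCTEON_IRQ_WDOG0, routed over IP3, and
 * using the EN1 registers at odd indexes (coreid * 2 + 1).
 */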

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Acknowledge the irq by disabling it on the current core, for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core after it has been acked, for
 * chips that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));

		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

	/* Enable the CIU lines: 0x300 << 2 sets IM2 and IM3 (IP2/IP3). */
	set_c0_status(0x300 << 2);
}
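
/*
 * plat_irq_dispatch() below fans a pending CP0 interrupt out to a Linux
 * irq number.  fls64() is 1-based, so the highest pending SUM bit n
 * dispatches as (n + 1) + base - 1.  For example, if only bit 3 of
 * CIU_INT_SUM0 is pending, fls64() returns 4 and the dispatched irq is
 * OCTEON_IRQ_WORKQ0 + 3.  The loop keeps draining until no enabled
 * source remains pending.
 */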

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			/* IP bits live at 8..15; fls() - 9 recovers 0..7. */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	/*
	 * SUM0 sources live in EN0 and SUM1 sources in EN1; use the
	 * same OCTEON_IRQ_WDOG0 boundary as the bit calculation above
	 * (a plain "irq < 64" check would misread irqs 64-87).
	 */
	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
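
/*
 * fixup_irqs() runs on a CPU that is going offline: it masks the
 * per-core lines locally and, for any CIU source still enabled on this
 * CPU, disables it and resets its affinity to the online mask so
 * delivery continues on the CPUs that remain.
 */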

void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */