/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
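
/*
 * As the code below shows, the legacy CIU enable registers are updated
 * with read-modify-write sequences from potentially several CPUs, so a
 * lock per register bank serializes the updates.  The W1S/W1C register
 * variants on newer chips make the locks unnecessary; see the _v2
 * handlers further down.
 */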
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

/* Map a logical CPU number to the physical Octeon core id. */
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
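
/*
 * CIU register geometry, as used throughout this file: each core has
 * two CIU interrupt lines, wired to CP0 IP2 and IP3.  The SUM0/EN0
 * registers for a core's IP2 line are indexed by (coreid * 2), and
 * the EN1 register for its IP3 line by (coreid * 2 + 1).
 */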

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
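
/*
 * Newer chips provide write-1-to-set (W1S) and write-1-to-clear (W1C)
 * aliases of the enable registers, so a single bit can be flipped with
 * one store instead of a locked read-modify-write sequence.
 */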

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}

	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
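
/*
 * The CIU1 handlers below mirror their CIU0 counterparts, but operate
 * on the second summary register (CIU_INT_SUM1/EN1, routed to IP3)
 * with OCTEON_IRQ_WDOG0 as the base irq number.
 */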
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}

	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually. Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved. MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif
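
/*
 * Irq number layout, per the range comments in the loops below:
 * OCTEON_IRQ_SW0..OCTEON_IRQ_TIMER are the eight CP0 core interrupts,
 * 24-87 come from CIU_INT_SUM0, 88-151 from CIU_INT_SUM1, and (with
 * CONFIG_PCI_MSI) 152-215 are the first 64 MSI vectors.
 */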
void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif

	/* Enable the two CIU lines (IP2 and IP3). */
	set_c0_status(0x300 << 2);
}
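
/*
 * Low level dispatch: IP2 is fed by CIU summary register 0, IP3 by
 * summary register 1; anything else is one of the eight CP0 core
 * interrupts.  fls()/fls64() return 1-based bit positions, hence the
 * "- 1" when converting the highest pending sum bit to an irq number,
 * and the "- 9" that maps cause bit 8 (IP0) to MIPS_CPU_IRQ_BASE.
 */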
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
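
/*
 * CPU hotplug support: before a cpu goes offline, disable its per-core
 * irqs and push any CIU irqs it was handling onto the remaining online
 * cpus.
 */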
#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}

void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */