octeon-irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
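
/*
 * Map a Linux logical CPU number to the Octeon hardware core ID.  On
 * SMP kernels the mapping comes from cpu_logical_map(); on UP kernels
 * the only CPU is the one we are running on.
 */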
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;

        /* There is a race here.  We should fix it.  */

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}
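
/*
 * The "Core" irq_chip covers the eight interrupt lines wired directly
 * into the MIPS core (two software, six hardware), all controlled
 * through the CP0 Status/Cause registers as above.
 */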
static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
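/*
 * Route the irq by setting or clearing its enable bit in EN0 on every
 * online core, holding the write lock so no concurrent enable can race
 * with the update.
 */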
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time.  During an enable
         * the cores don't interfere with each other.  During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
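/*
 * As for CIU0: walk every online core and set or clear the EN1 enable
 * bit according to the destination mask, holding the write lock.
 */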
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
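/*
 * Acknowledge an MSI by writing its bit back to the received-MSI
 * register: NPI for PCI parts, NPEI for PCIe parts.
 */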
static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe.  Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually.  Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved.  MSI
                 * interrupts are always enabled and the ACK is assumed
                 * to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe.  Note that we only support
                 * the first 64 MSI interrupts.  Unfortunately all the
                 * MSI enables are in the same register.  We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;
                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif
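
/*
 * Wire every irq number to its chip and handler.  The CN58XX, CN56XX
 * and CN52XX pass 2 models have the EN*_W1{S,C} registers and get the
 * lockless v2 chips; everything else uses the rwlock-protected
 * read-modify-write chips.
 */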
void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif

        set_c0_status(0x300 << 2);
}
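
/*
 * Low level dispatch loop: mask Cause against Status, service IP2 (CIU
 * SUM0) and IP3 (CIU SUM1) sources first, fall back to the remaining
 * core interrupt lines, and loop until no enabled interrupt is still
 * pending.
 */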
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}

#ifdef CONFIG_HOTPLUG_CPU
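/*
 * Report whether the given CIU irq is currently enabled in EN0/EN1
 * for the core backing the given cpu.
 */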
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}
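
/*
 * Mask the per-core interrupt lines on this CPU and migrate any CIU
 * interrupts still routed here to the set of CPUs that remain online
 * (used when a CPU is taken offline).
 */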
void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif

        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}

#endif /* CONFIG_HOTPLUG_CPU */