octeon-irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

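/*
 * Map a Linux cpu number to its physical Octeon core id.  With SMP the
 * logical cpu numbering can differ from the hardware numbering, so
 * cpu_logical_map() is consulted; without SMP only the current core exists.
 */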
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/* There is a race here.  We should fix it.  */
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

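/*
 * irq_chip for the eight CP0 interrupt lines (the two software
 * interrupts and six hardware lines); they are masked and unmasked
 * through the IM bits of the Status register.
 */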
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

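/*
 * As used in this driver, IP2 is serviced from the SUM0/EN0 registers
 * and IP3 from SUM1/EN1.  The CIU CSRs are indexed per core and per
 * interrupt line, so coreid * 2 addresses a core's IP2 registers and
 * coreid * 2 + 1 its IP3 registers.
 */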
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

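/*
 * The *_W1S/*_W1C register forms used below are write-1-to-set and
 * write-1-to-clear views of the enable registers: a write changes only
 * the bits that are set in the written mask, so a single store updates
 * one enable bit without a read-modify-write cycle or any locking.
 */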
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));

		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
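/*
 * In the lockless chip, .ack masks the source on the local core and
 * .eoi re-enables it, in effect keeping a CIU interrupt masked while
 * it is handled rather than globally disabling the IP2 line as the
 * older CIU path above does.
 */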
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_disable_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time.  During an enable
	 * the cores don't interfere with each other.  During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));

		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_disable_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
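/*
 * MSI irqs OCTEON_IRQ_MSI_BIT0..OCTEON_IRQ_MSI_BIT63 map one-to-one
 * onto bits of the 64-bit MSI receive and enable CSRs.
 */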
static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 16 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
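
	/* Enable the two CIU lines, IP2 and IP3, in the Status register. */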
	set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
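
	/*
	 * Service interrupts until no unmasked cause bits remain set.
	 * IP2 and IP3 fan out to the CIU sum registers and are checked
	 * first; fls64() dispatches the highest numbered pending source.
	 */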
	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}

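/*
 * Called during cpu offlining: mask the per-core interrupt lines
 * locally and spread any CIU interrupts enabled on this core across
 * the online cpus.
 */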
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */