/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * If an IRQ is being processed while we are disabling it, the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
        .name = "Core",
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};
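
/*
 * Illustrative note (not part of the original driver): the
 * "0x100 << bit" masks above address the CP0 Status.IM / Cause.IP
 * bits, which start at bit 8. Assuming the usual MIPS layout,
 * OCTEON_IRQ_SW0 gives bit == 0, so 0x100 << 0 == STATUSF_IP0
 * (software interrupt 0), and the CP0 timer on IP7 would use
 * 0x100 << 7 == STATUSF_IP7.
 */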

static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time. During an enable
         * the cores don't interfere with each other. During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
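
/*
 * Illustrative sketch (not part of the original driver): the rwlock
 * above is used "inverted". Enables take the lock for *reading*
 * because any number of cores may safely update their own per-core
 * EN0 register concurrently; a disable takes it for *writing* because
 * it must walk every core's register without a racing enable putting
 * a bit back. The hypothetical pattern below shows the same idea on
 * a plain per-cpu bitmask.
 */
#if 0
static DEFINE_RWLOCK(example_rwlock);
static u64 example_mask[NR_CPUS];       /* stands in for the per-core EN0 CSRs */

static void example_enable(int bit)
{
        unsigned long flags;

        /* Concurrent enables are fine: each core touches only its own word. */
        read_lock_irqsave(&example_rwlock, flags);
        example_mask[smp_processor_id()] |= 1ull << bit;
        read_unlock_irqrestore(&example_rwlock, flags);
}

static void example_disable_all(int bit)
{
        unsigned long flags;
        int cpu;

        /* The writer excludes all in-flight enables while clearing every core. */
        write_lock_irqsave(&example_rwlock, flags);
        for_each_online_cpu(cpu)
                example_mask[cpu] &= ~(1ull << bit);
        write_unlock_irqrestore(&example_rwlock, flags);
}
#endif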

static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
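
/*
 * Illustrative comparison (not part of the original driver): the
 * W1{S,C} registers turn the locked read-modify-write sequence used
 * by the older chips into a single store, because the hardware sets
 * (W1S) or clears (W1C) exactly the bits written as 1. Assuming a
 * hypothetical csr address, the two styles look like this:
 */
#if 0
static void example_enable_classic(u64 en_addr, int bit)
{
        /* Read-modify-write: needs a lock to avoid losing concurrent updates. */
        u64 en = cvmx_read_csr(en_addr);
        en |= 1ull << bit;
        cvmx_write_csr(en_addr, en);
}

static void example_enable_w1s(u64 w1s_addr, int bit)
{
        /* One store: the hardware sets only bit 'bit', atomically. */
        cvmx_write_csr(w1s_addr, 1ull << bit);
}
#endif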

/*
 * CIU timer type interrupts must be acknowledged by writing a '1'
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }

        return 0;
}
#endif
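
/*
 * Illustrative usage (not part of the original driver): the core irq
 * layer reaches .set_affinity through irq_set_affinity(). With the v2
 * chip, pinning an irq to one cpu reduces to a W1S on that core's
 * enable register and a W1C on every other core's. A hypothetical
 * caller:
 */
#if 0
static void example_pin_irq_to_cpu(unsigned int irq, int cpu)
{
        cpumask_t mask;

        cpumask_clear(&mask);
        cpumask_set_cpu(cpu, &mask);
        irq_set_affinity(irq, &mask);   /* ends up in *_set_affinity_v2() */
}
#endif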

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
        .name = "CIU0",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .name = "CIU0-T",
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them. This
         * way we can use a per core register and avoid any out of
         * core locking requirements. This has the side effect that
         * CIU interrupts can't be processed recursively. We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again. We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        /*
         * A read lock is used here to make sure only one core is ever
         * updating the CIU enable bits at a time. During an enable
         * the cores don't interfere with each other. During a disable
         * the write lock stops any enables that might cause a
         * problem.
         */
        read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

        return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }

        return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
        .name = "CIU1",
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
static void octeon_irq_msi_ack(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* These chips have PCI */
                cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        } else {
                /*
                 * These chips have PCIe. Thankfully the ACK doesn't
                 * need any locking.
                 */
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
                               1ull << (irq - OCTEON_IRQ_MSI_BIT0));
        }
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
        /* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /*
                 * Octeon PCI doesn't have the ability to mask/unmask
                 * MSI interrupts individually. Instead of
                 * masking/unmasking them in groups of 16, we simply
                 * assume MSI devices are well behaved. MSI
                 * interrupts are always enabled and the ACK is assumed
                 * to be enough.
                 */
        } else {
                /*
                 * These chips have PCIe. Note that we only support
                 * the first 64 MSI interrupts. Unfortunately all the
                 * MSI enables are in the same register. We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static void octeon_irq_msi_disable(unsigned int irq)
{
        if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
                /* See comment in enable */
        } else {
                /*
                 * These chips have PCIe. Note that we only support
                 * the first 64 MSI interrupts. Unfortunately all the
                 * MSI enables are in the same register. We use
                 * MSI0's lock to control access to them all.
                 */
                uint64_t en;
                unsigned long flags;

                spin_lock_irqsave(&octeon_irq_msi_lock, flags);
                en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
                cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
                cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
                spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
        }
}

static struct irq_chip octeon_irq_chip_msi = {
        .name = "MSI",
        .enable = octeon_irq_msi_enable,
        .disable = octeon_irq_msi_disable,
        .ack = octeon_irq_msi_ack,
        .eoi = octeon_irq_msi_eoi,
};
#endif
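
/*
 * Illustrative note (not part of the original driver): unlike the
 * per-core CIU enables, all 64 PCIe MSI enable bits live in the single
 * shared NPEI_MSI_ENB0 register, so there is real shared state here
 * and a plain spinlock around the read-modify-write is the natural
 * choice. Without it, two cpus enabling, say, MSI 3 and MSI 9
 * concurrently could each read the same old value and one of the two
 * updates would be lost.
 */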

void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 Mips internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer,
                                                 handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0,
                                                 handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

#ifdef CONFIG_PCI_MSI
        /* 152 - 215 PCI/PCIe MSI interrupts */
        for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
                                         handle_percpu_irq);
        }
#endif

        set_c0_status(0x300 << 2);
}
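
/*
 * Illustrative note (not part of the original driver): 0x300 << 2 ==
 * 0xc00 == STATUSF_IP2 | STATUSF_IP3. The final set_c0_status() in
 * arch_init_irq() therefore unmasks exactly the two CIU summary lines
 * (IP2 for CIU_INT_SUM0, IP3 for CIU_INT_SUM1) that
 * plat_irq_dispatch() below demultiplexes.
 */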

asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}
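
/*
 * Illustrative arithmetic (not part of the original driver): fls64()
 * returns the 1-based index of the highest set bit, so the dispatch
 * above services the highest-numbered pending source first. If, say,
 * bit 5 of SUM0 is the highest pending bit:
 *
 *      ciu_sum == 1ull << 5
 *      fls64(ciu_sum) == 6
 *      do_IRQ(6 + OCTEON_IRQ_WORKQ0 - 1) == do_IRQ(OCTEON_IRQ_WORKQ0 + 5)
 *
 * The same pattern with fls() maps a pending IP bit (bits 8..15 of
 * Cause) back to MIPS_CPU_IRQ_BASE + 0..7.
 */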

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                         (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                         (1ull << bit)) >> bit;
        }
        return isset;
}

void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif

        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}
#endif /* CONFIG_HOTPLUG_CPU */