/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
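
/*
 * One rwlock per CIU bank protects the per-core enable masks: cores
 * enabling a source in their own mask may run concurrently as
 * readers, while global operations (disable, affinity changes) take
 * the write side to exclude them.
 */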
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
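
/*
 * Map a Linux CPU number to its physical Octeon core id. On non-SMP
 * builds only the boot core runs, so the current core number is used.
 */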
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
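
/*
 * The CIU provides an enable register per core and per interrupt
 * line: for core N, CIU_INTX_EN0(2 * N) gates the sum0 sources onto
 * the core's IP2 line, while index 2 * N + 1 would gate them onto
 * IP3. This driver delivers sum0 over IP2, so all EN0 accesses below
 * use coreid * 2.
 */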
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time. During an enable
	 * the cores don't interfere with each other. During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
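
/*
 * Newer chips (the *_PASS2_X models selected in arch_init_irq below)
 * also provide write-one-to-set (W1S) and write-one-to-clear (W1C)
 * aliases of the CIU enable registers. A single bit can then be
 * changed with one write, with no read-modify-write cycle and
 * therefore no locking. The _v2 handlers below use them.
 */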

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Acknowledge the irq by disabling it on the current core, for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Re-enable the irq on the current core at EOI time, for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
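
/*
 * CIU1 covers the second bank of 64 sources (CIU_INT_SUM1, irqs
 * starting at OCTEON_IRQ_WDOG0). The handlers mirror their CIU0
 * counterparts but use the EN1 registers at index coreid * 2 + 1 and
 * the IP3 line instead of IP2.
 */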
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them. This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements. This has the side effect that
	 * CIU interrupts can't be processed recursively. We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again. We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	/*
	 * A read lock is used here to make sure only one core is ever
	 * updating the CIU enable bits at a time. During an enable
	 * the cores don't interfere with each other. During a disable
	 * the write lock stops any enables that might cause a
	 * problem.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Acknowledge the irq by disabling it on the current core, for chips
 * that have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core at EOI time, for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI
static DEFINE_SPINLOCK(octeon_irq_msi_lock);
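
/*
 * On PCIe models all 64 MSI enable bits live in a single NPEI
 * register, so octeon_irq_msi_lock serializes the read-modify-write
 * sequences in the enable/disable paths. The MSI receive (ACK)
 * registers need no locking.
 */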

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually. Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved. MSI
		 * interrupts are always enabled and the ACK is assumed
		 * to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe. Note that we only support
		 * the first 64 MSI interrupts. Unfortunately all the
		 * MSI enables are in the same register. We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif
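
/*
 * Wire every Octeon irq number to its irq_chip and handler. The
 * lockless W1S/W1C chip variants are used on models whose CIU has
 * those registers.
 */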
void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
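
	/*
	 * Unmask IP2 and IP3 in the core's Status register so the CIU
	 * lines can interrupt: 0x300 << 2 is STATUSF_IP2 | STATUSF_IP3.
	 */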
	set_c0_status(0x300 << 2);
}
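
/*
 * All interrupts funnel through here. IP2 carries the 64 CIU sum0
 * sources and IP3 the 64 sum1 sources; any other pending IM bit is a
 * core-local interrupt. fls64() yields the position of the highest
 * set bit, so when several CIU sources are pending the
 * highest-numbered one is dispatched first.
 */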
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		/* CIU0 (sum0) sources live in EN0 */
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		/* CIU1 (sum1) sources live in EN1 */
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
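
/*
 * Used by the CPU hotplug code when a CPU is taken offline: mask the
 * core-local interrupts, then, for every CIU interrupt still enabled
 * on this core, disable it and re-spread it across the remaining
 * online CPUs.
 */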
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif
	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */