octeon-irq.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

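/*
 * Map a Linux CPU number to the hardware core id used by the CIU
 * registers.  With SMP this is the logical map entry; on UP builds
 * only the current core exists.
 */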
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

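/*
 * The "Core" irq_chip below drives the CP0 interrupt lines
 * (OCTEON_IRQ_SW0 ... OCTEON_IRQ_TIMER) by setting and clearing the
 * corresponding IM bits in the Status register.
 */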
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
	{
		int index = cvmx_get_core_num() * 2;
		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	}
	default:
		break;
	}

	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

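/*
 * Pick the core that should receive the next occurrence of this irq.
 * With more than one CPU in the affinity mask the irq is rotated
 * round-robin over the online CPUs in the mask, with a single CPU it
 * stays there, and with an empty mask it falls back to the current core.
 */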
static int next_coreid_for_irq(struct irq_desc *desc)
{
#ifdef CONFIG_SMP
	int coreid;
	int weight = cpumask_weight(desc->affinity);

	if (weight > 1) {
		int cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, desc->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
		coreid = octeon_coreid_for_cpu(cpu);
	} else if (weight == 1) {
		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
	} else {
		coreid = cvmx_get_core_num();
	}
	return coreid;
#else
	return cvmx_get_core_num();
#endif
}

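/* Enable an irq in the CIU EN0 register of the chosen core. */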
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

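/* Mailbox irqs are per-CPU, so only the current core is enabled. */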
static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

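/* Disable an irq in the EN0 register of every online core. */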
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	index = cvmx_get_core_num() * 2;
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	default:
		break;
	}

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if (likely((desc->status & IRQ_DISABLED) == 0))
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en0 |= 1ull << bit;
		} else {
			en0 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox_v2,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int coreid = bit;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
	int index;
	int coreid = irq - OCTEON_IRQ_WDOG0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = coreid * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en1 |= 1ull << bit;
		} else {
			en1 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
};

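/*
 * Ack helpers used by plat_irq_dispatch(); the v2 or non-v2 variant is
 * selected at boot in arch_init_irq() based on the chip model.
 */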
static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);

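/* Register irq_chips and handlers for all Octeon interrupt sources. */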
void __init arch_init_irq(void)
{
	unsigned int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_mbox;
	struct irq_chip *chip1;
	struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
	} else {
		octeon_ciu0_ack = octeon_irq_ciu0_ack;
		octeon_ciu1_ack = octeon_irq_ciu1_ack;
		chip0 = &octeon_irq_chip_ciu0;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
		chip1 = &octeon_irq_chip_ciu1;
		chip1_wd = &octeon_irq_chip_ciu1_wd;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

	set_c0_status(0x300 << 2);
}

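/*
 * Main interrupt dispatch: read Cause and Status, then for IP2 (CIU
 * SUM0) and IP3 (CIU SUM1) find the highest pending enabled source,
 * ack it and call do_IRQ(), looping until nothing is pending.
 */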
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
	unsigned int irq;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
				octeon_ciu0_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
				octeon_ciu1_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
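/*
 * When a CPU goes offline, migrate every irq that is affine to it so
 * that no interrupt is left targeting the departing core.
 */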
void fixup_irqs(void)
{
	int irq;
	struct irq_desc *desc;
	cpumask_t new_affinity;
	unsigned long flags;
	int do_set_affinity;
	int cpu;

	cpu = smp_processor_id();

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
		desc = irq_to_desc(irq);
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			/* The eoi function will disable them on this CPU. */
			desc->chip->eoi(irq);
			break;
		case OCTEON_IRQ_WDOG0:
		case OCTEON_IRQ_WDOG1:
		case OCTEON_IRQ_WDOG2:
		case OCTEON_IRQ_WDOG3:
		case OCTEON_IRQ_WDOG4:
		case OCTEON_IRQ_WDOG5:
		case OCTEON_IRQ_WDOG6:
		case OCTEON_IRQ_WDOG7:
		case OCTEON_IRQ_WDOG8:
		case OCTEON_IRQ_WDOG9:
		case OCTEON_IRQ_WDOG10:
		case OCTEON_IRQ_WDOG11:
		case OCTEON_IRQ_WDOG12:
		case OCTEON_IRQ_WDOG13:
		case OCTEON_IRQ_WDOG14:
		case OCTEON_IRQ_WDOG15:
			/*
			 * These have special per CPU semantics and
			 * are handled in the watchdog driver.
			 */
			break;
		default:
			raw_spin_lock_irqsave(&desc->lock, flags);
			/*
			 * If this irq has an action, it is in use and
			 * must be migrated if it has affinity to this
			 * cpu.
			 */
			if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
				if (cpumask_weight(desc->affinity) > 1) {
					/*
					 * It has multi CPU affinity,
					 * just remove this CPU from
					 * the affinity set.
					 */
					cpumask_copy(&new_affinity, desc->affinity);
					cpumask_clear_cpu(cpu, &new_affinity);
				} else {
					/*
					 * Otherwise, put it on the lowest
					 * numbered online CPU.
					 */
					cpumask_clear(&new_affinity);
					cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
				}
				do_set_affinity = 1;
			} else {
				do_set_affinity = 0;
			}
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			if (do_set_affinity)
				irq_set_affinity(irq, &new_affinity);

			break;
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */