book3s_xics.c

/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a mutex protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 *
 * - ioctl's to save/restore the entire state for snapshot & migration
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq);

static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u16 src;

        XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
                return -EINVAL;
        }
        state = &ics->irq_state[src];
        if (!state->exists)
                return -EINVAL;

        /*
         * We set state->asserted locklessly. This should be fine as
         * we are the only setter, thus concurrent access is undefined
         * to begin with.
         */
        if (level == KVM_INTERRUPT_SET_LEVEL)
                state->asserted = 1;
        else if (level == KVM_INTERRUPT_UNSET) {
                state->asserted = 0;
                return 0;
        }

        /* Attempt delivery */
        icp_deliver_irq(xics, NULL, irq);

        return 0;
}
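
/*
 * Re-attempt delivery of every source in this ICS whose resend flag is
 * still set. The ICS mutex is dropped around each icp_deliver_irq() call
 * since that function takes the ICS lock itself.
 */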

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
                             struct kvmppc_icp *icp)
{
        int i;

        mutex_lock(&ics->lock);

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];

                if (!state->resend)
                        continue;

                XICS_DBG("resend %#x prio %#x\n", state->number,
                         state->priority);

                mutex_unlock(&ics->lock);
                icp_deliver_irq(xics, icp, state->number);
                mutex_lock(&ics->lock);
        }

        mutex_unlock(&ics->lock);
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;
        bool deliver;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        icp = kvmppc_xics_find_server(kvm, server);
        if (!icp)
                return -EINVAL;

        mutex_lock(&ics->lock);

        XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
                 irq, server, priority,
                 state->masked_pending, state->resend);

        state->server = server;
        state->priority = priority;
        deliver = false;
        if ((state->masked_pending || state->resend) && priority != MASKED) {
                state->masked_pending = 0;
                deliver = true;
        }

        mutex_unlock(&ics->lock);

        if (deliver)
                icp_deliver_irq(xics, icp, irq);

        return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
        struct kvmppc_xics *xics = kvm->arch.xics;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;

        if (!xics)
                return -ENODEV;

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return -EINVAL;
        state = &ics->irq_state[src];

        mutex_lock(&ics->lock);
        *server = state->server;
        *priority = state->priority;
        mutex_unlock(&ics->lock);

        return 0;
}

/* -- ICP routines, including hcalls -- */
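
/*
 * Attempt to move the ICP from "old" to "new" state with a single
 * cmpxchg64 on the packed state word. On success, recompute out_ee and
 * raise (never clear) the external interrupt on the target vcpu if the
 * new state calls for it. Returns false if the swap lost a race and the
 * caller must re-read the state and retry.
 */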

static inline bool icp_try_update(struct kvmppc_icp *icp,
                                  union kvmppc_icp_state old,
                                  union kvmppc_icp_state new,
                                  bool change_self)
{
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        XICS_DBG("UPD [%04x] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 icp->server_num,
                 old.cppr, old.mfrr, old.pending_pri, old.xisr,
                 old.need_resend, old.out_ee);
        XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
                 new.cppr, new.mfrr, new.pending_pri, new.xisr,
                 new.need_resend, new.out_ee);

        /*
         * Check for output state update
         *
         * Note that this is racy since another processor could be updating
         * the state already. This is why we never clear the interrupt output
         * here, we only ever set it. The clear only happens prior to doing
         * an update and only by the processor itself. Currently we do it
         * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
         *
         * We also do not try to figure out whether the EE state has changed,
         * we unconditionally set it if the new state calls for it. The reason
         * for that is that we opportunistically remove the pending interrupt
         * flag when raising CPPR, so we need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee) {
                kvmppc_book3s_queue_irqprio(icp->vcpu,
                                            BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
                if (!change_self)
                        kvmppc_fast_vcpu_kick(icp->vcpu);
        }
 bail:
        return success;
}
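
/*
 * Walk the ICP's resend_map: each set bit names an ICS that flagged at
 * least one failed delivery. Clear the bit and let ics_check_resend()
 * retry every source of that ICS that still has its resend flag set.
 */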

static void icp_check_resend(struct kvmppc_xics *xics,
                             struct kvmppc_icp *icp)
{
        u32 icsid;

        /* Order this load with the test for need_resend in the caller */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_check_resend(xics, ics, icp);
        }
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                               u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        XICS_DBG("try deliver %#x(P:%#x) to server %#x\n", irq, priority,
                 icp->server_num);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                        new_state.mfrr > priority &&
                        new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_try_update(icp, old_state, new_state, false));

        return success;
}
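
/*
 * Deliver (or re-deliver) new_irq to the ICP currently serving it. "icp"
 * is only a hint: it may be NULL or point at the wrong server, in which
 * case the right one is looked up from the ICS entry. A successful
 * delivery may in turn reject a less favored interrupt, which is then
 * re-delivered by looping back to the top.
 */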

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends. We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery. Essentially the reject path
         * boils down to a failed delivery. Always.
         *
         * Now the interrupt could also have moved to a different target,
         * thus we may need to re-do the ICP lookup as well
         */

 again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        mutex_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
                                new_irq, state->server);
                        goto out;
                }
        }

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.
         *
         * However that would have the effect of losing a masked
         * interrupt that was rejected and isn't consistent with
         * the whole masked_pending business which is about not
         * losing interrupts that occur while masked.
         *
         * I don't differentiate normal deliveries and resends, this
         * implementation will differ from PAPR and not lose such
         * interrupts.
         */
        if (state->priority == MASKED) {
                XICS_DBG("irq %#x masked pending\n", new_irq);
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery, this will set the need_resend flag
         * in the ICP as part of the atomic transaction if the
         * delivery is not possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * icp mutex.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_try_to_deliver() the target
         * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        mutex_unlock(&ics->lock);
                        new_irq = reject;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt, so we need to set the
                 * resend map bit and mark the ICS state as needing a resend
                 */
                set_bit(ics->icsid, icp->resend_map);
                state->resend = 1;

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between icp_try_to_deliver() atomic update and now, then
                 * we know it might have missed the resend_map bit. So we
                 * retry
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        mutex_unlock(&ics->lock);
                        goto again;
                }
        }
 out:
        mutex_unlock(&ics->lock);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                          u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify ICS of a potential resend. This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * here. In the PAPR, this state will be used for both
         * Set_MFRR and Down_CPPR. However, we know that we aren't
         * changing the MFRR state here so we don't need to handle
         * the case of an MFRR causing a reject of a pending irq,
         * this will have been handled when the MFRR was set in the
         * first place.
         *
         * Thus we don't have to handle rejects, only resends.
         *
         * When implementing real mode for HV KVM, resend will lead to
         * a H_TOO_HARD return and the whole transaction will be handled
         * in virtual mode.
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non-existent
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        WARN_ON(new_state.xisr != XICS_IPI &&
                                new_state.xisr != 0);
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch/clear resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Now handle resend checks. Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here too
         */
        if (resend)
                icp_check_resend(xics, icp);
}
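
/*
 * H_XIRR: the value handed back to the guest is the pending XISR in the
 * low 24 bits with the previous CPPR in the top byte, matching the XIRR
 * register layout. The XISR is then cleared and the CPPR raised to the
 * pending priority, all in one atomic ICP update.
 */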

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        /* First, remove EE from the processor */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, then clear the XISR & set CPPR to the
         * pending priority
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_try_update(icp, old_state, new_state, true));

        XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

        return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                                 unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp;
        u32 reject;
        bool resend;
        bool local;

        XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
                 vcpu->vcpu_id, server, mfrr);

        icp = vcpu->arch.icp;
        local = icp->server_num == server;
        if (!local) {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be rejected as there can be no XISR to
         * reject. If the MFRR is being made less favored then
         * there might be a previously-rejected interrupt needing
         * to be resent.
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it as in
         *
         * ICP state: Check_IPI
         */
        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri)
                                reject = new_state.xisr;
                        new_state.pending_pri = mfrr;
                        new_state.xisr = XICS_IPI;
                }

                if (mfrr > old_state.mfrr && mfrr > new_state.cppr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_try_update(icp, old_state, new_state, local));

        /* Handle reject */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);

        /* Handle resend */
        if (resend)
                icp_check_resend(xics, icp);

        return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself
         */
        if (cppr > icp->state.cppr)
                icp_down_cppr(xics, icp, cppr);
        else if (cppr == icp->state.cppr)
                return;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed
         */
        kvmppc_book3s_dequeue_irqprio(icp->vcpu,
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

        do {
                old_state = new_state = ACCESS_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_try_update(icp, old_state, new_state, true));

        /*
         * Check for rejects. They are handled by doing a new delivery
         * attempt (see comments in icp_deliver_irq).
         */
        if (reject && reject != XICS_IPI)
                icp_deliver_irq(xics, icp, reject);
}
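
/*
 * H_EOI: the xirr argument carries the new CPPR in its top byte (restored
 * via Down_CPPR) and the interrupt source number in the low 24 bits,
 * which is re-delivered if a level source is still asserted.
 */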

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u32 irq = xirr & 0x00ffffff;
        u16 src;

        XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie more favored), we do not check for rejection of
         * a pending interrupt, this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update
         *
         * ICP State: Down_CPPR which we handle
         * in a separate function as it's shared with H_CPPR.
         */
        icp_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                return H_SUCCESS;

        /*
         * EOI handling: If the interrupt is still asserted, we need to
         * resend it. We can take a lockless "peek" at the ICS state here.
         *
         * "Message" interrupts will never have "asserted" set
         */
        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics) {
                XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
                return H_PARAMETER;
        }
        state = &ics->irq_state[src];

        /* Still asserted, resend it */
        if (state->asserted)
                icp_deliver_irq(xics, icp, irq);

        return H_SUCCESS;
}
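
/*
 * Finish work that the real-mode XICS handlers could not complete and
 * deferred via icp->rm_action: kicking a vcpu, re-checking resends and
 * re-delivering a rejected interrupt are all done here in virtual mode.
 */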

static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;

        XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
                 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

        if (icp->rm_action & XICS_RM_KICK_VCPU)
                kvmppc_fast_vcpu_kick(icp->rm_kick_target);
        if (icp->rm_action & XICS_RM_CHECK_RESEND)
                icp_check_resend(xics, icp);
        if (icp->rm_action & XICS_RM_REJECT)
                icp_deliver_irq(xics, icp, icp->rm_reject);

        icp->rm_action = 0;

        return H_SUCCESS;
}

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        unsigned long res;
        int rc = H_SUCCESS;

        /* Check if we have an ICP */
        if (!xics || !vcpu->arch.icp)
                return H_HARDWARE;

        /* Check for real mode returning too hard */
        if (xics->real_mode)
                return kvmppc_xics_rm_complete(vcpu, req);

        switch (req) {
        case H_XIRR:
                res = kvmppc_h_xirr(vcpu);
                kvmppc_set_gpr(vcpu, 4, res);
                break;
        case H_CPPR:
                kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_EOI:
                rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
                break;
        case H_IPI:
                rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
                                  kvmppc_get_gpr(vcpu, 5));
                break;
        }

        return rc;
}

/* -- Initialisation code etc. -- */

static int xics_debug_show(struct seq_file *m, void *private)
{
        struct kvmppc_xics *xics = m->private;
        struct kvm *kvm = xics->kvm;
        struct kvm_vcpu *vcpu;
        int icsid, i;

        if (!kvm)
                return 0;

        seq_printf(m, "=========\nICP state\n=========\n");

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_icp *icp = vcpu->arch.icp;
                union kvmppc_icp_state state;

                if (!icp)
                        continue;

                state.raw = ACCESS_ONCE(icp->state.raw);
                seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
                           icp->server_num, state.xisr,
                           state.pending_pri, state.cppr, state.mfrr,
                           state.out_ee, state.need_resend);
        }

        for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!ics)
                        continue;

                seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
                           icsid);

                mutex_lock(&ics->lock);

                for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                        struct ics_irq_state *irq = &ics->irq_state[i];

                        seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
                                   irq->number, irq->server, irq->priority,
                                   irq->saved_priority, irq->asserted,
                                   irq->resend, irq->masked_pending);
                }
                mutex_unlock(&ics->lock);
        }
        return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
        .open = xics_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
        char *name;

        name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
        if (!name) {
                pr_err("%s: no memory for name\n", __func__);
                return;
        }

        xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
                                           xics, &xics_debug_fops);

        pr_debug("%s: created %s\n", __func__, name);
        kfree(name);
}
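
/*
 * ICS structures are allocated lazily, keyed by the top bits of the
 * interrupt number (irq >> KVMPPC_XICS_ICS_SHIFT). kvm->lock serialises
 * concurrent creators, and the ICS pointer is only published in
 * xics->ics[] after its sources have been initialised (note the
 * smp_wmb()).
 */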

struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
                                          struct kvmppc_xics *xics, int irq)
{
        struct kvmppc_ics *ics;
        int i, icsid;

        icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* ICS already exists - somebody else got here first */
        if (xics->ics[icsid])
                goto out;

        /* Create the ICS */
        ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
        if (!ics)
                goto out;

        mutex_init(&ics->lock);
        ics->icsid = icsid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
                ics->irq_state[i].priority = MASKED;
                ics->irq_state[i].saved_priority = MASKED;
        }
        smp_wmb();
        xics->ics[icsid] = ics;

        if (icsid > xics->max_icsid)
                xics->max_icsid = icsid;

 out:
        mutex_unlock(&kvm->lock);
        return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
        struct kvmppc_icp *icp;

        if (!vcpu->kvm->arch.xics)
                return -ENODEV;

        if (kvmppc_xics_find_server(vcpu->kvm, server_num))
                return -EEXIST;

        icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
        if (!icp)
                return -ENOMEM;

        icp->vcpu = vcpu;
        icp->server_num = server_num;
        icp->state.mfrr = MASKED;
        icp->state.pending_pri = MASKED;
        vcpu->arch.icp = icp;

        XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

        return 0;
}

/* -- ioctls -- */
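
/*
 * KVM_INTERRUPT_SET_LEVEL latches state->asserted and KVM_INTERRUPT_UNSET
 * clears it again; plain KVM_INTERRUPT_SET behaves as an edge and only
 * triggers a one-shot delivery (see ics_deliver_irq() above).
 */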

int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args)
{
        struct kvmppc_xics *xics;
        int r;

        /* locking against multiple callers? */

        xics = kvm->arch.xics;
        if (!xics)
                return -ENODEV;

        switch (args->level) {
        case KVM_INTERRUPT_SET:
        case KVM_INTERRUPT_SET_LEVEL:
        case KVM_INTERRUPT_UNSET:
                r = ics_deliver_irq(xics, args->irq, args->level);
                break;
        default:
                r = -EINVAL;
        }

        return r;
}

void kvmppc_xics_free(struct kvmppc_xics *xics)
{
        int i;
        struct kvm *kvm = xics->kvm;

        debugfs_remove(xics->dentry);

        if (kvm)
                kvm->arch.xics = NULL;

        for (i = 0; i <= xics->max_icsid; i++)
                kfree(xics->ics[i]);
        kfree(xics);
}

int kvm_xics_create(struct kvm *kvm, u32 type)
{
        struct kvmppc_xics *xics;
        int ret = 0;

        xics = kzalloc(sizeof(*xics), GFP_KERNEL);
        if (!xics)
                return -ENOMEM;

        xics->kvm = kvm;

        /* Already there ? */
        mutex_lock(&kvm->lock);
        if (kvm->arch.xics)
                ret = -EEXIST;
        else
                kvm->arch.xics = xics;
        mutex_unlock(&kvm->lock);

        if (ret) {
                /* Another XICS was already registered, free the unused one */
                kfree(xics);
                return ret;
        }

        xics_debugfs_init(xics);

#ifdef CONFIG_KVM_BOOK3S_64_HV
        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /* Enable real mode support */
                xics->real_mode = ENABLE_REALMODE;
                xics->real_mode_dbg = DEBUG_REALMODE;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        return 0;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.icp)
                return;
        kfree(vcpu->arch.icp);
        vcpu->arch.icp = NULL;
        vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}