/*
 * OpenPIC emulation
 *
 * Copyright (c) 2004 Jocelyn Mayer
 *               2011 Alexander Graf
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/mpic.h>
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include "iodev.h"

#define MAX_CPU     32
#define MAX_SRC     256
#define MAX_TMR     4
#define MAX_IPI     4
#define MAX_MSI     8
#define MAX_IRQ     (MAX_SRC + MAX_IPI + MAX_TMR)
#define VID         0x03	/* MPIC version ID */

/* OpenPIC capability flags */
#define OPENPIC_FLAG_IDR_CRIT     (1 << 0)
#define OPENPIC_FLAG_ILR          (2 << 0)

/* OpenPIC address map */
#define OPENPIC_REG_SIZE             0x40000
#define OPENPIC_GLB_REG_START        0x0
#define OPENPIC_GLB_REG_SIZE         0x10F0
#define OPENPIC_TMR_REG_START        0x10F0
#define OPENPIC_TMR_REG_SIZE         0x220
#define OPENPIC_MSI_REG_START        0x1600
#define OPENPIC_MSI_REG_SIZE         0x200
#define OPENPIC_SUMMARY_REG_START    0x3800
#define OPENPIC_SUMMARY_REG_SIZE     0x800
#define OPENPIC_SRC_REG_START        0x10000
#define OPENPIC_SRC_REG_SIZE         (MAX_SRC * 0x20)
#define OPENPIC_CPU_REG_START        0x20000
#define OPENPIC_CPU_REG_SIZE         (0x100 + ((MAX_CPU - 1) * 0x1000))
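
/*
 * The 256 KiB register window thus decodes as (offsets relative to
 * reg_base): global registers at 0x0, global timers at 0x10F0, shared
 * MSI registers at 0x1600, interrupt summary registers at 0x3800,
 * per-source registers (0x20 bytes each) at 0x10000, and per-CPU
 * private registers (0x1000 bytes per CPU) at 0x20000.
 */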

struct fsl_mpic_info {
	int max_ext;
};

static struct fsl_mpic_info fsl_mpic_20 = {
	.max_ext = 12,
};

static struct fsl_mpic_info fsl_mpic_42 = {
	.max_ext = 12,
};

#define FRR_NIRQ_SHIFT    16
#define FRR_NCPU_SHIFT    8
#define FRR_VID_SHIFT     0

#define VID_REVISION_1_2  2
#define VID_REVISION_1_3  3

#define VIR_GENERIC       0x00000000	/* Generic Vendor ID */

#define GCR_RESET         0x80000000
#define GCR_MODE_PASS     0x00000000
#define GCR_MODE_MIXED    0x20000000
#define GCR_MODE_PROXY    0x60000000

#define TBCR_CI           0x80000000	/* count inhibit */
#define TCCR_TOG          0x80000000	/* toggles when decremented to zero */

#define IDR_EP_SHIFT      31
#define IDR_EP_MASK       (1 << IDR_EP_SHIFT)
#define IDR_CI0_SHIFT     30
#define IDR_CI1_SHIFT     29
#define IDR_P1_SHIFT      1
#define IDR_P0_SHIFT      0

#define ILR_INTTGT_MASK   0x000000ff
#define ILR_INTTGT_INT    0x00
#define ILR_INTTGT_CINT   0x01	/* critical */
#define ILR_INTTGT_MCP    0x02	/* machine check */
#define NUM_OUTPUTS       3

#define MSIIR_OFFSET      0x140
#define MSIIR_SRS_SHIFT   29
#define MSIIR_SRS_MASK    (0x7 << MSIIR_SRS_SHIFT)
#define MSIIR_IBS_SHIFT   24
#define MSIIR_IBS_MASK    (0x1f << MSIIR_IBS_SHIFT)
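
/*
 * Example MSIIR decode (see openpic_msi_write() below): a 32-bit write
 * of 0x21000000 to MSIIR has SRS = 1 (bits 31:29) and IBS = 1
 * (bits 28:24), so it sets bit 1 in shared register MSIR1 and raises
 * MSI interrupt source irq_msi + 1.
 */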

static int get_current_cpu(void)
{
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *vcpu = current->thread.kvm_vcpu;
	return vcpu ? vcpu->arch.irq_cpu_id : -1;
#else
	/* XXX */
	return -1;
#endif
}

static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx);

enum irq_type {
	IRQ_TYPE_NORMAL = 0,
	IRQ_TYPE_FSLINT,	/* FSL internal interrupt -- level only */
	IRQ_TYPE_FSLSPECIAL,	/* FSL timer/IPI interrupt, edge, no polarity */
};

struct irq_queue {
	/* Round up to the nearest 64 IRQs so that the queue length
	 * won't change when moving between 32 and 64 bit hosts.
	 */
	unsigned long queue[BITS_TO_LONGS((MAX_IRQ + 63) & ~63)];
	int next;
	int priority;
};
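
/*
 * In struct irq_queue above, "queue" is a bitmap of pending interrupt
 * numbers; "next" and "priority" cache the result of the most recent
 * IRQ_check() scan below: the highest-priority set bit, or -1 when the
 * queue is empty.
 */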

struct irq_source {
	uint32_t ivpr;		/* IRQ vector/priority register */
	uint32_t idr;		/* IRQ destination register */
	uint32_t destmask;	/* bitmap of CPU destinations */
	int last_cpu;
	int output;		/* IRQ level, e.g. ILR_INTTGT_INT */
	int pending;		/* TRUE if IRQ is pending */
	enum irq_type type;
	bool level:1;		/* level-triggered */
	bool nomask:1;		/* critical interrupts ignore mask on some FSL MPICs */
};

#define IVPR_MASK_SHIFT       31
#define IVPR_MASK_MASK        (1 << IVPR_MASK_SHIFT)
#define IVPR_ACTIVITY_SHIFT   30
#define IVPR_ACTIVITY_MASK    (1 << IVPR_ACTIVITY_SHIFT)
#define IVPR_MODE_SHIFT       29
#define IVPR_MODE_MASK        (1 << IVPR_MODE_SHIFT)
#define IVPR_POLARITY_SHIFT   23
#define IVPR_POLARITY_MASK    (1 << IVPR_POLARITY_SHIFT)
#define IVPR_SENSE_SHIFT      22
#define IVPR_SENSE_MASK       (1 << IVPR_SENSE_SHIFT)

#define IVPR_PRIORITY_MASK    (0xF << 16)
#define IVPR_PRIORITY(_ivprr_) ((int)(((_ivprr_) & IVPR_PRIORITY_MASK) >> 16))
#define IVPR_VECTOR(opp, _ivprr_) ((_ivprr_) & (opp)->vector_mask)

/* IDR[EP/CI] are only for FSL MPIC prior to v4.0 */
#define IDR_EP      0x80000000	/* external pin */
#define IDR_CI      0x40000000	/* critical interrupt */

struct irq_dest {
	struct kvm_vcpu *vcpu;

	int32_t ctpr;		/* CPU current task priority */
	struct irq_queue raised;
	struct irq_queue servicing;

	/* Count of IRQ sources asserting on non-INT outputs */
	uint32_t outputs_active[NUM_OUTPUTS];
};

struct openpic {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct kvm_io_device mmio;
	struct list_head mmio_regions;
	atomic_t users;

	gpa_t reg_base;
	spinlock_t lock;

	/* Behavior control */
	struct fsl_mpic_info *fsl;
	uint32_t model;
	uint32_t flags;
	uint32_t nb_irqs;
	uint32_t vid;
	uint32_t vir;		/* Vendor identification register */
	uint32_t vector_mask;
	uint32_t tfrr_reset;
	uint32_t ivpr_reset;
	uint32_t idr_reset;
	uint32_t brr1;
	uint32_t mpic_mode_mask;

	/* Global registers */
	uint32_t frr;		/* Feature reporting register */
	uint32_t gcr;		/* Global configuration register */
	uint32_t pir;		/* Processor initialization register */
	uint32_t spve;		/* Spurious vector register */
	uint32_t tfrr;		/* Timer frequency reporting register */

	/* Source registers */
	struct irq_source src[MAX_IRQ];

	/* Local registers per output pin */
	struct irq_dest dst[MAX_CPU];
	uint32_t nb_cpus;

	/* Timer registers */
	struct {
		uint32_t tccr;	/* Global timer current count register */
		uint32_t tbcr;	/* Global timer base count register */
	} timers[MAX_TMR];

	/* Shared MSI registers */
	struct {
		uint32_t msir;	/* Shared Message Signaled Interrupt Register */
	} msi[MAX_MSI];
	uint32_t max_irq;
	uint32_t irq_ipi0;
	uint32_t irq_tim0;
	uint32_t irq_msi;
};
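
/*
 * The register state above is protected by opp->lock: the MMIO
 * handlers and the register device-attribute paths take it (as
 * spin_lock_irq or spin_lock_irqsave) before touching it, and it is
 * dropped temporarily around kvm_notify_acked_irq() in the EOI
 * handler below.
 */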

static void mpic_irq_raise(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	struct kvm_interrupt irq = {
		.irq = KVM_INTERRUPT_SET_LEVEL,
	};

	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvm_vcpu_ioctl_interrupt(dst->vcpu, &irq);
}

static void mpic_irq_lower(struct openpic *opp, struct irq_dest *dst,
			   int output)
{
	if (!dst->vcpu) {
		pr_debug("%s: destination cpu %d does not exist\n",
			 __func__, (int)(dst - &opp->dst[0]));
		return;
	}

	pr_debug("%s: cpu %d output %d\n", __func__, dst->vcpu->arch.irq_cpu_id,
		output);

	if (output != ILR_INTTGT_INT)	/* TODO */
		return;

	kvmppc_core_dequeue_external(dst->vcpu);
}

static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
{
	set_bit(n_IRQ, q->queue);
}

static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
{
	clear_bit(n_IRQ, q->queue);
}

static inline int IRQ_testbit(struct irq_queue *q, int n_IRQ)
{
	return test_bit(n_IRQ, q->queue);
}

static void IRQ_check(struct openpic *opp, struct irq_queue *q)
{
	int irq = -1;
	int next = -1;
	int priority = -1;

	for (;;) {
		irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
		if (irq == opp->max_irq)
			break;

		pr_debug("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
			irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);

		if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
			next = irq;
			priority = IVPR_PRIORITY(opp->src[irq].ivpr);
		}
	}

	q->next = next;
	q->priority = priority;
}

static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
{
	/* XXX: optimize */
	IRQ_check(opp, q);

	return q->next;
}

static void IRQ_local_pipe(struct openpic *opp, int n_CPU, int n_IRQ,
			   bool active, bool was_active)
{
	struct irq_dest *dst;
	struct irq_source *src;
	int priority;

	dst = &opp->dst[n_CPU];
	src = &opp->src[n_IRQ];

	pr_debug("%s: IRQ %d active %d was %d\n",
		__func__, n_IRQ, active, was_active);

	if (src->output != ILR_INTTGT_INT) {
		pr_debug("%s: output %d irq %d active %d was %d count %d\n",
			__func__, src->output, n_IRQ, active, was_active,
			dst->outputs_active[src->output]);

		/* On Freescale MPIC, critical interrupts ignore priority,
		 * IACK, EOI, etc.  Before MPIC v4.1 they also ignore
		 * masking.
		 */
		if (active) {
			if (!was_active &&
			    dst->outputs_active[src->output]++ == 0) {
				pr_debug("%s: Raise OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_raise(opp, dst, src->output);
			}
		} else {
			if (was_active &&
			    --dst->outputs_active[src->output] == 0) {
				pr_debug("%s: Lower OpenPIC output %d cpu %d irq %d\n",
					__func__, src->output, n_CPU, n_IRQ);
				mpic_irq_lower(opp, dst, src->output);
			}
		}

		return;
	}

	priority = IVPR_PRIORITY(src->ivpr);

	/* Even if the interrupt doesn't have enough priority,
	 * it is still raised, in case ctpr is lowered later.
	 */
	if (active)
		IRQ_setbit(&dst->raised, n_IRQ);
	else
		IRQ_resetbit(&dst->raised, n_IRQ);

	IRQ_check(opp, &dst->raised);

	if (active && priority <= dst->ctpr) {
		pr_debug("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
			__func__, n_IRQ, priority, dst->ctpr, n_CPU);
		active = 0;
	}

	if (active) {
		if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
		    priority <= dst->servicing.priority) {
			pr_debug("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
				__func__, n_IRQ, dst->servicing.next, n_CPU);
		} else {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
				__func__, n_CPU, n_IRQ, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}
	} else {
		IRQ_get_next(opp, &dst->servicing);
		if (dst->raised.priority > dst->ctpr &&
		    dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->raised.next,
				dst->raised.priority, dst->ctpr,
				dst->servicing.priority, n_CPU);
			/* IRQ line stays asserted */
		} else {
			pr_debug("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
				__func__, n_IRQ, dst->ctpr,
				dst->servicing.priority, n_CPU);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		}
	}
}
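
/*
 * To summarize the INT path above: a newly-active source only asserts
 * the vcpu's external interrupt line if its priority is strictly above
 * both the destination's current task priority (ctpr) and the priority
 * of whatever interrupt is already being serviced; otherwise it just
 * sits in the raised queue until ctpr drops or an EOI uncovers it.
 */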

/* update pic state because registers for n_IRQ have changed value */
static void openpic_update_irq(struct openpic *opp, int n_IRQ)
{
	struct irq_source *src;
	bool active, was_active;
	int i;

	src = &opp->src[n_IRQ];
	active = src->pending;

	if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
		/* Interrupt source is disabled */
		pr_debug("%s: IRQ %d is disabled\n", __func__, n_IRQ);
		active = false;
	}

	was_active = !!(src->ivpr & IVPR_ACTIVITY_MASK);

	/*
	 * We don't have a similar check for already-active because
	 * ctpr may have changed and we need to withdraw the interrupt.
	 */
	if (!active && !was_active) {
		pr_debug("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
		return;
	}

	if (active)
		src->ivpr |= IVPR_ACTIVITY_MASK;
	else
		src->ivpr &= ~IVPR_ACTIVITY_MASK;

	if (src->destmask == 0) {
		/* No target */
		pr_debug("%s: IRQ %d has no target\n", __func__, n_IRQ);
		return;
	}

	if (src->destmask == (1 << src->last_cpu)) {
		/* Only one CPU is allowed to receive this IRQ */
		IRQ_local_pipe(opp, src->last_cpu, n_IRQ, active, was_active);
	} else if (!(src->ivpr & IVPR_MODE_MASK)) {
		/* Directed delivery mode */
		for (i = 0; i < opp->nb_cpus; i++) {
			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
			}
		}
	} else {
		/* Distributed delivery mode */
		for (i = src->last_cpu + 1; i != src->last_cpu; i++) {
			if (i == opp->nb_cpus)
				i = 0;

			if (src->destmask & (1 << i)) {
				IRQ_local_pipe(opp, i, n_IRQ, active,
					       was_active);
				src->last_cpu = i;
				break;
			}
		}
	}
}
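
/*
 * Example: with destmask 0x5 (CPUs 0 and 2) and IVPR[MODE] clear,
 * directed delivery pipes the event to both CPU 0 and CPU 2; with
 * IVPR[MODE] set, distributed delivery picks a single destination
 * round-robin, starting at last_cpu + 1, and remembers it in last_cpu.
 */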

static void openpic_set_irq(void *opaque, int n_IRQ, int level)
{
	struct openpic *opp = opaque;
	struct irq_source *src;

	if (n_IRQ >= MAX_IRQ) {
		WARN_ONCE(1, "%s: IRQ %d out of range\n", __func__, n_IRQ);
		return;
	}

	src = &opp->src[n_IRQ];
	pr_debug("openpic: set irq %d = %d ivpr=0x%08x\n",
		n_IRQ, level, src->ivpr);
	if (src->level) {
		/* level-sensitive irq */
		src->pending = level;
		openpic_update_irq(opp, n_IRQ);
	} else {
		/* edge-sensitive irq */
		if (level) {
			src->pending = 1;
			openpic_update_irq(opp, n_IRQ);
		}

		if (src->output != ILR_INTTGT_INT) {
			/* Edge-triggered interrupts shouldn't be used
			 * with non-INT delivery, but just in case,
			 * try to make it do something sane rather than
			 * cause an interrupt storm.  This is close to
			 * what you'd probably see happen in real hardware.
			 */
			src->pending = 0;
			openpic_update_irq(opp, n_IRQ);
		}
	}
}

static void openpic_reset(struct openpic *opp)
{
	int i;

	opp->gcr = GCR_RESET;
	/* Initialise controller registers */
	opp->frr = ((opp->nb_irqs - 1) << FRR_NIRQ_SHIFT) |
	    (opp->vid << FRR_VID_SHIFT);

	opp->pir = 0;
	opp->spve = -1 & opp->vector_mask;
	opp->tfrr = opp->tfrr_reset;
	/* Initialise IRQ sources */
	for (i = 0; i < opp->max_irq; i++) {
		opp->src[i].ivpr = opp->ivpr_reset;
		opp->src[i].idr = opp->idr_reset;

		switch (opp->src[i].type) {
		case IRQ_TYPE_NORMAL:
			opp->src[i].level =
			    !!(opp->ivpr_reset & IVPR_SENSE_MASK);
			break;

		case IRQ_TYPE_FSLINT:
			opp->src[i].ivpr |= IVPR_POLARITY_MASK;
			break;

		case IRQ_TYPE_FSLSPECIAL:
			break;
		}
	}
	/* Initialise IRQ destinations */
	for (i = 0; i < MAX_CPU; i++) {
		opp->dst[i].ctpr = 15;
		memset(&opp->dst[i].raised, 0, sizeof(struct irq_queue));
		opp->dst[i].raised.next = -1;
		memset(&opp->dst[i].servicing, 0, sizeof(struct irq_queue));
		opp->dst[i].servicing.next = -1;
	}
	/* Initialise timers */
	for (i = 0; i < MAX_TMR; i++) {
		opp->timers[i].tccr = 0;
		opp->timers[i].tbcr = TBCR_CI;
	}
	/* Go out of RESET state */
	opp->gcr = 0;
}
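
/*
 * Note the reset defaults: ctpr starts at 15, the highest task
 * priority, and delivery requires a source priority strictly greater
 * than ctpr, so every destination stays masked until the guest lowers
 * its CTPR; timers come up with the count-inhibit bit (TBCR_CI) set.
 */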

static inline uint32_t read_IRQreg_idr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].idr;
}

static inline uint32_t read_IRQreg_ilr(struct openpic *opp, int n_IRQ)
{
	if (opp->flags & OPENPIC_FLAG_ILR)
		return opp->src[n_IRQ].output;

	return 0xffffffff;
}

static inline uint32_t read_IRQreg_ivpr(struct openpic *opp, int n_IRQ)
{
	return opp->src[n_IRQ].ivpr;
}

static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	struct irq_source *src = &opp->src[n_IRQ];
	uint32_t normal_mask = (1UL << opp->nb_cpus) - 1;
	uint32_t crit_mask = 0;
	uint32_t mask = normal_mask;
	int crit_shift = IDR_EP_SHIFT - opp->nb_cpus;
	int i;

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		crit_mask = mask << crit_shift;
		mask |= crit_mask | IDR_EP;
	}

	src->idr = val & mask;
	pr_debug("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);

	if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
		if (src->idr & crit_mask) {
			if (src->idr & normal_mask) {
				pr_debug("%s: IRQ configured for multiple output types, using critical\n",
					__func__);
			}

			src->output = ILR_INTTGT_CINT;
			src->nomask = true;
			src->destmask = 0;

			for (i = 0; i < opp->nb_cpus; i++) {
				int n_ci = IDR_CI0_SHIFT - i;

				if (src->idr & (1UL << n_ci))
					src->destmask |= 1UL << i;
			}
		} else {
			src->output = ILR_INTTGT_INT;
			src->nomask = false;
			src->destmask = src->idr & normal_mask;
		}
	} else {
		src->destmask = src->idr;
	}
}
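
/*
 * On the pre-v4.0 FSL layout (OPENPIC_FLAG_IDR_CRIT), the IDR thus
 * carries two destination sets: bits nb_cpus-1..0 select normal (INT)
 * targets, while the CI bits counting down from bit 30 select critical
 * (CINT) targets, e.g. with two CPUs, bit 30 is CPU 0's critical bit
 * and bit 29 is CPU 1's. Critical delivery wins if both are set.
 */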

static inline void write_IRQreg_ilr(struct openpic *opp, int n_IRQ,
				    uint32_t val)
{
	if (opp->flags & OPENPIC_FLAG_ILR) {
		struct irq_source *src = &opp->src[n_IRQ];

		src->output = val & ILR_INTTGT_MASK;
		pr_debug("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
			src->output);

		/* TODO: on MPIC v4.0 only, set nomask for non-INT */
	}
}

static inline void write_IRQreg_ivpr(struct openpic *opp, int n_IRQ,
				     uint32_t val)
{
	uint32_t mask;

	/* NOTE when implementing newer FSL MPIC models: starting with v4.0,
	 * the polarity bit is read-only on internal interrupts.
	 */
	mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK |
	    IVPR_POLARITY_MASK | opp->vector_mask;

	/* ACTIVITY bit is read-only */
	opp->src[n_IRQ].ivpr =
	    (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask);

	/* For FSL internal interrupts, the sense bit is reserved and zero,
	 * and the interrupt is always level-triggered.  Timers and IPIs
	 * have no sense or polarity bits, and are edge-triggered.
	 */
	switch (opp->src[n_IRQ].type) {
	case IRQ_TYPE_NORMAL:
		opp->src[n_IRQ].level =
		    !!(opp->src[n_IRQ].ivpr & IVPR_SENSE_MASK);
		break;

	case IRQ_TYPE_FSLINT:
		opp->src[n_IRQ].ivpr &= ~IVPR_SENSE_MASK;
		break;

	case IRQ_TYPE_FSLSPECIAL:
		opp->src[n_IRQ].ivpr &= ~(IVPR_POLARITY_MASK | IVPR_SENSE_MASK);
		break;
	}

	openpic_update_irq(opp, n_IRQ);
	pr_debug("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
		opp->src[n_IRQ].ivpr);
}

static void openpic_gcr_write(struct openpic *opp, uint64_t val)
{
	if (val & GCR_RESET) {
		openpic_reset(opp);
		return;
	}

	opp->gcr &= ~opp->mpic_mode_mask;
	opp->gcr |= val & opp->mpic_mode_mask;
}

static int openpic_gbl_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int err = 0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case 0x00:	/* Block Revision Register 1 (BRR1) is read-only */
		break;

	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_write_internal(opp, addr, val,
						 get_current_cpu());
		break;

	case 0x1000:		/* FRR */
		break;

	case 0x1020:		/* GCR */
		openpic_gcr_write(opp, val);
		break;

	case 0x1080:		/* VIR */
		break;

	case 0x1090:		/* PIR */
		/*
		 * This register is used to reset a CPU core --
		 * let userspace handle it.
		 */
		err = -ENXIO;
		break;

	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0: {
		int idx;

		idx = (addr - 0x10A0) >> 4;
		write_IRQreg_ivpr(opp, opp->irq_ipi0 + idx, val);
		break;
	}
	case 0x10E0:		/* SPVE */
		opp->spve = val & opp->vector_mask;
		break;

	default:
		break;
	}

	return err;
}

static int openpic_gbl_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	u32 retval;
	int err = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;
	if (addr & 0xF)
		goto out;

	switch (addr) {
	case 0x1000:		/* FRR */
		retval = opp->frr;
		retval |= (opp->nb_cpus - 1) << FRR_NCPU_SHIFT;
		break;

	case 0x1020:		/* GCR */
		retval = opp->gcr;
		break;

	case 0x1080:		/* VIR */
		retval = opp->vir;
		break;

	case 0x1090:		/* PIR */
		retval = 0x00000000;
		break;

	case 0x00:		/* Block Revision Register 1 (BRR1) */
		retval = opp->brr1;
		break;

	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:
	case 0x80:
	case 0x90:
	case 0xA0:
	case 0xB0:
		err = openpic_cpu_read_internal(opp, addr,
						&retval, get_current_cpu());
		break;

	case 0x10A0:		/* IPI_IVPR */
	case 0x10B0:
	case 0x10C0:
	case 0x10D0:
		{
			int idx;
			idx = (addr - 0x10A0) >> 4;
			retval = read_IRQreg_ivpr(opp, opp->irq_ipi0 + idx);
		}
		break;

	case 0x10E0:		/* SPVE */
		retval = opp->spve;
		break;

	default:
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;
	return err;
}

static int openpic_tmr_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	addr += 0x10f0;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	if (addr == 0x10f0) {
		/* TFRR */
		opp->tfrr = val;
		return 0;
	}

	idx = (addr >> 6) & 0x3;
	addr = addr & 0x30;

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		break;

	case 0x10:		/* TBCR */
		if ((opp->timers[idx].tccr & TCCR_TOG) != 0 &&
		    (val & TBCR_CI) == 0 &&
		    (opp->timers[idx].tbcr & TBCR_CI) != 0)
			opp->timers[idx].tccr &= ~TCCR_TOG;

		opp->timers[idx].tbcr = val;
		break;

	case 0x20:		/* TVPR */
		write_IRQreg_ivpr(opp, opp->irq_tim0 + idx, val);
		break;

	case 0x30:		/* TDR */
		write_IRQreg_idr(opp, opp->irq_tim0 + idx, val);
		break;
	}

	return 0;
}
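
/*
 * Region handlers receive addresses relative to their region's start;
 * openpic_tmr_write() above adds the region base (0x10f0) back so that
 * the TFRR check and the per-timer decode can work in absolute offsets
 * from the start of the register window.
 */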

static int openpic_tmr_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval = -1;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		goto out;

	idx = (addr >> 6) & 0x3;
	if (addr == 0x0) {
		/* TFRR */
		retval = opp->tfrr;
		goto out;
	}

	switch (addr & 0x30) {
	case 0x00:		/* TCCR */
		retval = opp->timers[idx].tccr;
		break;

	case 0x10:		/* TBCR */
		retval = opp->timers[idx].tbcr;
		break;

	case 0x20:		/* TIPV */
		retval = read_IRQreg_ivpr(opp, opp->irq_tim0 + idx);
		break;

	case 0x30:		/* TIDE (TIDR) */
		retval = read_IRQreg_idr(opp, opp->irq_tim0 + idx);
		break;
	}

out:
	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;
	return 0;
}

static int openpic_src_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx;

	pr_debug("%s: addr %#llx <= %08x\n", __func__, addr, val);

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		write_IRQreg_ivpr(opp, idx, val);
		break;

	case 0x10:
		write_IRQreg_idr(opp, idx, val);
		break;

	case 0x18:
		write_IRQreg_ilr(opp, idx, val);
		break;
	}

	return 0;
}

static int openpic_src_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t retval;
	int idx;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	retval = 0xFFFFFFFF;

	addr = addr & 0xffff;
	idx = addr >> 5;

	switch (addr & 0x1f) {
	case 0x00:
		retval = read_IRQreg_ivpr(opp, idx);
		break;

	case 0x10:
		retval = read_IRQreg_idr(opp, idx);
		break;

	case 0x18:
		retval = read_IRQreg_ilr(opp, idx);
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, retval);
	*ptr = retval;
	return 0;
}

static int openpic_msi_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;
	int idx = opp->irq_msi;
	int srs, ibs;

	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);
	if (addr & 0xF)
		return 0;

	switch (addr) {
	case MSIIR_OFFSET:
		srs = val >> MSIIR_SRS_SHIFT;
		idx += srs;
		ibs = (val & MSIIR_IBS_MASK) >> MSIIR_IBS_SHIFT;
		opp->msi[srs].msir |= 1 << ibs;
		openpic_set_irq(opp, idx, 1);
		break;
	default:
		/* most registers are read-only, thus ignored */
		break;
	}

	return 0;
}

static int openpic_msi_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;
	uint32_t r = 0;
	int i, srs;

	pr_debug("%s: addr %#llx\n", __func__, addr);
	if (addr & 0xF)
		return -ENXIO;

	srs = addr >> 4;

	switch (addr) {
	case 0x00:
	case 0x10:
	case 0x20:
	case 0x30:
	case 0x40:
	case 0x50:
	case 0x60:
	case 0x70:		/* MSIRs */
		r = opp->msi[srs].msir;
		/* Clear on read */
		opp->msi[srs].msir = 0;
		openpic_set_irq(opp, opp->irq_msi + srs, 0);
		break;

	case 0x120:		/* MSISR */
		for (i = 0; i < MAX_MSI; i++)
			r |= (opp->msi[i].msir ? 1 : 0) << i;
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, r);
	*ptr = r;
	return 0;
}

static int openpic_summary_read(void *opaque, gpa_t addr, u32 *ptr)
{
	uint32_t r = 0;

	pr_debug("%s: addr %#llx\n", __func__, addr);

	/* TODO: EISR/EIMR */
	*ptr = r;
	return 0;
}

static int openpic_summary_write(void *opaque, gpa_t addr, u32 val)
{
	pr_debug("%s: addr %#llx <= 0x%08x\n", __func__, addr, val);

	/* TODO: EISR/EIMR */
	return 0;
}

static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
				      u32 val, int idx)
{
	struct openpic *opp = opaque;
	struct irq_source *src;
	struct irq_dest *dst;
	int s_IRQ, n_IRQ;

	pr_debug("%s: cpu %d addr %#llx <= 0x%08x\n", __func__, idx,
		addr, val);

	if (idx < 0)
		return 0;

	if (addr & 0xF)
		return 0;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x40:		/* IPIDR */
	case 0x50:
	case 0x60:
	case 0x70:
		idx = (addr - 0x40) >> 4;
		/* we use IDE as mask which CPUs to deliver the IPI to still. */
		opp->src[opp->irq_ipi0 + idx].destmask |= val;
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 1);
		openpic_set_irq(opp, opp->irq_ipi0 + idx, 0);
		break;

	case 0x80:		/* CTPR */
		dst->ctpr = val & 0x0000000F;

		pr_debug("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
			__func__, idx, dst->ctpr, dst->raised.priority,
			dst->servicing.priority);

		if (dst->raised.priority <= dst->ctpr) {
			pr_debug("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
				__func__, idx);
			mpic_irq_lower(opp, dst, ILR_INTTGT_INT);
		} else if (dst->raised.priority > dst->servicing.priority) {
			pr_debug("%s: Raise OpenPIC INT output cpu %d irq %d\n",
				__func__, idx, dst->raised.next);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		break;

	case 0x90:		/* WHOAMI */
		/* Read-only register */
		break;

	case 0xA0:		/* IACK */
		/* Read-only register */
		break;

	case 0xB0: {		/* EOI */
		int notify_eoi;

		pr_debug("EOI\n");
		s_IRQ = IRQ_get_next(opp, &dst->servicing);

		if (s_IRQ < 0) {
			pr_debug("%s: EOI with no interrupt in service\n",
				__func__);
			break;
		}

		IRQ_resetbit(&dst->servicing, s_IRQ);
		/* Notify listeners that the IRQ is over */
		notify_eoi = s_IRQ;
		/* Set up next servicing IRQ */
		s_IRQ = IRQ_get_next(opp, &dst->servicing);
		/* Check queued interrupts. */
		n_IRQ = IRQ_get_next(opp, &dst->raised);
		src = &opp->src[n_IRQ];
		if (n_IRQ != -1 &&
		    (s_IRQ == -1 ||
		     IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
			pr_debug("Raise OpenPIC INT output cpu %d irq %d\n",
				idx, n_IRQ);
			mpic_irq_raise(opp, dst, ILR_INTTGT_INT);
		}

		spin_unlock(&opp->lock);
		kvm_notify_acked_irq(opp->kvm, 0, notify_eoi);
		spin_lock(&opp->lock);

		break;
	}

	default:
		break;
	}

	return 0;
}

static int openpic_cpu_write(void *opaque, gpa_t addr, u32 val)
{
	struct openpic *opp = opaque;

	return openpic_cpu_write_internal(opp, addr, val,
					 (addr & 0x1f000) >> 12);
}

static uint32_t openpic_iack(struct openpic *opp, struct irq_dest *dst,
			     int cpu)
{
	struct irq_source *src;
	int retval, irq;

	pr_debug("Lower OpenPIC INT output\n");
	mpic_irq_lower(opp, dst, ILR_INTTGT_INT);

	irq = IRQ_get_next(opp, &dst->raised);
	pr_debug("IACK: irq=%d\n", irq);

	if (irq == -1)
		/* No more interrupt pending */
		return opp->spve;

	src = &opp->src[irq];
	if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
	    !(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
		pr_err("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
			__func__, irq, dst->ctpr, src->ivpr);
		openpic_update_irq(opp, irq);
		retval = opp->spve;
	} else {
		/* IRQ enter servicing state */
		IRQ_setbit(&dst->servicing, irq);
		retval = IVPR_VECTOR(opp, src->ivpr);
	}

	if (!src->level) {
		/* edge-sensitive IRQ */
		src->ivpr &= ~IVPR_ACTIVITY_MASK;
		src->pending = 0;
		IRQ_resetbit(&dst->raised, irq);
	}

	if ((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + MAX_IPI))) {
		src->destmask &= ~(1 << cpu);
		if (src->destmask && !src->level) {
			/* trigger on CPUs that didn't know about it yet */
			openpic_set_irq(opp, irq, 1);
			openpic_set_irq(opp, irq, 0);
			/* if all CPUs knew about it, set active bit again */
			src->ivpr |= IVPR_ACTIVITY_MASK;
		}
	}

	return retval;
}
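
/*
 * In other words, an IACK hands the guest the vector of the
 * highest-priority raised interrupt and moves that source into the
 * servicing queue; if nothing deliverable is raised, the spurious
 * vector (SPVE) comes back instead.  Edge-triggered sources drop their
 * pending/activity state at acknowledge time, and an IPI is re-sent
 * until every CPU left in its destination mask has taken it.
 */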

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
	struct openpic *opp = vcpu->arch.mpic;
	int cpu = vcpu->arch.irq_cpu_id;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	if ((opp->gcr & opp->mpic_mode_mask) == GCR_MODE_PROXY)
		kvmppc_set_epr(vcpu, openpic_iack(opp, &opp->dst[cpu], cpu));

	spin_unlock_irqrestore(&opp->lock, flags);
}

static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
				     u32 *ptr, int idx)
{
	struct openpic *opp = opaque;
	struct irq_dest *dst;
	uint32_t retval;

	pr_debug("%s: cpu %d addr %#llx\n", __func__, idx, addr);
	retval = 0xFFFFFFFF;

	if (idx < 0)
		goto out;

	if (addr & 0xF)
		goto out;

	dst = &opp->dst[idx];
	addr &= 0xFF0;
	switch (addr) {
	case 0x80:		/* CTPR */
		retval = dst->ctpr;
		break;

	case 0x90:		/* WHOAMI */
		retval = idx;
		break;

	case 0xA0:		/* IACK */
		retval = openpic_iack(opp, dst, idx);
		break;

	case 0xB0:		/* EOI */
		retval = 0;
		break;

	default:
		break;
	}

	pr_debug("%s: => 0x%08x\n", __func__, retval);

out:
	*ptr = retval;
	return 0;
}

static int openpic_cpu_read(void *opaque, gpa_t addr, u32 *ptr)
{
	struct openpic *opp = opaque;

	return openpic_cpu_read_internal(opp, addr, ptr,
					(addr & 0x1f000) >> 12);
}

struct mem_reg {
	struct list_head list;
	int (*read)(void *opaque, gpa_t addr, u32 *ptr);
	int (*write)(void *opaque, gpa_t addr, u32 val);
	gpa_t start_addr;
	int size;
};

static struct mem_reg openpic_gbl_mmio = {
	.write = openpic_gbl_write,
	.read = openpic_gbl_read,
	.start_addr = OPENPIC_GLB_REG_START,
	.size = OPENPIC_GLB_REG_SIZE,
};

static struct mem_reg openpic_tmr_mmio = {
	.write = openpic_tmr_write,
	.read = openpic_tmr_read,
	.start_addr = OPENPIC_TMR_REG_START,
	.size = OPENPIC_TMR_REG_SIZE,
};

static struct mem_reg openpic_cpu_mmio = {
	.write = openpic_cpu_write,
	.read = openpic_cpu_read,
	.start_addr = OPENPIC_CPU_REG_START,
	.size = OPENPIC_CPU_REG_SIZE,
};

static struct mem_reg openpic_src_mmio = {
	.write = openpic_src_write,
	.read = openpic_src_read,
	.start_addr = OPENPIC_SRC_REG_START,
	.size = OPENPIC_SRC_REG_SIZE,
};

static struct mem_reg openpic_msi_mmio = {
	.read = openpic_msi_read,
	.write = openpic_msi_write,
	.start_addr = OPENPIC_MSI_REG_START,
	.size = OPENPIC_MSI_REG_SIZE,
};

static struct mem_reg openpic_summary_mmio = {
	.read = openpic_summary_read,
	.write = openpic_summary_write,
	.start_addr = OPENPIC_SUMMARY_REG_START,
	.size = OPENPIC_SUMMARY_REG_SIZE,
};

static void fsl_common_init(struct openpic *opp)
{
	int i;
	int virq = MAX_SRC;

	list_add(&openpic_msi_mmio.list, &opp->mmio_regions);
	list_add(&openpic_summary_mmio.list, &opp->mmio_regions);

	opp->vid = VID_REVISION_1_2;
	opp->vir = VIR_GENERIC;
	opp->vector_mask = 0xFFFF;
	opp->tfrr_reset = 0;
	opp->ivpr_reset = IVPR_MASK_MASK;
	opp->idr_reset = 1 << 0;
	opp->max_irq = MAX_IRQ;

	opp->irq_ipi0 = virq;
	virq += MAX_IPI;
	opp->irq_tim0 = virq;
	virq += MAX_TMR;

	BUG_ON(virq > MAX_IRQ);

	opp->irq_msi = 224;

	for (i = 0; i < opp->fsl->max_ext; i++)
		opp->src[i].level = false;

	/* Internal interrupts, including message and MSI */
	for (i = 16; i < MAX_SRC; i++) {
		opp->src[i].type = IRQ_TYPE_FSLINT;
		opp->src[i].level = true;
	}

	/* timers and IPIs */
	for (i = MAX_SRC; i < virq; i++) {
		opp->src[i].type = IRQ_TYPE_FSLSPECIAL;
		opp->src[i].level = false;
	}
}
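
/*
 * The resulting virtual IRQ layout on the FSL models: sources 0-255
 * are the external/internal pins (with the eight MSI sources living at
 * 224 and up), 256-259 are the four IPIs (irq_ipi0) and 260-263 the
 * four global timers (irq_tim0), for MAX_IRQ = 264 entries in total.
 */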

static int kvm_mpic_read_internal(struct openpic *opp, gpa_t addr, u32 *ptr)
{
	struct list_head *node;

	list_for_each(node, &opp->mmio_regions) {
		struct mem_reg *mr = list_entry(node, struct mem_reg, list);

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->read(opp, addr - mr->start_addr, ptr);
	}

	return -ENXIO;
}

static int kvm_mpic_write_internal(struct openpic *opp, gpa_t addr, u32 val)
{
	struct list_head *node;

	list_for_each(node, &opp->mmio_regions) {
		struct mem_reg *mr = list_entry(node, struct mem_reg, list);

		if (mr->start_addr > addr || addr >= mr->start_addr + mr->size)
			continue;

		return mr->write(opp, addr - mr->start_addr, val);
	}

	return -ENXIO;
}

static int kvm_mpic_read(struct kvm_io_device *this, gpa_t addr,
			 int len, void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;
	union {
		u32 val;
		u8 bytes[4];
	} u;

	if (addr & (len - 1)) {
		pr_debug("%s: bad alignment %llx/%d\n",
			 __func__, addr, len);
		return -EINVAL;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_read_internal(opp, addr - opp->reg_base, &u.val);
	spin_unlock_irq(&opp->lock);

	/*
	 * Technically only 32-bit accesses are allowed, but be nice to
	 * people dumping registers a byte at a time -- it works in real
	 * hardware (reads only, not writes).
	 */
	if (len == 4) {
		*(u32 *)ptr = u.val;
		pr_debug("%s: addr %llx ret %d len 4 val %x\n",
			 __func__, addr, ret, u.val);
	} else if (len == 1) {
		*(u8 *)ptr = u.bytes[addr & 3];
		pr_debug("%s: addr %llx ret %d len 1 val %x\n",
			 __func__, addr, ret, u.bytes[addr & 3]);
	} else {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EINVAL;
	}

	return ret;
}

static int kvm_mpic_write(struct kvm_io_device *this, gpa_t addr,
			  int len, const void *ptr)
{
	struct openpic *opp = container_of(this, struct openpic, mmio);
	int ret;

	if (len != 4) {
		pr_debug("%s: bad length %d\n", __func__, len);
		return -EOPNOTSUPP;
	}
	if (addr & 3) {
		pr_debug("%s: bad alignment %llx/%d\n", __func__, addr, len);
		return -EOPNOTSUPP;
	}

	spin_lock_irq(&opp->lock);
	ret = kvm_mpic_write_internal(opp, addr - opp->reg_base,
				      *(const u32 *)ptr);
	spin_unlock_irq(&opp->lock);

	pr_debug("%s: addr %llx ret %d val %x\n",
		 __func__, addr, ret, *(const u32 *)ptr);

	return ret;
}

static const struct kvm_io_device_ops mpic_mmio_ops = {
	.read = kvm_mpic_read,
	.write = kvm_mpic_write,
};

static void map_mmio(struct openpic *opp)
{
	kvm_iodevice_init(&opp->mmio, &mpic_mmio_ops);

	kvm_io_bus_register_dev(opp->kvm, KVM_MMIO_BUS,
				opp->reg_base, OPENPIC_REG_SIZE,
				&opp->mmio);
}

static void unmap_mmio(struct openpic *opp)
{
	kvm_io_bus_unregister_dev(opp->kvm, KVM_MMIO_BUS, &opp->mmio);
}

static int set_base_addr(struct openpic *opp, struct kvm_device_attr *attr)
{
	u64 base;

	if (copy_from_user(&base, (u64 __user *)(long)attr->addr, sizeof(u64)))
		return -EFAULT;

	if (base & 0x3ffff) {
		pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx not aligned\n",
			 __func__, base);
		return -EINVAL;
	}

	if (base == opp->reg_base)
		return 0;

	mutex_lock(&opp->kvm->slots_lock);

	unmap_mmio(opp);
	opp->reg_base = base;

	pr_debug("kvm mpic %s: KVM_DEV_MPIC_BASE_ADDR %08llx\n",
		 __func__, base);

	if (base == 0)
		goto out;

	map_mmio(opp);

	/* the unlock must sit after the label, so that the base == 0
	 * (unmap-only) path also releases slots_lock */
out:
	mutex_unlock(&opp->kvm->slots_lock);
	return 0;
}
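
/*
 * A minimal sketch of the corresponding userspace sequence (fd names
 * hypothetical, error handling omitted): create the in-kernel MPIC
 * with KVM_CREATE_DEVICE, then place its register window via the
 * KVM_DEV_MPIC_GRP_MISC / KVM_DEV_MPIC_BASE_ADDR attribute handled
 * above:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FSL_MPIC_20,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *
 *	uint64_t base = 0xe0040000;	// example guest physical address,
 *					// must be 256 KiB aligned
 *	struct kvm_device_attr da = {
 *		.group = KVM_DEV_MPIC_GRP_MISC,
 *		.attr = KVM_DEV_MPIC_BASE_ADDR,
 *		.addr = (uintptr_t)&base,
 *	};
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &da);
 */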

#define ATTR_SET		0
#define ATTR_GET		1

static int access_reg(struct openpic *opp, gpa_t addr, u32 *val, int type)
{
	int ret;

	if (addr & 3)
		return -ENXIO;

	spin_lock_irq(&opp->lock);

	if (type == ATTR_SET)
		ret = kvm_mpic_write_internal(opp, addr, *val);
	else
		ret = kvm_mpic_read_internal(opp, addr, val);

	spin_unlock_irq(&opp->lock);

	pr_debug("%s: type %d addr %llx val %x\n", __func__, type, addr, *val);

	return ret;
}

static int mpic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u32 attr32;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return set_base_addr(opp, attr);
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return access_reg(opp, attr->attr, &attr32, ATTR_SET);

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		if (get_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		if (attr32 != 0 && attr32 != 1)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		openpic_set_irq(opp, attr->attr, attr32);
		spin_unlock_irq(&opp->lock);
		return 0;
	}

	return -ENXIO;
}

static int mpic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct openpic *opp = dev->private;
	u64 attr64;
	u32 attr32;
	int ret;

	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			mutex_lock(&opp->kvm->slots_lock);
			attr64 = opp->reg_base;
			mutex_unlock(&opp->kvm->slots_lock);

			if (copy_to_user((u64 __user *)(long)attr->addr,
					 &attr64, sizeof(u64)))
				return -EFAULT;

			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		ret = access_reg(opp, attr->attr, &attr32, ATTR_GET);
		if (ret)
			return ret;

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			return -EINVAL;

		spin_lock_irq(&opp->lock);
		attr32 = opp->src[attr->attr].pending;
		spin_unlock_irq(&opp->lock);

		if (put_user(attr32, (u32 __user *)(long)attr->addr))
			return -EFAULT;

		return 0;
	}

	return -ENXIO;
}

static int mpic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_MPIC_GRP_MISC:
		switch (attr->attr) {
		case KVM_DEV_MPIC_BASE_ADDR:
			return 0;
		}

		break;

	case KVM_DEV_MPIC_GRP_REGISTER:
		return 0;

	case KVM_DEV_MPIC_GRP_IRQ_ACTIVE:
		if (attr->attr > MAX_SRC)
			break;

		return 0;
	}

	return -ENXIO;
}

static void mpic_destroy(struct kvm_device *dev)
{
	struct openpic *opp = dev->private;

	dev->kvm->arch.mpic = NULL;
	kfree(opp);
}

static int mpic_set_default_irq_routing(struct openpic *opp)
{
	struct kvm_irq_routing_entry *routing;

	/* Create a nop default map, so that dereferencing it still works */
	routing = kzalloc((sizeof(*routing)), GFP_KERNEL);
	if (!routing)
		return -ENOMEM;

	kvm_set_irq_routing(opp->kvm, routing, 0, 0);

	kfree(routing);
	return 0;
}

static int mpic_create(struct kvm_device *dev, u32 type)
{
	struct openpic *opp;
	int ret;

	/* We only support one MPIC at a time for now */
	if (dev->kvm->arch.mpic)
		return -EINVAL;

	opp = kzalloc(sizeof(struct openpic), GFP_KERNEL);
	if (!opp)
		return -ENOMEM;

	dev->private = opp;
	opp->kvm = dev->kvm;
	opp->dev = dev;
	opp->model = type;
	spin_lock_init(&opp->lock);

	INIT_LIST_HEAD(&opp->mmio_regions);
	list_add(&openpic_gbl_mmio.list, &opp->mmio_regions);
	list_add(&openpic_tmr_mmio.list, &opp->mmio_regions);
	list_add(&openpic_src_mmio.list, &opp->mmio_regions);
	list_add(&openpic_cpu_mmio.list, &opp->mmio_regions);

	switch (opp->model) {
	case KVM_DEV_TYPE_FSL_MPIC_20:
		opp->fsl = &fsl_mpic_20;
		opp->brr1 = 0x00400200;
		opp->flags |= OPENPIC_FLAG_IDR_CRIT;
		opp->nb_irqs = 80;
		opp->mpic_mode_mask = GCR_MODE_MIXED;

		fsl_common_init(opp);

		break;

	case KVM_DEV_TYPE_FSL_MPIC_42:
		opp->fsl = &fsl_mpic_42;
		opp->brr1 = 0x00400402;
		opp->flags |= OPENPIC_FLAG_ILR;
		opp->nb_irqs = 196;
		opp->mpic_mode_mask = GCR_MODE_PROXY;

		fsl_common_init(opp);

		break;

	default:
		ret = -ENODEV;
		goto err;
	}

	ret = mpic_set_default_irq_routing(opp);
	if (ret)
		goto err;

	openpic_reset(opp);

	smp_wmb();
	dev->kvm->arch.mpic = opp;

	return 0;

err:
	kfree(opp);
	return ret;
}

struct kvm_device_ops kvm_mpic_ops = {
	.name = "kvm-mpic",
	.create = mpic_create,
	.destroy = mpic_destroy,
	.set_attr = mpic_set_attr,
	.get_attr = mpic_get_attr,
	.has_attr = mpic_has_attr,
};

int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu)
{
	struct openpic *opp = dev->private;
	int ret = 0;

	if (dev->ops != &kvm_mpic_ops)
		return -EPERM;
	if (opp->kvm != vcpu->kvm)
		return -EPERM;
	if (cpu < 0 || cpu >= MAX_CPU)
		return -EPERM;

	spin_lock_irq(&opp->lock);

	if (opp->dst[cpu].vcpu) {
		ret = -EEXIST;
		goto out;
	}
	if (vcpu->arch.irq_type) {
		ret = -EBUSY;
		goto out;
	}

	opp->dst[cpu].vcpu = vcpu;
	opp->nb_cpus = max(opp->nb_cpus, cpu + 1);

	vcpu->arch.mpic = opp;
	vcpu->arch.irq_cpu_id = cpu;
	vcpu->arch.irq_type = KVMPPC_IRQ_MPIC;

	/* This might need to be changed if GCR gets extended */
	if (opp->mpic_mode_mask == GCR_MODE_PROXY)
		vcpu->arch.epr_flags |= KVMPPC_EPR_KERNEL;

out:
	spin_unlock_irq(&opp->lock);
	return ret;
}

/*
 * This should only happen immediately before the mpic is destroyed,
 * so we shouldn't need to worry about anything still trying to
 * access the vcpu pointer.
 */
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu)
{
	BUG_ON(!opp->dst[vcpu->arch.irq_cpu_id].vcpu);

	opp->dst[vcpu->arch.irq_cpu_id].vcpu = NULL;
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
static int mpic_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	u32 irq = e->irqchip.pin;
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);
	openpic_set_irq(opp, irq, level);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	struct openpic *opp = kvm->arch.mpic;
	unsigned long flags;

	spin_lock_irqsave(&opp->lock, flags);

	/*
	 * XXX We ignore the target address for now, as we only support
	 *     a single MSI bank.
	 */
	openpic_msi_write(kvm->arch.mpic, MSIIR_OFFSET, e->msi.data);
	spin_unlock_irqrestore(&opp->lock, flags);

	/* All code paths we care about don't check for the return value */
	return 0;
}

int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int r = -EINVAL;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_IRQCHIP:
		e->set = mpic_set_irq;
		e->irqchip.irqchip = ue->u.irqchip.irqchip;
		e->irqchip.pin = ue->u.irqchip.pin;
		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
			goto out;

		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
		break;

	case KVM_IRQ_ROUTING_MSI:
		e->set = kvm_set_msi;
		e->msi.address_lo = ue->u.msi.address_lo;
		e->msi.address_hi = ue->u.msi.address_hi;
		e->msi.data = ue->u.msi.data;
		break;

	default:
		goto out;
	}

	r = 0;
out:
	return r;
}