kvm_host.h

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_USER_MEM_SLOTS 32
#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif

/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 256

#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

struct kvm;
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range(struct kvm *kvm,
                               unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

#endif
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
#define HPTEG_HASH_BITS_VPTE_64K 11
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL

struct kvm;
struct kvm_run;
struct kvm_vcpu;

struct lppaca;
struct slb_shadow;
struct dtl_entry;

struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;

struct kvm_vm_stat {
        u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u32 sum_exits;
        u32 mmio_exits;
        u32 dcr_exits;
        u32 signal_exits;
        u32 light_exits;
        /* Account for special types of light exits: */
        u32 itlb_real_miss_exits;
        u32 itlb_virt_miss_exits;
        u32 dtlb_real_miss_exits;
        u32 dtlb_virt_miss_exits;
        u32 syscall_exits;
        u32 isi_exits;
        u32 dsi_exits;
        u32 emulated_inst_exits;
        u32 dec_exits;
        u32 ext_intr_exits;
        u32 halt_wakeup;
        u32 dbell_exits;
        u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
        u32 pf_storage;
        u32 pf_instruc;
        u32 sp_storage;
        u32 sp_instruc;
        u32 queue_intr;
        u32 ld;
        u32 ld_slow;
        u32 st;
        u32 st_slow;
#endif
};
enum kvm_exit_types {
        MMIO_EXITS,
        DCR_EXITS,
        SIGNAL_EXITS,
        ITLB_REAL_MISS_EXITS,
        ITLB_VIRT_MISS_EXITS,
        DTLB_REAL_MISS_EXITS,
        DTLB_VIRT_MISS_EXITS,
        SYSCALL_EXITS,
        ISI_EXITS,
        DSI_EXITS,
        EMULATED_INST_EXITS,
        EMULATED_MTMSRWE_EXITS,
        EMULATED_WRTEE_EXITS,
        EMULATED_MTSPR_EXITS,
        EMULATED_MFSPR_EXITS,
        EMULATED_MTMSR_EXITS,
        EMULATED_MFMSR_EXITS,
        EMULATED_TLBSX_EXITS,
        EMULATED_TLBWE_EXITS,
        EMULATED_RFI_EXITS,
        EMULATED_RFCI_EXITS,
        DEC_EXITS,
        EXT_INTR_EXITS,
        HALT_WAKEUP,
        USR_PR_INST,
        FP_UNAVAIL,
        DEBUG_EXITS,
        TIMEINGUEST,
        DBELL_EXITS,
        GDBELL_EXITS,
        __NUMBER_OF_KVM_EXIT_TYPES
};
/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
        union {
                u64 tv64;
                struct {
                        u32 tbu, tbl;
                } tv32;
        };
};
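
/*
 * Illustrative sketch, not part of the original header: on the big-endian
 * layout this union is written for, filling the two 32-bit timebase halves
 * via tv32 yields the same bits as one 64-bit read through tv64. The
 * helper name below is hypothetical.
 */
static inline u64 kvmppc_exit_timing_pack_sketch(u32 tbu, u32 tbl)
{
        struct kvmppc_exit_timing t;

        t.tv32.tbu = tbu;       /* upper timebase word */
        t.tv32.tbl = tbl;       /* lower timebase word */
        return t.tv64;          /* same storage viewed as one 64-bit value */
}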

struct kvmppc_pginfo {
        unsigned long pfn;
        atomic_t refcnt;
};

struct kvmppc_spapr_tce_table {
        struct list_head list;
        struct kvm *kvm;
        u64 liobn;
        u32 window_size;
        struct page *pages[0];
};

struct kvm_rma_info {
        atomic_t use_count;
        unsigned long base_pfn;
};

/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page.  The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
        unsigned long guest_rpte;
        unsigned int forw, back;
};
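
/*
 * Illustrative sketch with a hypothetical callback-based walker (not in
 * the original header): visit every HPTE that maps the same host page by
 * following the 32-bit 'forw' indexes around the ring until the walk
 * returns to its starting entry.
 */
static inline void kvmppc_revmap_ring_walk_sketch(struct revmap_entry *rev,
                                                  unsigned int start,
                                                  void (*fn)(struct revmap_entry *e))
{
        unsigned int i = start;

        do {
                fn(&rev[i]);            /* process this HPTE's entry */
                i = rev[i].forw;        /* hop to the next HPTE index */
        } while (i != start);
}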

/*
 * We use the top bit of each memslot->arch.rmap entry as a lock bit,
 * and bit 32 as a present flag.  The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
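
/*
 * Illustrative sketches of how the bit layout above is meant to be used;
 * the real locking helpers live elsewhere in the KVM code, and the
 * function names here are hypothetical.
 */
static inline void kvmppc_rmap_lock_sketch(unsigned long *rmapp)
{
        /* spin until the top (lock) bit is ours */
        while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmapp))
                cpu_relax();
}

static inline void kvmppc_rmap_unlock_sketch(unsigned long *rmapp)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmapp);
}

static inline unsigned long kvmppc_rmap_hpt_index_sketch(unsigned long rmap)
{
        /* valid only when the present flag (bit 32) is set */
        return (rmap & KVMPPC_RMAP_PRESENT) ? (rmap & KVMPPC_RMAP_INDEX) : 0;
}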

/* Low-order bits in memslot->arch.slot_phys[] */
#define KVMPPC_PAGE_ORDER_MASK 0x1f
#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80
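
/*
 * Illustrative sketch (hypothetical helper): each slot_phys[] entry keeps
 * the real address in its high bits and the flags defined above in its
 * low bits, so the page order can be recovered with a simple mask.
 */
static inline unsigned int kvmppc_slot_phys_order_sketch(unsigned long physp)
{
        return physp & KVMPPC_PAGE_ORDER_MASK;  /* log2 of the page size */
}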

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        unsigned long *rmap;
        unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};

struct kvm_arch {
        unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        unsigned long hpt_virt;
        struct revmap_entry *revmap;
        unsigned int host_lpid;
        unsigned long host_lpcr;
        unsigned long sdr1;
        unsigned long host_sdr1;
        int tlbie_lock;
        unsigned long lpcr;
        unsigned long rmor;
        struct kvm_rma_info *rma;
        unsigned long vrma_slb_v;
        int rma_setup_done;
        int using_mmu_notifiers;
        u32 hpt_order;
        atomic_t vcpus_running;
        u32 online_vcores;
        unsigned long hpt_npte;
        unsigned long hpt_mask;
        atomic_t hpte_mod_interest;
        spinlock_t slot_phys_lock;
        cpumask_t need_tlb_flush;
        struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
        int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        struct mutex hpt_mutex;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        struct list_head spapr_tce_tables;
        struct list_head rtas_tokens;
#endif
#ifdef CONFIG_KVM_MPIC
        struct openpic *mpic;
#endif
#ifdef CONFIG_KVM_XICS
        struct kvmppc_xics *xics;
#endif
        struct kvmppc_ops *kvm_ops;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_count combines an entry count in the bottom 8 bits
 * and an exit count in the next 8 bits.  This is so that we can
 * atomically increment the entry count iff the exit count is 0
 * without taking the lock.
 */
struct kvmppc_vcore {
        int n_runnable;
        int n_busy;
        int num_threads;
        int entry_exit_count;
        int n_woken;
        int nap_count;
        int napping_threads;
        u16 pcpu;
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
        struct list_head runnable_threads;
        spinlock_t lock;
        wait_queue_head_t wq;
        u64 stolen_tb;
        u64 preempt_tb;
        struct kvm_vcpu *runner;
        u64 tb_offset; /* guest timebase - host timebase */
        ulong lpcr;
        u32 arch_compat;
        ulong pcr;
};

#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
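
/*
 * Illustrative sketch (hypothetical helper): bump the entry count in the
 * bottom 8 bits only while the exit count in the next 8 bits is still
 * zero, using cmpxchg() rather than vc->lock. The real entry/exit paths
 * implement the equivalent logic in low-level code.
 */
static inline bool kvmppc_try_enter_vcore_sketch(struct kvmppc_vcore *vc)
{
        int old, new;

        do {
                old = vc->entry_exit_count;
                if (old >> 8)           /* an exit is already in progress */
                        return false;
                new = old + 1;          /* entry count occupies the low byte */
        } while (cmpxchg(&vc->entry_exit_count, old, new) != old);
        return true;
}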

/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
#define VCORE_STARTING 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4

/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest.  There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
        unsigned long gpa;      /* Current guest phys addr */
        void *pinned_addr;      /* Address in kernel linear mapping */
        void *pinned_end;       /* End of region */
        unsigned long next_gpa; /* Guest phys addr for update */
        unsigned long len;      /* Number of bytes required */
        u8 update_pending;      /* 1 => update pinned_addr from next_gpa */
        bool dirty;             /* true => area has been modified by kernel */
};
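
/*
 * Illustrative sketch (hypothetical helper): registering a new area only
 * records the request here; the vcpu run path later consumes
 * update_pending and repins pinned_addr from next_gpa when it is safe to
 * do so.
 */
static inline void kvmppc_vpa_request_update_sketch(struct kvmppc_vpa *v,
                                                    unsigned long gpa,
                                                    unsigned long len)
{
        v->next_gpa = gpa;      /* where the area should live next */
        v->len = len;           /* number of bytes the area must cover */
        v->update_pending = 1;  /* picked up before the next guest entry */
}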

struct kvmppc_pte {
        ulong eaddr;
        u64 vpage;
        ulong raddr;
        bool may_read : 1;
        bool may_write : 1;
        bool may_execute : 1;
        u8 page_size; /* MMU_PAGE_xxx */
};

struct kvmppc_mmu {
        /* book3s_64 only */
        void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
        u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
        u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
        void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
        void (*slbia)(struct kvm_vcpu *vcpu);
        /* book3s */
        void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
        u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
        int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
                     struct kvmppc_pte *pte, bool data, bool iswrite);
        void (*reset_msr)(struct kvm_vcpu *vcpu);
        void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
        int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
        u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
        bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
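
/*
 * Illustrative sketch (hypothetical wrapper): the function pointers above
 * form a per-MMU-flavour backend, so a guest effective address is
 * translated by calling through the table rather than a fixed function.
 */
static inline int kvmppc_mmu_xlate_sketch(struct kvmppc_mmu *mmu,
                                          struct kvm_vcpu *vcpu, gva_t eaddr,
                                          bool data, bool iswrite,
                                          struct kvmppc_pte *pte)
{
        return mmu->xlate(vcpu, eaddr, pte, data, iswrite);
}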

struct kvmppc_slb {
        u64 esid;
        u64 vsid;
        u64 orige;
        u64 origv;
        bool valid : 1;
        bool Ks : 1;
        bool Kp : 1;
        bool nx : 1;
        bool large : 1; /* PTEs are 16MB */
        bool tb : 1;    /* 1TB segment */
        bool class : 1;
        u8 base_page_size; /* MMU_PAGE_xxx */
};

#ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
#else
#define KVMPPC_BOOKE_IAC_NUM 4
#define KVMPPC_BOOKE_DAC_NUM 2
#endif
#define KVMPPC_BOOKE_MAX_IAC 4
#define KVMPPC_BOOKE_MAX_DAC 2

/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
#define KVMPPC_EPR_NONE 0   /* EPR not supported */
#define KVMPPC_EPR_USER 1   /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */

#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2

struct openpic;

struct kvm_vcpu_arch {
        ulong host_stack;
        u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
        struct kvmppc_slb slb[64];
        int slb_max;    /* 1 + index of last valid entry in slb[] */
        int slb_nr;     /* total number of entries in SLB */
        struct kvmppc_mmu mmu;
        struct kvmppc_vcpu_book3s *book3s;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
        struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif

        ulong gpr[32];

        u64 fpr[32];
        u64 fpscr;

#ifdef CONFIG_SPE
        ulong evr[32];
        ulong spefscr;
        ulong host_spefscr;
        u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
#endif

#ifdef CONFIG_VSX
        u64 vsr[64];
#endif

#ifdef CONFIG_KVM_BOOKE_HV
        u32 host_mas4;
        u32 host_mas6;
        u32 shadow_epcr;
        u32 shadow_msrp;
        u32 eplc;
        u32 epsc;
        u32 oldpir;
#endif

#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
        u32 epcr;
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S
        /* For Gekko paired singles */
        u32 qpr[32];
#endif

        ulong pc;
        ulong ctr;
        ulong lr;

        ulong xer;
        u32 cr;

#ifdef CONFIG_PPC_BOOK3S
        ulong hflags;
        ulong guest_owned_ext;
        ulong purr;
        ulong spurr;
        ulong dscr;
        ulong amr;
        ulong uamor;
        u32 ctrl;
        ulong dabr;
        ulong cfar;
        ulong ppr;
        ulong shadow_srr1;
#endif
        u32 vrsave; /* also USPRG0 */
        u32 mmucr;
        /* shadow_msr is unused for BookE HV */
        ulong shadow_msr;
        ulong csrr0;
        ulong csrr1;
        ulong dsrr0;
        ulong dsrr1;
        ulong mcsrr0;
        ulong mcsrr1;
        ulong mcsr;
        u32 dec;
#ifdef CONFIG_BOOKE
        u32 decar;
#endif
        u32 tbl;
        u32 tbu;
        u32 tcr;
        ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
        u32 ivor[64];
        ulong ivpr;
        u32 pvr;

        u32 shadow_pid;
        u32 shadow_pid1;
        u32 pid;
        u32 swap_pid;

        u32 ccr0;
        u32 ccr1;
        u32 dbsr;

        u64 mmcr[3];
        u32 pmc[8];
        u64 siar;
        u64 sdar;

#ifdef CONFIG_KVM_EXIT_TIMING
        struct mutex exit_timing_lock;
        struct kvmppc_exit_timing timing_exit;
        struct kvmppc_exit_timing timing_last_enter;
        u32 last_exit_type;
        u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
        u64 timing_last_exit;
        struct dentry *debugfs_exit_timing;
#endif

#ifdef CONFIG_PPC_BOOK3S
        ulong fault_dar;
        u32 fault_dsisr;
#endif

#ifdef CONFIG_BOOKE
        ulong fault_dear;
        ulong fault_esr;
        ulong queued_dear;
        ulong queued_esr;
        spinlock_t wdt_lock;
        struct timer_list wdt_timer;
        u32 tlbcfg[4];
        u32 tlbps[4];
        u32 mmucfg;
        u32 eptcfg;
        u32 epr;
        u32 crit_save;
        /* guest debug registers */
        struct debug_reg dbg_reg;
        /* hardware visible debug registers when in guest state */
        struct debug_reg shadow_dbg_reg;
#endif
        gpa_t paddr_accessed;
        gva_t vaddr_accessed;
        u8 io_gpr; /* GPR used as IO source/target */
        u8 mmio_is_bigendian;
        u8 mmio_sign_extend;
        u8 dcr_needed;
        u8 dcr_is_write;
        u8 osi_needed;
        u8 osi_enabled;
        u8 papr_enabled;
        u8 watchdog_enabled;
        u8 sane;
        u8 cpu_type;
        u8 hcall_needed;
        u8 epr_flags; /* KVMPPC_EPR_xxx */
        u8 epr_needed;

        u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */

        struct hrtimer dec_timer;
        struct tasklet_struct tasklet;
        u64 dec_jiffies;
        u64 dec_expires;
        unsigned long pending_exceptions;
        u8 ceded;
        u8 prodded;
        u32 last_inst;

        wait_queue_head_t *wqp;
        struct kvmppc_vcore *vcore;
        int ret;
        int trap;
        int state;
        int ptid;
        bool timer_running;
        wait_queue_head_t cpu_run;

        struct kvm_vcpu_arch_shared *shared;
        unsigned long magic_page_pa; /* phys addr to map the magic page to */
        unsigned long magic_page_ea; /* effect. addr to map the magic page to */

        int irq_type; /* one of KVMPPC_IRQ_* */
        int irq_cpu_id;
        struct openpic *mpic; /* used when irq_type == KVMPPC_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
        struct kvmppc_icp *icp; /* XICS presentation controller */
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        struct kvm_vcpu_arch_shared shregs;

        unsigned long pgfault_addr;
        long pgfault_index;
        unsigned long pgfault_hpte[2];

        struct list_head run_list;
        struct task_struct *run_task;
        struct kvm_run *kvm_run;
        pgd_t *pgdir;

        spinlock_t vpa_update_lock;
        struct kvmppc_vpa vpa;
        struct kvmppc_vpa dtl;
        struct dtl_entry *dtl_ptr;
        unsigned long dtl_index;
        u64 stolen_logged;
        struct kvmppc_vpa slb_shadow;

        spinlock_t tbacct_lock;
        u64 busy_stolen;
        u64 busy_preempt;
#endif
};

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_BUSY_IN_HOST 2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
#define KVM_MMIO_REG_EXT_MASK 0xffe0
#define KVM_MMIO_REG_GPR 0x0000
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
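
/*
 * Illustrative sketches (hypothetical helpers): vcpu->arch.io_gpr packs a
 * register class in the bits covered by KVM_MMIO_REG_EXT_MASK and the
 * register number in the low five bits, so a completed MMIO load can be
 * steered into the right register file.
 */
static inline int kvmppc_mmio_reg_class_sketch(u8 io_gpr)
{
        return io_gpr & KVM_MMIO_REG_EXT_MASK;  /* GPR, FPR, QPR or FQPR */
}

static inline int kvmppc_mmio_reg_num_sketch(u8 io_gpr)
{
        return io_gpr & KVM_MMIO_REG_MASK;      /* register number 0..31 */
}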

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE

#endif /* __POWERPC_KVM_HOST_H__ */