kvm_host.h

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_USER_MEM_SLOTS 32
#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif

/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 256

#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

struct kvm;
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range(struct kvm *kvm,
			       unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#endif

/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)

#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL

struct kvm;
struct kvm_run;
struct kvm_vcpu;

struct lppaca;
struct slb_shadow;
struct dtl_entry;

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 sum_exits;
	u32 mmio_exits;
	u32 dcr_exits;
	u32 signal_exits;
	u32 light_exits;
	/* Account for special types of light exits: */
	u32 itlb_real_miss_exits;
	u32 itlb_virt_miss_exits;
	u32 dtlb_real_miss_exits;
	u32 dtlb_virt_miss_exits;
	u32 syscall_exits;
	u32 isi_exits;
	u32 dsi_exits;
	u32 emulated_inst_exits;
	u32 dec_exits;
	u32 ext_intr_exits;
	u32 halt_wakeup;
	u32 dbell_exits;
	u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
	u32 pf_storage;
	u32 pf_instruc;
	u32 sp_storage;
	u32 sp_instruc;
	u32 queue_intr;
	u32 ld;
	u32 ld_slow;
	u32 st;
	u32 st_slow;
#endif
};

enum kvm_exit_types {
	MMIO_EXITS,
	DCR_EXITS,
	SIGNAL_EXITS,
	ITLB_REAL_MISS_EXITS,
	ITLB_VIRT_MISS_EXITS,
	DTLB_REAL_MISS_EXITS,
	DTLB_VIRT_MISS_EXITS,
	SYSCALL_EXITS,
	ISI_EXITS,
	DSI_EXITS,
	EMULATED_INST_EXITS,
	EMULATED_MTMSRWE_EXITS,
	EMULATED_WRTEE_EXITS,
	EMULATED_MTSPR_EXITS,
	EMULATED_MFSPR_EXITS,
	EMULATED_MTMSR_EXITS,
	EMULATED_MFMSR_EXITS,
	EMULATED_TLBSX_EXITS,
	EMULATED_TLBWE_EXITS,
	EMULATED_RFI_EXITS,
	EMULATED_RFCI_EXITS,
	DEC_EXITS,
	EXT_INTR_EXITS,
	HALT_WAKEUP,
	USR_PR_INST,
	FP_UNAVAIL,
	DEBUG_EXITS,
	TIMEINGUEST,
	DBELL_EXITS,
	GDBELL_EXITS,
	__NUMBER_OF_KVM_EXIT_TYPES
};

/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
	union {
		u64 tv64;
		struct {
			u32 tbu, tbl;
		} tv32;
	};
};
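
/*
 * Added note (not part of the original header), illustrating the comment
 * above: the timebase is stored as a single big-endian 64-bit value, so
 * tv32.tbu overlays the upper 32 bits (TBU) and tv32.tbl the lower 32 bits
 * (TBL).  For example, writing tv64 = 0x0000000400000010ULL reads back as
 * tv32.tbu == 0x4 and tv32.tbl == 0x10 on a big-endian host.
 */
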
struct kvmppc_pginfo {
	unsigned long pfn;
	atomic_t refcnt;
};

struct kvmppc_spapr_tce_table {
	struct list_head list;
	struct kvm *kvm;
	u64 liobn;
	u32 window_size;
	struct page *pages[0];
};

struct kvm_rma_info {
	atomic_t use_count;
	unsigned long base_pfn;
};

/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page.  The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	unsigned long guest_rpte;
	unsigned int forw, back;
};

/*
 * We use the top bit of each memslot->arch.rmap entry as a lock bit,
 * and bit 32 as a present flag.  The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
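
/*
 * Added illustration (not in the original header): under the layout
 * described above, an rmap entry of (KVMPPC_RMAP_PRESENT | 0x42) would be
 * a valid, unlocked entry pointing at guest HPTE index 0x42 with no
 * referenced/changed bits recorded, while KVMPPC_RMAP_LOCK_BIT (bit 63)
 * is set only while the entry is being updated.
 */
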
/* Low-order bits in memslot->arch.slot_phys[] */
#define KVMPPC_PAGE_ORDER_MASK 0x1f
#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long *rmap;
	unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
};

struct kvm_arch {
	unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long hpt_virt;
	struct revmap_entry *revmap;
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
	unsigned long host_sdr1;
	int tlbie_lock;
	unsigned long lpcr;
	unsigned long rmor;
	struct kvm_rma_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
	u32 hpt_order;
	atomic_t vcpus_running;
	u32 online_vcores;
	unsigned long hpt_npte;
	unsigned long hpt_mask;
	atomic_t hpte_mod_interest;
	spinlock_t slot_phys_lock;
	cpumask_t need_tlb_flush;
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
	int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
	struct list_head rtas_tokens;
#endif
#ifdef CONFIG_KVM_MPIC
	struct openpic *mpic;
#endif
#ifdef CONFIG_KVM_XICS
	struct kvmppc_xics *xics;
#endif
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_count combines an entry count in the bottom 8 bits
 * and an exit count in the next 8 bits.  This is so that we can
 * atomically increment the entry count iff the exit count is 0
 * without taking the lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int n_busy;
	int num_threads;
	int entry_exit_count;
	int n_woken;
	int nap_count;
	int napping_threads;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct list_head runnable_threads;
	spinlock_t lock;
	wait_queue_head_t wq;
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
};

#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
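
/*
 * Added illustration (not in the original header): with the 8/8 split
 * described above, entry_exit_count == 0x0103 means three threads have
 * entered the guest and one has begun exiting, so VCORE_ENTRY_COUNT()
 * returns 3 and VCORE_EXIT_COUNT() returns 1.
 */
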
/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
#define VCORE_STARTING 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4

/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest.  There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
	unsigned long gpa;	/* Current guest phys addr */
	void *pinned_addr;	/* Address in kernel linear mapping */
	void *pinned_end;	/* End of region */
	unsigned long next_gpa;	/* Guest phys addr for update */
	unsigned long len;	/* Number of bytes required */
	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
	bool dirty;		/* true => area has been modified by kernel */
};

struct kvmppc_pte {
	ulong eaddr;
	u64 vpage;
	ulong raddr;
	bool may_read : 1;
	bool may_write : 1;
	bool may_execute : 1;
};

struct kvmppc_mmu {
	/* book3s_64 only */
	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
	u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
	u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbia)(struct kvm_vcpu *vcpu);
	/* book3s */
	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
	u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
	int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
	void (*reset_msr)(struct kvm_vcpu *vcpu);
	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
	int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
	u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};

struct kvmppc_slb {
	u64 esid;
	u64 vsid;
	u64 orige;
	u64 origv;
	bool valid : 1;
	bool Ks : 1;
	bool Kp : 1;
	bool nx : 1;
	bool large : 1;	/* PTEs are 16MB */
	bool tb : 1;	/* 1TB segment */
	bool class : 1;
};

# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
# else
#define KVMPPC_BOOKE_IAC_NUM 4
#define KVMPPC_BOOKE_DAC_NUM 2
# endif
#define KVMPPC_BOOKE_MAX_IAC 4
#define KVMPPC_BOOKE_MAX_DAC 2

/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
#define KVMPPC_EPR_NONE 0 /* EPR not supported */
#define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */

struct kvmppc_booke_debug_reg {
	u32 dbcr0;
	u32 dbcr1;
	u32 dbcr2;
#ifdef CONFIG_KVM_E500MC
	u32 dbcr4;
#endif
	u64 iac[KVMPPC_BOOKE_MAX_IAC];
	u64 dac[KVMPPC_BOOKE_MAX_DAC];
};

#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2

struct openpic;

struct kvm_vcpu_arch {
	ulong host_stack;
	u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
	struct kvmppc_slb slb[64];
	int slb_max;	/* 1 + index of last valid entry in slb[] */
	int slb_nr;	/* total number of entries in SLB */
	struct kvmppc_mmu mmu;
#endif
	ulong gpr[32];
	u64 fpr[32];
	u64 fpscr;
#ifdef CONFIG_SPE
	ulong evr[32];
	ulong spefscr;
	ulong host_spefscr;
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
#endif
#ifdef CONFIG_VSX
	u64 vsr[64];
#endif
#ifdef CONFIG_KVM_BOOKE_HV
	u32 host_mas4;
	u32 host_mas6;
	u32 shadow_epcr;
	u32 shadow_msrp;
	u32 eplc;
	u32 epsc;
	u32 oldpir;
#endif
#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
	u32 epcr;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S
	/* For Gekko paired singles */
	u32 qpr[32];
#endif
	ulong pc;
	ulong ctr;
	ulong lr;
	ulong xer;
	u32 cr;
#ifdef CONFIG_PPC_BOOK3S
	ulong hflags;
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
	ulong dscr;
	ulong amr;
	ulong uamor;
	u32 ctrl;
	ulong dabr;
	ulong cfar;
#endif
	u32 vrsave; /* also USPRG0 */
	u32 mmucr;
	/* shadow_msr is unused for BookE HV */
	ulong shadow_msr;
	ulong csrr0;
	ulong csrr1;
	ulong dsrr0;
	ulong dsrr1;
	ulong mcsrr0;
	ulong mcsrr1;
	ulong mcsr;
	u32 dec;
#ifdef CONFIG_BOOKE
	u32 decar;
#endif
	u32 tbl;
	u32 tbu;
	u32 tcr;
	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
	u32 ivor[64];
	ulong ivpr;
	u32 pvr;
	u32 shadow_pid;
	u32 shadow_pid1;
	u32 pid;
	u32 swap_pid;
	u32 ccr0;
	u32 ccr1;
	u32 dbsr;
	u64 mmcr[3];
	u32 pmc[8];
#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
	struct kvmppc_exit_timing timing_exit;
	struct kvmppc_exit_timing timing_last_enter;
	u32 last_exit_type;
	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_last_exit;
	struct dentry *debugfs_exit_timing;
#endif
#ifdef CONFIG_PPC_BOOK3S
	ulong fault_dar;
	u32 fault_dsisr;
#endif
#ifdef CONFIG_BOOKE
	ulong fault_dear;
	ulong fault_esr;
	ulong queued_dear;
	ulong queued_esr;
	spinlock_t wdt_lock;
	struct timer_list wdt_timer;
	u32 tlbcfg[4];
	u32 tlbps[4];
	u32 mmucfg;
	u32 eptcfg;
	u32 epr;
	u32 crit_save;
	struct kvmppc_booke_debug_reg dbg_reg;
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_is_bigendian;
	u8 mmio_sign_extend;
	u8 dcr_needed;
	u8 dcr_is_write;
	u8 osi_needed;
	u8 osi_enabled;
	u8 papr_enabled;
	u8 watchdog_enabled;
	u8 sane;
	u8 cpu_type;
	u8 hcall_needed;
	u8 epr_flags; /* KVMPPC_EPR_xxx */
	u8 epr_needed;
	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
	struct hrtimer dec_timer;
	struct tasklet_struct tasklet;
	u64 dec_jiffies;
	u64 dec_expires;
	unsigned long pending_exceptions;
	u8 ceded;
	u8 prodded;
	u32 last_inst;
	wait_queue_head_t *wqp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;
	int state;
	int ptid;
	bool timer_running;
	wait_queue_head_t cpu_run;
	struct kvm_vcpu_arch_shared *shared;
	unsigned long magic_page_pa; /* phys addr to map the magic page to */
	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
	int irq_type; /* one of KVM_IRQ_* */
	int irq_cpu_id;
	struct openpic *mpic; /* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
	struct kvmppc_icp *icp; /* XICS presentation controller */
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	struct kvm_vcpu_arch_shared shregs;
	unsigned long pgfault_addr;
	long pgfault_index;
	unsigned long pgfault_hpte[2];
	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;
	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
	struct kvmppc_vpa dtl;
	struct dtl_entry *dtl_ptr;
	unsigned long dtl_index;
	u64 stolen_logged;
	struct kvmppc_vpa slb_shadow;
	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;
#endif
};

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_BUSY_IN_HOST 2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
#define KVM_MMIO_REG_EXT_MASK 0xffe0
#define KVM_MMIO_REG_GPR 0x0000
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
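
/*
 * Added illustration (not in the original header): io_gpr encodes both a
 * register bank and an index.  The low five bits (KVM_MMIO_REG_MASK)
 * select the register number and the remaining bits (KVM_MMIO_REG_EXT_MASK)
 * select the bank, so a value of (KVM_MMIO_REG_FPR | 5) would name FPR 5
 * as the target of an MMIO load.
 */
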
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE

#endif /* __POWERPC_KVM_HOST_H__ */