kvm_host.h

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif

#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

struct kvm;
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range(struct kvm *kvm,
			       unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#endif

/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x) 0
#define KVM_NR_PAGE_SIZES 1
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)

#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
#define HPTEG_HASH_BITS_VPTE 13
#define HPTEG_HASH_BITS_VPTE_LONG 5
#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM 0x0fffffffffffffffULL

struct kvm;
struct kvm_run;
struct kvm_vcpu;

struct lppaca;
struct slb_shadow;
struct dtl_entry;

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 sum_exits;
	u32 mmio_exits;
	u32 dcr_exits;
	u32 signal_exits;
	u32 light_exits;
	/* Account for special types of light exits: */
	u32 itlb_real_miss_exits;
	u32 itlb_virt_miss_exits;
	u32 dtlb_real_miss_exits;
	u32 dtlb_virt_miss_exits;
	u32 syscall_exits;
	u32 isi_exits;
	u32 dsi_exits;
	u32 emulated_inst_exits;
	u32 dec_exits;
	u32 ext_intr_exits;
	u32 halt_wakeup;
	u32 dbell_exits;
	u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
	u32 pf_storage;
	u32 pf_instruc;
	u32 sp_storage;
	u32 sp_instruc;
	u32 queue_intr;
	u32 ld;
	u32 ld_slow;
	u32 st;
	u32 st_slow;
#endif
};

enum kvm_exit_types {
	MMIO_EXITS,
	DCR_EXITS,
	SIGNAL_EXITS,
	ITLB_REAL_MISS_EXITS,
	ITLB_VIRT_MISS_EXITS,
	DTLB_REAL_MISS_EXITS,
	DTLB_VIRT_MISS_EXITS,
	SYSCALL_EXITS,
	ISI_EXITS,
	DSI_EXITS,
	EMULATED_INST_EXITS,
	EMULATED_MTMSRWE_EXITS,
	EMULATED_WRTEE_EXITS,
	EMULATED_MTSPR_EXITS,
	EMULATED_MFSPR_EXITS,
	EMULATED_MTMSR_EXITS,
	EMULATED_MFMSR_EXITS,
	EMULATED_TLBSX_EXITS,
	EMULATED_TLBWE_EXITS,
	EMULATED_RFI_EXITS,
	EMULATED_RFCI_EXITS,
	DEC_EXITS,
	EXT_INTR_EXITS,
	HALT_WAKEUP,
	USR_PR_INST,
	FP_UNAVAIL,
	DEBUG_EXITS,
	TIMEINGUEST,
	DBELL_EXITS,
	GDBELL_EXITS,
	__NUMBER_OF_KVM_EXIT_TYPES
};

/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
	union {
		u64 tv64;
		struct {
			u32 tbu, tbl;
		} tv32;
	};
};
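
/*
 * Illustrative sketch, not part of the original header: the union above
 * stores one timestamp so that it can be written as two big-endian 32-bit
 * timebase halves (upper word in tbu, lower in tbl) and read back as a
 * single 64-bit value, or vice versa.
 */
static inline u64 kvmppc_exit_timing_to_u64_sketch(struct kvmppc_exit_timing t)
{
	return ((u64)t.tv32.tbu << 32) | t.tv32.tbl;	/* equals t.tv64 on big-endian */
}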

struct kvmppc_pginfo {
	unsigned long pfn;
	atomic_t refcnt;
};

struct kvmppc_spapr_tce_table {
	struct list_head list;
	struct kvm *kvm;
	u64 liobn;
	u32 window_size;
	struct page *pages[0];
};

struct kvmppc_linear_info {
	void *base_virt;
	unsigned long base_pfn;
	unsigned long npages;
	struct list_head list;
	atomic_t use_count;
	int type;
};

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page. The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	unsigned long guest_rpte;
	unsigned int forw, back;
};
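
/*
 * Illustrative sketch, not part of the original header: counting the HPTEs
 * that map one host page by following the forward pointers of the
 * doubly-linked ring described above. "revmap" is an array of
 * revmap_entry structures and "head" an HPTE index taken from the page's
 * rmap entry; both names are used here only for the example.
 */
static inline unsigned int kvmppc_revmap_ring_len_sketch(struct revmap_entry *revmap,
							 unsigned int head)
{
	unsigned int i = head, n = 0;

	do {
		n++;
		i = revmap[i].forw;	/* next HPTE mapping the same page */
	} while (i != head);

	return n;
}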

/*
 * We use the top bit of each memslot->arch.rmap entry as a lock bit,
 * and bit 32 as a present flag. The bottom 32 bits are the
 * index in the guest HPT of a HPTE that points to the page.
 */
#define KVMPPC_RMAP_LOCK_BIT 63
#define KVMPPC_RMAP_RC_SHIFT 32
#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
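
/*
 * Illustrative sketch, not part of the original header: decoding an rmap
 * entry according to the layout described above - check the present flag,
 * then extract the HPTE index from the bottom 32 bits.
 */
static inline unsigned long kvmppc_rmap_index_sketch(unsigned long rmap)
{
	if (!(rmap & KVMPPC_RMAP_PRESENT))
		return (unsigned long)-1;	/* no HPTE points at this page */
	return rmap & KVMPPC_RMAP_INDEX;	/* index into the guest HPT */
}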

/* Low-order bits in memslot->arch.slot_phys[] */
#define KVMPPC_PAGE_ORDER_MASK 0x1f
#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
#define KVMPPC_GOT_PAGE 0x80
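
/*
 * Illustrative sketch, not part of the original header: the bottom bits of
 * a slot_phys[] entry hold the page order (mask 0x1f) plus the caching and
 * ownership flags above, so the order (log2 of the page size) is recovered
 * by masking.
 */
static inline unsigned int kvmppc_slot_phys_order_sketch(unsigned long entry)
{
	return entry & KVMPPC_PAGE_ORDER_MASK;
}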

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long *rmap;
	unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
};

struct kvm_arch {
	unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long hpt_virt;
	struct revmap_entry *revmap;
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
	unsigned long host_sdr1;
	int tlbie_lock;
	unsigned long lpcr;
	unsigned long rmor;
	struct kvmppc_linear_info *rma;
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
	u32 hpt_order;
	atomic_t vcpus_running;
	u32 online_vcores;
	unsigned long hpt_npte;
	unsigned long hpt_mask;
	atomic_t hpte_mod_interest;
	spinlock_t slot_phys_lock;
	cpumask_t need_tlb_flush;
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
	struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
#endif
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_count combines an entry count in the bottom 8 bits
 * and an exit count in the next 8 bits. This is so that we can
 * atomically increment the entry count iff the exit count is 0
 * without taking the lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int n_busy;
	int num_threads;
	int entry_exit_count;
	int n_woken;
	int nap_count;
	int napping_threads;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct list_head runnable_threads;
	spinlock_t lock;
	wait_queue_head_t wq;
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
};

#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
#define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8)
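
/*
 * Illustrative sketch, not part of the original header: one way to do the
 * "increment the entry count iff the exit count is 0" step described in
 * the comment above, using a cmpxchg loop instead of vc->lock. The real
 * entry path may do this differently; this only shows how the packed
 * entry/exit counts are meant to be used.
 */
static inline bool kvmppc_try_enter_vcore_sketch(struct kvmppc_vcore *vc)
{
	int old, new;

	do {
		old = vc->entry_exit_count;
		if (old >> 8)		/* an exit is already in progress */
			return false;
		new = old + 1;		/* bump the entry count (bottom 8 bits) */
	} while (cmpxchg(&vc->entry_exit_count, old, new) != old);

	return true;
}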

/* Values for vcore_state */
#define VCORE_INACTIVE 0
#define VCORE_SLEEPING 1
#define VCORE_STARTING 2
#define VCORE_RUNNING 3
#define VCORE_EXITING 4

/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest. There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
	void *pinned_addr;	/* Address in kernel linear mapping */
	void *pinned_end;	/* End of region */
	unsigned long next_gpa;	/* Guest phys addr for update */
	unsigned long len;	/* Number of bytes required */
	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
};

struct kvmppc_pte {
	ulong eaddr;
	u64 vpage;
	ulong raddr;
	bool may_read : 1;
	bool may_write : 1;
	bool may_execute : 1;
};

struct kvmppc_mmu {
	/* book3s_64 only */
	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
	u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
	u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbia)(struct kvm_vcpu *vcpu);
	/* book3s */
	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
	u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
	int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
	void (*reset_msr)(struct kvm_vcpu *vcpu);
	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
	int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
	u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};

struct kvmppc_slb {
	u64 esid;
	u64 vsid;
	u64 orige;
	u64 origv;
	bool valid : 1;
	bool Ks : 1;
	bool Kp : 1;
	bool nx : 1;
	bool large : 1;	/* PTEs are 16MB */
	bool tb : 1;	/* 1TB segment */
	bool class : 1;
};

# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
# else
#define KVMPPC_BOOKE_IAC_NUM 4
#define KVMPPC_BOOKE_DAC_NUM 2
# endif
#define KVMPPC_BOOKE_MAX_IAC 4
#define KVMPPC_BOOKE_MAX_DAC 2

struct kvmppc_booke_debug_reg {
	u32 dbcr0;
	u32 dbcr1;
	u32 dbcr2;
#ifdef CONFIG_KVM_E500MC
	u32 dbcr4;
#endif
	u64 iac[KVMPPC_BOOKE_MAX_IAC];
	u64 dac[KVMPPC_BOOKE_MAX_DAC];
};

struct kvm_vcpu_arch {
	ulong host_stack;
	u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
	struct kvmppc_slb slb[64];
	int slb_max;	/* 1 + index of last valid entry in slb[] */
	int slb_nr;	/* total number of entries in SLB */
	struct kvmppc_mmu mmu;
#endif
	ulong gpr[32];
	u64 fpr[32];
	u64 fpscr;
#ifdef CONFIG_SPE
	ulong evr[32];
	ulong spefscr;
	ulong host_spefscr;
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
#endif
#ifdef CONFIG_VSX
	u64 vsr[64];
#endif
#ifdef CONFIG_KVM_BOOKE_HV
	u32 host_mas4;
	u32 host_mas6;
	u32 shadow_epcr;
	u32 shadow_msrp;
	u32 eplc;
	u32 epsc;
	u32 oldpir;
#endif
#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
	u32 epcr;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S
	/* For Gekko paired singles */
	u32 qpr[32];
#endif
	ulong pc;
	ulong ctr;
	ulong lr;
	ulong xer;
	u32 cr;
#ifdef CONFIG_PPC_BOOK3S
	ulong hflags;
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
	ulong dscr;
	ulong amr;
	ulong uamor;
	u32 ctrl;
	ulong dabr;
#endif
	u32 vrsave; /* also USPRG0 */
	u32 mmucr;
	/* shadow_msr is unused for BookE HV */
	ulong shadow_msr;
	ulong csrr0;
	ulong csrr1;
	ulong dsrr0;
	ulong dsrr1;
	ulong mcsrr0;
	ulong mcsrr1;
	ulong mcsr;
	u32 dec;
#ifdef CONFIG_BOOKE
	u32 decar;
#endif
	u32 tbl;
	u32 tbu;
	u32 tcr;
	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
	u32 ivor[64];
	ulong ivpr;
	u32 pvr;
	u32 shadow_pid;
	u32 shadow_pid1;
	u32 pid;
	u32 swap_pid;
	u32 ccr0;
	u32 ccr1;
	u32 dbsr;
	u64 mmcr[3];
	u32 pmc[8];
#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
	struct kvmppc_exit_timing timing_exit;
	struct kvmppc_exit_timing timing_last_enter;
	u32 last_exit_type;
	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_last_exit;
	struct dentry *debugfs_exit_timing;
#endif
#ifdef CONFIG_PPC_BOOK3S
	ulong fault_dar;
	u32 fault_dsisr;
#endif
#ifdef CONFIG_BOOKE
	ulong fault_dear;
	ulong fault_esr;
	ulong queued_dear;
	ulong queued_esr;
	spinlock_t wdt_lock;
	struct timer_list wdt_timer;
	u32 tlbcfg[4];
	u32 mmucfg;
	u32 epr;
	struct kvmppc_booke_debug_reg dbg_reg;
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_is_bigendian;
	u8 mmio_sign_extend;
	u8 dcr_needed;
	u8 dcr_is_write;
	u8 osi_needed;
	u8 osi_enabled;
	u8 papr_enabled;
	u8 watchdog_enabled;
	u8 sane;
	u8 cpu_type;
	u8 hcall_needed;
	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
	struct hrtimer dec_timer;
	struct tasklet_struct tasklet;
	u64 dec_jiffies;
	u64 dec_expires;
	unsigned long pending_exceptions;
	u8 ceded;
	u8 prodded;
	u32 last_inst;
	wait_queue_head_t *wqp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;
	int state;
	int ptid;
	bool timer_running;
	wait_queue_head_t cpu_run;
	struct kvm_vcpu_arch_shared *shared;
	unsigned long magic_page_pa; /* phys addr to map the magic page to */
	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	struct kvm_vcpu_arch_shared shregs;
	unsigned long pgfault_addr;
	long pgfault_index;
	unsigned long pgfault_hpte[2];
	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;
	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
	struct kvmppc_vpa dtl;
	struct dtl_entry *dtl_ptr;
	unsigned long dtl_index;
	u64 stolen_logged;
	struct kvmppc_vpa slb_shadow;
	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;
#endif
};

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_BUSY_IN_HOST 2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f
#define KVM_MMIO_REG_EXT_MASK 0xffe0
#define KVM_MMIO_REG_GPR 0x0000
#define KVM_MMIO_REG_FPR 0x0020
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
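
/*
 * Illustrative sketch, not part of the original header: splitting an
 * io_gpr value into its register class and register index using the
 * masks above. A value of KVM_MMIO_REG_FPR | 3, for example, names
 * FPR 3 as the MMIO source/target.
 */
static inline int kvm_mmio_reg_index_sketch(u8 io_gpr)
{
	return io_gpr & KVM_MMIO_REG_MASK;	/* which register of its class */
}

static inline int kvm_mmio_reg_class_sketch(u8 io_gpr)
{
	return io_gpr & KVM_MMIO_REG_EXT_MASK;	/* GPR, FPR, QPR or FQPR */
}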

#define __KVM_HAVE_ARCH_WQP

#endif /* __POWERPC_KVM_HOST_H__ */