kvm_cache_regs.h

#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
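
/*
 * Lazy guest-register cache. vcpu->arch.regs[] mirrors the guest's
 * general-purpose registers; regs_avail tracks which entries are current
 * and regs_dirty tracks which must be written back to hardware (e.g. the
 * VMCS) before the next guest entry. Reads pull a stale register in via
 * kvm_x86_ops->cache_reg(); writes only mark the entry dirty so vendor
 * code can flush it later. This header assumes struct kvm_vcpu and
 * kvm_x86_ops are already visible (i.e. linux/kvm_host.h was included
 * first by the .c file).
 */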
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
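
/*
 * RIP goes through the same cache; on Intel hardware it lives in the
 * VMCS, so reading it eagerly on every exit would cost a VMREAD. A
 * typical use, sketched here with a hypothetical insn_len, is advancing
 * the guest past an instruction the host just emulated:
 *
 *	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */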
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
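
/*
 * The PDPTRs are only meaningful under PAE paging. All four entries are
 * cached as one unit behind the pseudo-register VCPU_EXREG_PDPTR, so a
 * single avail bit covers the whole set.
 */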
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.pdptrs[index];
}
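
/*
 * Some control-register bits can be handed to the guest to toggle without
 * a VM exit (e.g. CR0.TS for lazy FPU switching). Those "guest-owned"
 * bits may be stale in vcpu->arch.cr0/cr4, so any read whose mask
 * overlaps them must first decache the live value from hardware.
 */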
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}
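
/*
 * kvm_read_cr0()/kvm_read_cr4() below read every bit, so they always pay
 * for the decache whenever any bit is guest-owned. A caller that only
 * needs one flag can pass a narrow mask instead, e.g. (illustrative):
 *
 *	if (kvm_read_cr0_bits(vcpu, X86_CR0_PG))
 *		... guest paging is enabled ...
 */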
static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	if (mask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

#endif /* ASM_KVM_CACHE_REGS_H */