/*
 * kvm_cache_regs.h — inline helpers for KVM's per-vcpu cached guest
 * register state (GPRs, RIP, PDPTRs, CR0/CR4).
 */
  1. #ifndef ASM_KVM_CACHE_REGS_H
  2. #define ASM_KVM_CACHE_REGS_H
  3. static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
  4. enum kvm_reg reg)
  5. {
  6. if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
  7. kvm_x86_ops->cache_reg(vcpu, reg);
  8. return vcpu->arch.regs[reg];
  9. }
  10. static inline void kvm_register_write(struct kvm_vcpu *vcpu,
  11. enum kvm_reg reg,
  12. unsigned long val)
  13. {
  14. vcpu->arch.regs[reg] = val;
  15. __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
  16. __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
  17. }
  18. static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
  19. {
  20. return kvm_register_read(vcpu, VCPU_REGS_RIP);
  21. }
  22. static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
  23. {
  24. kvm_register_write(vcpu, VCPU_REGS_RIP, val);
  25. }
  26. static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
  27. {
  28. if (!test_bit(VCPU_EXREG_PDPTR,
  29. (unsigned long *)&vcpu->arch.regs_avail))
  30. kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
  31. return vcpu->arch.pdptrs[index];
  32. }
  33. static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
  34. {
  35. return vcpu->arch.cr0 & mask;
  36. }
  37. static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
  38. {
  39. return kvm_read_cr0_bits(vcpu, ~0UL);
  40. }
  41. static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
  42. {
  43. if (mask & vcpu->arch.cr4_guest_owned_bits)
  44. kvm_x86_ops->decache_cr4_guest_bits(vcpu);
  45. return vcpu->arch.cr4 & mask;
  46. }
  47. static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
  48. {
  49. return kvm_read_cr4_bits(vcpu, ~0UL);
  50. }
  51. #endif