gaccess.h

/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

/* Convert real to absolute address by applying the prefix of the CPU */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if (gaddr < 2 * PAGE_SIZE)
		gaddr += prefix;
	else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE)
		gaddr -= prefix;
	return gaddr;
}
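
/*
 * Translate a guest pointer to a host user-space pointer by resolving it
 * through the guest mapping (gmap). If "prefixing" is set, the address is
 * first converted from real to absolute. On failure the -EFAULT error
 * value is returned cast to a pointer, so callers must check the result
 * with IS_ERR().
 */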
static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
					  void __user *gptr,
					  int prefixing)
{
	unsigned long gaddr = (unsigned long) gptr;
	unsigned long uaddr;

	if (prefixing)
		gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddr))
		uaddr = -EFAULT;
	return (void __user *)uaddr;
}
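
/*
 * get_guest()/put_guest() read or write a single, naturally aligned value
 * at a guest real address (prefixing is always applied). They evaluate to
 * 0 on success and to a negative error code (-EFAULT) if the address
 * cannot be translated or the user-space access fails; a misaligned
 * target pointer triggers BUG_ON().
 */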
#define get_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret;						\
								\
	if (IS_ERR((void __force *)__uptr)) {			\
		__ret = PTR_ERR((void __force *)__uptr);	\
	} else {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = get_user(x, __uptr);			\
	}							\
	__ret;							\
})

#define put_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret;						\
								\
	if (IS_ERR((void __force *)__uptr)) {			\
		__ret = PTR_ERR((void __force *)__uptr);	\
	} else {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = put_user(x, __uptr);			\
	}							\
	__ret;							\
})
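
/*
 * Copy "len" bytes between guest and host memory. The copy is split at
 * page boundaries because every guest page has to be translated
 * separately through __gptr_to_uptr(). Returns 0 on success or -EFAULT
 * if any page cannot be translated or copied.
 */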
static inline int __copy_guest(struct kvm_vcpu *vcpu, unsigned long to,
			       unsigned long from, unsigned long len,
			       int to_guest, int prefixing)
{
	unsigned long _len, rc;
	void __user *uptr;

	while (len) {
		uptr = to_guest ? (void __user *)to : (void __user *)from;
		uptr = __gptr_to_uptr(vcpu, uptr, prefixing);
		if (IS_ERR((void __force *)uptr))
			return -EFAULT;
		_len = PAGE_SIZE - ((unsigned long)uptr & (PAGE_SIZE - 1));
		_len = min(_len, len);
		if (to_guest)
			rc = copy_to_user((void __user *) uptr, (void *)from, _len);
		else
			rc = copy_from_user((void *)to, (void __user *)uptr, _len);
		if (rc)
			return -EFAULT;
		len -= _len;
		from += _len;
		to += _len;
	}
	return 0;
}
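
/*
 * Bulk-copy helpers: copy_to_guest()/copy_from_guest() operate on guest
 * real addresses (prefixing applied), while the *_absolute variants
 * operate on guest absolute addresses (no prefixing).
 */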
#define copy_to_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 1)
#define copy_from_guest(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 1)
#define copy_to_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, to, (unsigned long)from, size, 1, 0)
#define copy_from_guest_absolute(vcpu, to, from, size) \
	__copy_guest(vcpu, (unsigned long)to, from, size, 0, 0)
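
/*
 * Example (hypothetical caller, for illustration only; "gaddr" and "buf"
 * are illustrative names): copy eight bytes from a guest real address
 * into a local buffer and report a fault to the caller:
 *
 *	u8 buf[8];
 *
 *	if (copy_from_guest(vcpu, buf, gaddr, sizeof(buf)))
 *		return -EFAULT;
 */
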
#endif /* __KVM_S390_GACCESS_H */