/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
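
/*
 * __guestaddr_to_user turns a guest real address into a host userspace
 * pointer: the low two pages and the prefix pages are swapped (prefixing),
 * the result is bounds-checked against the guest memory size, and the
 * start of the guest memory region in the host address space is added.
 * Addresses outside guest memory yield an ERR_PTR(-EFAULT).
 */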

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               u64 guestaddr)
{
        u64 prefix  = vcpu->arch.sie_block->prefix;
        u64 origin  = vcpu->kvm->arch.guest_origin;
        u64 memsize = vcpu->kvm->arch.guest_memsize;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        if (guestaddr > memsize)
                return (void __user __force *) ERR_PTR(-EFAULT);

        guestaddr += origin;

        return (void __user *) guestaddr;
}
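
/*
 * Typed single-value accessors. The 16/32/64-bit variants BUG on
 * misaligned guest addresses; all of them return 0 on success and
 * -EFAULT for addresses outside guest memory.
 */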

static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}
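
/*
 * Bulk copy helpers. The fast path maps the whole range once and uses
 * copy_to_user()/copy_from_user(); ranges that cross a prefixing boundary
 * fall back to the byte-wise slow path so every byte gets its own prefix
 * translation.
 */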

static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
                                       const void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        const u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
                                const void *from, unsigned long n)
{
        u64 prefix  = vcpu->arch.sie_block->prefix;
        u64 origin  = vcpu->kvm->arch.guest_origin;
        u64 memsize = vcpu->kvm->arch.guest_memsize;

        /* copies that cross a prefixing boundary go byte by byte */
        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        return copy_to_user((void __user *) guestdest, from, n);
slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         u64 guestsrc, unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  u64 guestsrc, unsigned long n)
{
        u64 prefix  = vcpu->arch.sie_block->prefix;
        u64 origin  = vcpu->kvm->arch.guest_origin;
        u64 memsize = vcpu->kvm->arch.guest_memsize;

        /* copies that cross a prefixing boundary go byte by byte */
        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
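
/*
 * The *_absolute variants operate on guest absolute addresses, i.e. no
 * prefixing is applied; the address is only bounds-checked and relocated
 * by the guest origin.
 */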

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
                                         const void *from, unsigned long n)
{
        u64 origin  = vcpu->kvm->arch.guest_origin;
        u64 memsize = vcpu->kvm->arch.guest_memsize;

        if (guestdest + n > memsize)
                return -EFAULT;

        if (guestdest + n < guestdest)
                return -EFAULT;

        guestdest += origin;

        return copy_to_user((void __user *) guestdest, from, n);
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           u64 guestsrc, unsigned long n)
{
        u64 origin  = vcpu->kvm->arch.guest_origin;
        u64 memsize = vcpu->kvm->arch.guest_memsize;

        if (guestsrc + n > memsize)
                return -EFAULT;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        guestsrc += origin;

        return copy_from_user(to, (void __user *) guestsrc, n);
}
#endif
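
A minimal, hypothetical caller sketch for orientation (not part of this
header): it shows how an intercept handler might read and then write back a
32-bit value at a 4-byte-aligned guest real address. The function name and
the surrounding context are assumptions for illustration only.

static int handle_example_intercept(struct kvm_vcpu *vcpu, u64 guest_real_addr)
{
        u32 word;
        int rc;

        /* translated through the vcpu prefix and the guest origin */
        rc = get_guest_u32(vcpu, guest_real_addr, &word);
        if (rc)
                return rc;      /* -EFAULT: address outside guest memory */

        /* write the (modified) value back to the same guest location */
        return put_guest_u32(vcpu, guest_real_addr, word | 1);
}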