/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
  17. static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
  18. unsigned long guestaddr)
  19. {
  20. unsigned long prefix = vcpu->arch.sie_block->prefix;
  21. unsigned long origin = vcpu->kvm->arch.guest_origin;
  22. unsigned long memsize = vcpu->kvm->arch.guest_memsize;
  23. if (guestaddr < 2 * PAGE_SIZE)
  24. guestaddr += prefix;
  25. else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
  26. guestaddr -= prefix;
  27. if (guestaddr > memsize)
  28. return (void __user __force *) ERR_PTR(-EFAULT);
  29. guestaddr += origin;
  30. return (void __user *) guestaddr;
  31. }
  32. static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  33. u64 *result)
  34. {
  35. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  36. BUG_ON(guestaddr & 7);
  37. if (IS_ERR((void __force *) uptr))
  38. return PTR_ERR((void __force *) uptr);
  39. return get_user(*result, (unsigned long __user *) uptr);
  40. }
  41. static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  42. u32 *result)
  43. {
  44. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  45. BUG_ON(guestaddr & 3);
  46. if (IS_ERR((void __force *) uptr))
  47. return PTR_ERR((void __force *) uptr);
  48. return get_user(*result, (u32 __user *) uptr);
  49. }
  50. static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  51. u16 *result)
  52. {
  53. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  54. BUG_ON(guestaddr & 1);
  55. if (IS_ERR(uptr))
  56. return PTR_ERR(uptr);
  57. return get_user(*result, (u16 __user *) uptr);
  58. }
  59. static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  60. u8 *result)
  61. {
  62. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  63. if (IS_ERR((void __force *) uptr))
  64. return PTR_ERR((void __force *) uptr);
  65. return get_user(*result, (u8 __user *) uptr);
  66. }
  67. static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  68. u64 value)
  69. {
  70. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  71. BUG_ON(guestaddr & 7);
  72. if (IS_ERR((void __force *) uptr))
  73. return PTR_ERR((void __force *) uptr);
  74. return put_user(value, (u64 __user *) uptr);
  75. }
  76. static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  77. u32 value)
  78. {
  79. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  80. BUG_ON(guestaddr & 3);
  81. if (IS_ERR((void __force *) uptr))
  82. return PTR_ERR((void __force *) uptr);
  83. return put_user(value, (u32 __user *) uptr);
  84. }
  85. static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  86. u16 value)
  87. {
  88. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  89. BUG_ON(guestaddr & 1);
  90. if (IS_ERR((void __force *) uptr))
  91. return PTR_ERR((void __force *) uptr);
  92. return put_user(value, (u16 __user *) uptr);
  93. }
  94. static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
  95. u8 value)
  96. {
  97. void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
  98. if (IS_ERR((void __force *) uptr))
  99. return PTR_ERR((void __force *) uptr);
  100. return put_user(value, (u8 __user *) uptr);
  101. }
  102. static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
  103. unsigned long guestdest,
  104. const void *from, unsigned long n)
  105. {
  106. int rc;
  107. unsigned long i;
  108. const u8 *data = from;
  109. for (i = 0; i < n; i++) {
  110. rc = put_guest_u8(vcpu, guestdest++, *(data++));
  111. if (rc < 0)
  112. return rc;
  113. }
  114. return 0;
  115. }
/*
 * copy_to_guest - copy a kernel buffer to prefixed guest memory
 * @vcpu:      vcpu whose prefix register applies
 * @guestdest: destination guest address
 * @from:      kernel source buffer
 * @n:         number of bytes to copy
 *
 * Fast path does one copy_to_user() after translating @guestdest; any copy
 * that straddles a prefix boundary falls back to the byte-wise slow path.
 */
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				const void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	/* crosses the end of the low two pages: mixed translation, go slow */
	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	/* crosses the start of the prefix area */
	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	/* crosses the end of the prefix area */
	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* prefixing: swap the low two pages with the prefix area */
	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	/* reject wrap-around of guestdest + n */
	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	/* NOTE(review): copy_to_user() returns the number of bytes NOT
	 * copied (>= 0), while the slow path returns 0 or -EFAULT; callers
	 * presumably only test for non-zero — confirm */
	return copy_to_user((void __user *) guestdest, from, n);

slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
  142. static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
  143. unsigned long guestsrc,
  144. unsigned long n)
  145. {
  146. int rc;
  147. unsigned long i;
  148. u8 *data = to;
  149. for (i = 0; i < n; i++) {
  150. rc = get_guest_u8(vcpu, guestsrc++, data++);
  151. if (rc < 0)
  152. return rc;
  153. }
  154. return 0;
  155. }
/*
 * copy_from_guest - copy prefixed guest memory into a kernel buffer
 * @vcpu:     vcpu whose prefix register applies
 * @to:       kernel destination buffer
 * @guestsrc: source guest address
 * @n:        number of bytes to copy
 *
 * Mirror image of copy_to_guest(): one copy_from_user() on the fast path,
 * byte-wise slow path whenever the range straddles a prefix boundary.
 */
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long origin = vcpu->kvm->arch.guest_origin;
	unsigned long memsize = vcpu->kvm->arch.guest_memsize;

	/* crosses the end of the low two pages: mixed translation, go slow */
	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	/* crosses the start of the prefix area */
	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	/* crosses the end of the prefix area */
	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	/* prefixing: swap the low two pages with the prefix area */
	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	/* reject wrap-around of guestsrc + n */
	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	/* NOTE(review): copy_from_user() returns bytes NOT copied (>= 0),
	 * the slow path returns 0 or -EFAULT — confirm callers only test
	 * for non-zero */
	return copy_from_user(to, (void __user *) guestsrc, n);

slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
  182. static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
  183. unsigned long guestdest,
  184. const void *from, unsigned long n)
  185. {
  186. unsigned long origin = vcpu->kvm->arch.guest_origin;
  187. unsigned long memsize = vcpu->kvm->arch.guest_memsize;
  188. if (guestdest + n > memsize)
  189. return -EFAULT;
  190. if (guestdest + n < guestdest)
  191. return -EFAULT;
  192. guestdest += origin;
  193. return copy_to_user((void __user *) guestdest, from, n);
  194. }
  195. static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
  196. unsigned long guestsrc,
  197. unsigned long n)
  198. {
  199. unsigned long origin = vcpu->kvm->arch.guest_origin;
  200. unsigned long memsize = vcpu->kvm->arch.guest_memsize;
  201. if (guestsrc + n > memsize)
  202. return -EFAULT;
  203. if (guestsrc + n < guestsrc)
  204. return -EFAULT;
  205. guestsrc += origin;
  206. return copy_from_user(to, (void __user *) guestsrc, n);
  207. }
#endif /* __KVM_S390_GACCESS_H */