gaccess.h

/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

/*
 * Translate a guest real address into a host user-space address: apply
 * low-core prefixing (the first two pages and the prefix area are
 * swapped), then resolve the guest mapping via gmap_fault().
 */
static inline void *__gptr_to_uptr(struct kvm_vcpu *vcpu, void *gptr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long gaddr = (unsigned long) gptr;
	unsigned long uaddr;

	/* apply prefixing */
	if (gaddr < 2 * PAGE_SIZE)
		gaddr += prefix;
	else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE))
		gaddr -= prefix;

	/* resolve the guest mapping to a host user-space address */
	uaddr = gmap_fault(gaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddr))
		uaddr = -EFAULT;
	return (void *)uaddr;
}
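
/*
 * Worked example, added for illustration only (the prefix value below is
 * hypothetical): with a prefix of 0x20000, a guest access to real address
 * 0x1000 is resolved at absolute address 0x21000, while an access to real
 * address 0x20000 is resolved at absolute address 0x0; the first two pages
 * and the prefix area are swapped.
 */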

/*
 * Read one naturally aligned item of type *(gptr) from guest address gptr
 * into x.  Returns 0 on success or -EFAULT if the address cannot be
 * translated or accessed.
 */
#define get_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);	\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret = PTR_RET(__uptr);				\
								\
	if (!__ret) {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = get_user(x, __uptr);			\
	}							\
	__ret;							\
})

/*
 * Write x to guest address gptr.  Returns 0 on success or -EFAULT if the
 * address cannot be translated or accessed.
 */
#define put_guest(vcpu, x, gptr)				\
({								\
	__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr);	\
	int __mask = sizeof(__typeof__(*(gptr))) - 1;		\
	int __ret = PTR_RET(__uptr);				\
								\
	if (!__ret) {						\
		BUG_ON((unsigned long)__uptr & __mask);		\
		__ret = put_user(x, __uptr);			\
	}							\
	__ret;							\
})
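
/*
 * Illustrative sketch only, not part of the original header: read a 32-bit
 * word at guest real address gaddr with get_guest() and write it back
 * incremented with put_guest().  The helper name and the increment are
 * hypothetical.
 */
static inline int __example_bump_guest_u32(struct kvm_vcpu *vcpu,
					   unsigned long gaddr)
{
	u32 val;
	int rc;

	rc = get_guest(vcpu, val, (u32 *) gaddr);
	if (rc)
		return rc;
	return put_guest(vcpu, val + 1, (u32 *) gaddr);
}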

/*
 * Byte-wise copy to the guest via put_guest(), so that low-core prefixing
 * is applied for every single byte.
 */
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest(vcpu, *(data++), (u8 *)guestdest++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

/*
 * Copy to the guest in large chunks, resolving the guest mapping once per
 * segment (PMD_SIZE) instead of once per byte.  Prefixing is not applied
 * here; callers must have adjusted guestdest already.
 */
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

/*
 * Copy to guest real addresses.  If the copy would cross the boundary of
 * the low-core or of the prefix area, fall back to the byte-wise slow
 * path; otherwise apply prefixing once and use the fast path.
 */
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);

slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
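
/*
 * Illustrative sketch only, not part of the original header: writing a
 * small local buffer to a guest real address.  The buffer, its size and
 * the destination address are hypothetical.
 */
static inline int __example_write_guest_buf(struct kvm_vcpu *vcpu,
					    unsigned long guestdest)
{
	u8 buf[16] = { 0 };

	/* returns 0 on success, a negative error code on failure */
	return copy_to_guest(vcpu, guestdest, buf, sizeof(buf));
}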

/*
 * Byte-wise copy from the guest via get_guest(), so that low-core
 * prefixing is applied for every single byte.
 */
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest(vcpu, *(data++), (u8 *)guestsrc++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

/*
 * Copy from the guest in large chunks, resolving the guest mapping once
 * per segment (PMD_SIZE) instead of once per byte.  Prefixing is not
 * applied here; callers must have adjusted guestsrc already.
 */
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

/*
 * Copy from guest real addresses.  If the copy would cross the boundary of
 * the low-core or of the prefix area, fall back to the byte-wise slow
 * path; otherwise apply prefixing once and use the fast path.
 */
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);

slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
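
/*
 * Illustrative sketch only, not part of the original header: reading a
 * small guest buffer at a guest real address into a local buffer.  The
 * buffer, its size and the source address are hypothetical.
 */
static inline int __example_read_guest_buf(struct kvm_vcpu *vcpu,
					   unsigned long guestsrc)
{
	u8 buf[16];

	/* returns 0 on success, a negative error code on failure */
	return copy_from_guest(vcpu, buf, guestsrc, sizeof(buf));
}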

#endif