/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
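/*
 * Translate a guest real address into a host user-space address via the
 * guest address space (gmap).  The first two pages of guest real storage
 * (low core) are relocated by the prefix register in the SIE block, so the
 * low-core range and the prefix range are swapped before the gmap lookup.
 * On failure the returned pointer encodes -EFAULT.
 */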
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;
	unsigned long uaddress;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	uaddress = gmap_fault(guestaddr, vcpu->arch.gmap);
	if (IS_ERR_VALUE(uaddress))
		uaddress = -EFAULT;

	return (void __user *)uaddress;
}
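/*
 * get_guest_uNN: read a naturally aligned 8/4/2/1 byte value from guest
 * memory.  The BUG_ON() checks treat an unaligned guest address as a
 * programming error in the caller.  Returns 0 on success or a negative
 * error code (e.g. -EFAULT) on failure.
 */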
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}
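/*
 * put_guest_uNN: write a naturally aligned 8/4/2/1 byte value to guest
 * memory, mirroring the get_guest_uNN() helpers above.
 */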
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}
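/*
 * __copy_to_guest_slow() copies byte by byte through put_guest_u8(), so
 * every byte goes through the prefix handling in __guestaddr_to_user().
 * __copy_to_guest_fast() below assumes no prefix swapping is needed for the
 * destination and copies in segment (PMD_SIZE) sized chunks, resolving the
 * host mapping once per segment via gmap_fault().
 */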
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));

		if (rc < 0)
			return rc;
	}
	return 0;
}
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
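/*
 * copy_to_guest_absolute() copies to a guest absolute address, i.e. without
 * any prefix handling, by going straight to the fast path.
 */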
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
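/*
 * copy_to_guest() handles prefixing: if the copy would straddle the end of
 * low core, the start of the prefix area or the end of the prefix area, it
 * falls back to the byte-wise slow path; otherwise the destination is
 * adjusted for prefixing (as in __guestaddr_to_user()) and the fast path
 * is used.
 */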
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
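/*
 * The helpers below mirror the copy-to-guest functions above for the
 * guest-to-host direction: a byte-wise slow path, a segment-wise fast path,
 * an absolute variant and the prefix-aware copy_from_guest().
 */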
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);

		if (rc < 0)
			return rc;
	}
	return 0;
}
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
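/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * an interception handler could read a 4-byte operand from guest memory,
 * assuming "vcpu" and a 4-byte aligned guest address "gaddr":
 *
 *	u32 val;
 *	int rc = get_guest_u32(vcpu, gaddr, &val);
 *	if (rc)
 *		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 *
 * The error handling shown here is only an illustration of how callers
 * typically react to a failed guest access.
 */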
#endif