gaccess.h

/*
 * access.h - access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
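
/*
 * Translate a guest address to a host user-space pointer.  The first two
 * pages of guest memory are subject to low-core prefixing: addresses below
 * 2 * PAGE_SIZE are redirected into the prefix area, and addresses inside
 * the prefix area are redirected back to absolute 0.  The resulting address
 * is then resolved through the guest address space via gmap_fault(), whose
 * error values the callers check with IS_ERR().
 */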
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
                                               unsigned long guestaddr)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if (guestaddr < 2 * PAGE_SIZE)
                guestaddr += prefix;
        else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
                guestaddr -= prefix;

        return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}
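
/*
 * get_guest_uNN - read a naturally aligned value from guest memory.
 * Misaligned addresses are a caller bug (BUG_ON); translation failures
 * are propagated as a negative error code.
 */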
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR(uptr))
                return PTR_ERR(uptr);

        return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 *result)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return get_user(*result, (u8 __user *) uptr);
}
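
/*
 * put_guest_uNN - write a naturally aligned value to guest memory,
 * mirroring the get_guest_uNN helpers above.
 */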
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u64 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 7);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u32 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 3);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                                u16 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        BUG_ON(guestaddr & 1);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
                               u8 value)
{
        void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        return put_user(value, (u8 __user *) uptr);
}
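
/*
 * Byte-wise copy to guest memory via put_guest_u8().  Slow, but correct
 * even when the destination range crosses a prefixing boundary, because
 * every byte is translated individually.
 */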
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = from;

        for (i = 0; i < n; i++) {
                rc = put_guest_u8(vcpu, guestdest++, *(data++));
                if (rc < 0)
                        return rc;
        }
        return 0;
}
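
/*
 * Copy to guest memory in segment-sized (PMD_SIZE) chunks so each segment
 * needs only one gmap_fault() translation.  The copy is split into a
 * leading partial segment, a run of full segments and a trailing tail.
 */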
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
                                       unsigned long guestdest,
                                       void *from, unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestdest + n < guestdest)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestdest & ~PMD_MASK);

        r = copy_to_user(uptr, from, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        from += size;
        n -= size;
        guestdest += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                from += PMD_SIZE;
                n -= PMD_SIZE;
                guestdest += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_to_user(uptr, from, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}
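
/* Copy to a guest absolute address, i.e. without applying prefixing. */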
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
                                         unsigned long guestdest,
                                         void *from, unsigned long n)
{
        return __copy_to_guest_fast(vcpu, guestdest, from, n);
}
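
/*
 * Copy to a guest real address.  If the destination straddles one of the
 * prefixing boundaries (absolute 0 or the prefix area), fall back to the
 * byte-wise slow path; otherwise apply the prefix swap once up front and
 * use the fast segment-wise copy.
 */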
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
                                void *from, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestdest < prefix) && (guestdest + n > prefix))
                goto slowpath;

        if ((guestdest < prefix + 2 * PAGE_SIZE)
            && (guestdest + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestdest < 2 * PAGE_SIZE)
                guestdest += prefix;
        else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
                guestdest -= prefix;

        return __copy_to_guest_fast(vcpu, guestdest, from, n);

slowpath:
        return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
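
/* Byte-wise counterpart of __copy_to_guest_slow() for reads from the guest. */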
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int rc;
        unsigned long i;
        u8 *data = to;

        for (i = 0; i < n; i++) {
                rc = get_guest_u8(vcpu, guestsrc++, data++);
                if (rc < 0)
                        return rc;
        }
        return 0;
}
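
/*
 * Segment-wise counterpart of __copy_to_guest_fast() for reads from the
 * guest: translate each PMD_SIZE chunk once and copy it with
 * copy_from_user().
 */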
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
                                         unsigned long guestsrc,
                                         unsigned long n)
{
        int r;
        void __user *uptr;
        unsigned long size;

        if (guestsrc + n < guestsrc)
                return -EFAULT;

        /* simple case: all within one segment table entry? */
        if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;

                goto out;
        }

        /* copy first segment */
        uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

        if (IS_ERR((void __force *) uptr))
                return PTR_ERR((void __force *) uptr);

        size = PMD_SIZE - (guestsrc & ~PMD_MASK);

        r = copy_from_user(to, uptr, size);

        if (r) {
                r = -EFAULT;
                goto out;
        }
        to += size;
        n -= size;
        guestsrc += size;

        /* copy full segments */
        while (n >= PMD_SIZE) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, PMD_SIZE);

                if (r) {
                        r = -EFAULT;
                        goto out;
                }
                to += PMD_SIZE;
                n -= PMD_SIZE;
                guestsrc += PMD_SIZE;
        }

        /* copy the tail segment */
        if (n) {
                uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

                if (IS_ERR((void __force *) uptr))
                        return PTR_ERR((void __force *) uptr);

                r = copy_from_user(to, uptr, n);

                if (r)
                        r = -EFAULT;
        }
out:
        return r;
}
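
/* Copy from a guest absolute address, i.e. without applying prefixing. */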
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
                                           unsigned long guestsrc,
                                           unsigned long n)
{
        return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}
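
/*
 * Copy from a guest real address, handling prefixing like copy_to_guest():
 * ranges that straddle a prefixing boundary take the byte-wise slow path.
 */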
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
                                  unsigned long guestsrc, unsigned long n)
{
        unsigned long prefix = vcpu->arch.sie_block->prefix;

        if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
                goto slowpath;

        if ((guestsrc < prefix) && (guestsrc + n > prefix))
                goto slowpath;

        if ((guestsrc < prefix + 2 * PAGE_SIZE)
            && (guestsrc + n > prefix + 2 * PAGE_SIZE))
                goto slowpath;

        if (guestsrc < 2 * PAGE_SIZE)
                guestsrc += prefix;
        else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
                guestsrc -= prefix;

        return __copy_from_guest_fast(vcpu, to, guestsrc, n);

slowpath:
        return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif