/* diag.c */
  1. /*
  2. * handling diagnose instructions
  3. *
  4. * Copyright IBM Corp. 2008, 2011
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. */
  13. #include <linux/kvm.h>
  14. #include <linux/kvm_host.h>
  15. #include <asm/pgalloc.h>
  16. #include <asm/gmap.h>
  17. #include <asm/virtio-ccw.h>
  18. #include "kvm-s390.h"
  19. #include "trace.h"
  20. #include "trace-s390.h"
  21. #include "gaccess.h"
  22. static int diag_release_pages(struct kvm_vcpu *vcpu)
  23. {
  24. unsigned long start, end;
  25. unsigned long prefix = kvm_s390_get_prefix(vcpu);
  26. start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
  27. end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
  28. vcpu->stat.diagnose_10++;
  29. if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
  30. || start < 2 * PAGE_SIZE)
  31. return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  32. VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
  33. /*
  34. * We checked for start >= end above, so lets check for the
  35. * fast path (no prefix swap page involved)
  36. */
  37. if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) {
  38. gmap_discard(vcpu->arch.gmap, start, end);
  39. } else {
  40. /*
  41. * This is slow path. gmap_discard will check for start
  42. * so lets split this into before prefix, prefix, after
  43. * prefix and let gmap_discard make some of these calls
  44. * NOPs.
  45. */
  46. gmap_discard(vcpu->arch.gmap, start, prefix);
  47. if (start <= prefix)
  48. gmap_discard(vcpu->arch.gmap, 0, 4096);
  49. if (end > prefix + 4096)
  50. gmap_discard(vcpu->arch.gmap, 4096, 8192);
  51. gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
  52. }
  53. return 0;
  54. }
  55. static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
  56. {
  57. struct prs_parm {
  58. u16 code;
  59. u16 subcode;
  60. u16 parm_len;
  61. u16 parm_version;
  62. u64 token_addr;
  63. u64 select_mask;
  64. u64 compare_mask;
  65. u64 zarch;
  66. };
  67. struct prs_parm parm;
  68. int rc;
  69. u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
  70. u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
  71. VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
  72. vcpu->run->s.regs.gprs[rx]);
  73. vcpu->stat.diagnose_258++;
  74. if (vcpu->run->s.regs.gprs[rx] & 7)
  75. return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  76. rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
  77. if (rc)
  78. return kvm_s390_inject_prog_cond(vcpu, rc);
  79. if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258)
  80. return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  81. switch (parm.subcode) {
  82. case 0: /* TOKEN */
  83. VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
  84. "select mask 0x%llx compare mask 0x%llx",
  85. parm.token_addr, parm.select_mask, parm.compare_mask);
  86. if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
  87. /*
  88. * If the pagefault handshake is already activated,
  89. * the token must not be changed. We have to return
  90. * decimal 8 instead, as mandated in SC24-6084.
  91. */
  92. vcpu->run->s.regs.gprs[ry] = 8;
  93. return 0;
  94. }
  95. if ((parm.compare_mask & parm.select_mask) != parm.compare_mask ||
  96. parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
  97. return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  98. if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
  99. return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
  100. vcpu->arch.pfault_token = parm.token_addr;
  101. vcpu->arch.pfault_select = parm.select_mask;
  102. vcpu->arch.pfault_compare = parm.compare_mask;
  103. vcpu->run->s.regs.gprs[ry] = 0;
  104. rc = 0;
  105. break;
  106. case 1: /*
  107. * CANCEL
  108. * Specification allows to let already pending tokens survive
  109. * the cancel, therefore to reduce code complexity, we assume
  110. * all outstanding tokens are already pending.
  111. */
  112. VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
  113. if (parm.token_addr || parm.select_mask ||
  114. parm.compare_mask || parm.zarch)
  115. return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
  116. vcpu->run->s.regs.gprs[ry] = 0;
  117. /*
  118. * If the pfault handling was not established or is already
  119. * canceled SC24-6084 requests to return decimal 4.
  120. */
  121. if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
  122. vcpu->run->s.regs.gprs[ry] = 4;
  123. else
  124. vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
  125. rc = 0;
  126. break;
  127. default:
  128. rc = -EOPNOTSUPP;
  129. break;
  130. }
  131. return rc;
  132. }
  133. static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
  134. {
  135. VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
  136. vcpu->stat.diagnose_44++;
  137. kvm_vcpu_on_spin(vcpu);
  138. return 0;
  139. }
  140. static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
  141. {
  142. struct kvm_vcpu *tcpu;
  143. int tid;
  144. tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
  145. vcpu->stat.diagnose_9c++;
  146. VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
  147. if (tid == vcpu->vcpu_id)
  148. return 0;
  149. tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
  150. if (tcpu)
  151. kvm_vcpu_yield_to(tcpu);
  152. return 0;
  153. }
  154. static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
  155. {
  156. unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
  157. unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
  158. VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
  159. vcpu->stat.diagnose_308++;
  160. switch (subcode) {
  161. case 3:
  162. vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
  163. break;
  164. case 4:
  165. vcpu->run->s390_reset_flags = 0;
  166. break;
  167. default:
  168. return -EOPNOTSUPP;
  169. }
  170. if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
  171. kvm_s390_vcpu_stop(vcpu);
  172. vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
  173. vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
  174. vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
  175. vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
  176. VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
  177. vcpu->run->s390_reset_flags);
  178. trace_kvm_s390_request_resets(vcpu->run->s390_reset_flags);
  179. return -EREMOTE;
  180. }
  181. static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
  182. {
  183. int ret;
  184. vcpu->stat.diagnose_500++;
  185. /* No virtio-ccw notification? Get out quickly. */
  186. if (!vcpu->kvm->arch.css_support ||
  187. (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
  188. return -EOPNOTSUPP;
  189. VCPU_EVENT(vcpu, 4, "diag 0x500 schid 0x%8.8x queue 0x%x cookie 0x%llx",
  190. (u32) vcpu->run->s.regs.gprs[2],
  191. (u32) vcpu->run->s.regs.gprs[3],
  192. vcpu->run->s.regs.gprs[4]);
  193. /*
  194. * The layout is as follows:
  195. * - gpr 2 contains the subchannel id (passed as addr)
  196. * - gpr 3 contains the virtqueue index (passed as datamatch)
  197. * - gpr 4 contains the index on the bus (optionally)
  198. */
  199. ret = kvm_io_bus_write_cookie(vcpu, KVM_VIRTIO_CCW_NOTIFY_BUS,
  200. vcpu->run->s.regs.gprs[2] & 0xffffffff,
  201. 8, &vcpu->run->s.regs.gprs[3],
  202. vcpu->run->s.regs.gprs[4]);
  203. /*
  204. * Return cookie in gpr 2, but don't overwrite the register if the
  205. * diagnose will be handled by userspace.
  206. */
  207. if (ret != -EOPNOTSUPP)
  208. vcpu->run->s.regs.gprs[2] = ret;
  209. /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
  210. return ret < 0 ? ret : 0;
  211. }
  212. int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
  213. {
  214. int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff;
  215. if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
  216. return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
  217. trace_kvm_s390_handle_diag(vcpu, code);
  218. switch (code) {
  219. case 0x10:
  220. return diag_release_pages(vcpu);
  221. case 0x44:
  222. return __diag_time_slice_end(vcpu);
  223. case 0x9c:
  224. return __diag_time_slice_end_directed(vcpu);
  225. case 0x258:
  226. return __diag_page_ref_service(vcpu);
  227. case 0x308:
  228. return __diag_ipl_functions(vcpu);
  229. case 0x500:
  230. return __diag_virtio_hypercall(vcpu);
  231. default:
  232. return -EOPNOTSUPP;
  233. }
  234. }