sigp.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422
  1. /*
 * sigp.c - handling interprocessor communication
  3. *
  4. * Copyright IBM Corp. 2008,2009
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License (version 2 only)
  8. * as published by the Free Software Foundation.
  9. *
  10. * Author(s): Carsten Otte <cotte@de.ibm.com>
  11. * Christian Borntraeger <borntraeger@de.ibm.com>
  12. * Christian Ehrhardt <ehrhardt@de.ibm.com>
  13. */
  14. #include <linux/kvm.h>
  15. #include <linux/kvm_host.h>
  16. #include <linux/slab.h>
  17. #include "gaccess.h"
  18. #include "kvm-s390.h"
  19. /* sigp order codes */
  20. #define SIGP_SENSE 0x01
  21. #define SIGP_EXTERNAL_CALL 0x02
  22. #define SIGP_EMERGENCY 0x03
  23. #define SIGP_START 0x04
  24. #define SIGP_STOP 0x05
  25. #define SIGP_RESTART 0x06
  26. #define SIGP_STOP_STORE_STATUS 0x09
  27. #define SIGP_INITIAL_CPU_RESET 0x0b
  28. #define SIGP_CPU_RESET 0x0c
  29. #define SIGP_SET_PREFIX 0x0d
  30. #define SIGP_STORE_STATUS_ADDR 0x0e
  31. #define SIGP_SET_ARCH 0x12
  32. #define SIGP_SENSE_RUNNING 0x15
  33. /* cpu status bits */
  34. #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
  35. #define SIGP_STAT_NOT_RUNNING 0x00000400UL
  36. #define SIGP_STAT_INCORRECT_STATE 0x00000200UL
  37. #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
  38. #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
  39. #define SIGP_STAT_STOPPED 0x00000040UL
  40. #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
  41. #define SIGP_STAT_CHECK_STOP 0x00000010UL
  42. #define SIGP_STAT_INOPERATIVE 0x00000004UL
  43. #define SIGP_STAT_INVALID_ORDER 0x00000002UL
  44. #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
  45. static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
  46. u64 *reg)
  47. {
  48. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  49. int rc;
  50. if (cpu_addr >= KVM_MAX_VCPUS)
  51. return 3; /* not operational */
  52. spin_lock(&fi->lock);
  53. if (fi->local_int[cpu_addr] == NULL)
  54. rc = 3; /* not operational */
  55. else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
  56. & CPUSTAT_STOPPED)) {
  57. *reg &= 0xffffffff00000000UL;
  58. rc = 1; /* status stored */
  59. } else {
  60. *reg &= 0xffffffff00000000UL;
  61. *reg |= SIGP_STAT_STOPPED;
  62. rc = 1; /* status stored */
  63. }
  64. spin_unlock(&fi->lock);
  65. VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
  66. return rc;
  67. }
  68. static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
  69. {
  70. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  71. struct kvm_s390_local_interrupt *li;
  72. struct kvm_s390_interrupt_info *inti;
  73. int rc;
  74. if (cpu_addr >= KVM_MAX_VCPUS)
  75. return 3; /* not operational */
  76. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  77. if (!inti)
  78. return -ENOMEM;
  79. inti->type = KVM_S390_INT_EMERGENCY;
  80. inti->emerg.code = vcpu->vcpu_id;
  81. spin_lock(&fi->lock);
  82. li = fi->local_int[cpu_addr];
  83. if (li == NULL) {
  84. rc = 3; /* not operational */
  85. kfree(inti);
  86. goto unlock;
  87. }
  88. spin_lock_bh(&li->lock);
  89. list_add_tail(&inti->list, &li->list);
  90. atomic_set(&li->active, 1);
  91. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  92. if (waitqueue_active(&li->wq))
  93. wake_up_interruptible(&li->wq);
  94. spin_unlock_bh(&li->lock);
  95. rc = 0; /* order accepted */
  96. VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
  97. unlock:
  98. spin_unlock(&fi->lock);
  99. return rc;
  100. }
  101. static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
  102. {
  103. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  104. struct kvm_s390_local_interrupt *li;
  105. struct kvm_s390_interrupt_info *inti;
  106. int rc;
  107. if (cpu_addr >= KVM_MAX_VCPUS)
  108. return 3; /* not operational */
  109. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  110. if (!inti)
  111. return -ENOMEM;
  112. inti->type = KVM_S390_INT_EXTERNAL_CALL;
  113. inti->extcall.code = vcpu->vcpu_id;
  114. spin_lock(&fi->lock);
  115. li = fi->local_int[cpu_addr];
  116. if (li == NULL) {
  117. rc = 3; /* not operational */
  118. kfree(inti);
  119. goto unlock;
  120. }
  121. spin_lock_bh(&li->lock);
  122. list_add_tail(&inti->list, &li->list);
  123. atomic_set(&li->active, 1);
  124. atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
  125. if (waitqueue_active(&li->wq))
  126. wake_up_interruptible(&li->wq);
  127. spin_unlock_bh(&li->lock);
  128. rc = 0; /* order accepted */
  129. VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
  130. unlock:
  131. spin_unlock(&fi->lock);
  132. return rc;
  133. }
  134. static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
  135. {
  136. struct kvm_s390_interrupt_info *inti;
  137. inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
  138. if (!inti)
  139. return -ENOMEM;
  140. inti->type = KVM_S390_SIGP_STOP;
  141. spin_lock_bh(&li->lock);
  142. if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
  143. goto out;
  144. list_add_tail(&inti->list, &li->list);
  145. atomic_set(&li->active, 1);
  146. atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
  147. li->action_bits |= action;
  148. if (waitqueue_active(&li->wq))
  149. wake_up_interruptible(&li->wq);
  150. out:
  151. spin_unlock_bh(&li->lock);
  152. return 0; /* order accepted */
  153. }
  154. static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
  155. {
  156. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  157. struct kvm_s390_local_interrupt *li;
  158. int rc;
  159. if (cpu_addr >= KVM_MAX_VCPUS)
  160. return 3; /* not operational */
  161. spin_lock(&fi->lock);
  162. li = fi->local_int[cpu_addr];
  163. if (li == NULL) {
  164. rc = 3; /* not operational */
  165. goto unlock;
  166. }
  167. rc = __inject_sigp_stop(li, action);
  168. unlock:
  169. spin_unlock(&fi->lock);
  170. VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
  171. return rc;
  172. }
  173. int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action)
  174. {
  175. struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
  176. return __inject_sigp_stop(li, action);
  177. }
  178. static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
  179. {
  180. int rc;
  181. switch (parameter & 0xff) {
  182. case 0:
  183. rc = 3; /* not operational */
  184. break;
  185. case 1:
  186. case 2:
  187. rc = 0; /* order accepted */
  188. break;
  189. default:
  190. rc = -EOPNOTSUPP;
  191. }
  192. return rc;
  193. }
  194. static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
  195. u64 *reg)
  196. {
  197. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  198. struct kvm_s390_local_interrupt *li = NULL;
  199. struct kvm_s390_interrupt_info *inti;
  200. int rc;
  201. u8 tmp;
  202. /* make sure that the new value is valid memory */
  203. address = address & 0x7fffe000u;
  204. if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
  205. copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
  206. *reg |= SIGP_STAT_INVALID_PARAMETER;
  207. return 1; /* invalid parameter */
  208. }
  209. inti = kzalloc(sizeof(*inti), GFP_KERNEL);
  210. if (!inti)
  211. return 2; /* busy */
  212. spin_lock(&fi->lock);
  213. if (cpu_addr < KVM_MAX_VCPUS)
  214. li = fi->local_int[cpu_addr];
  215. if (li == NULL) {
  216. rc = 1; /* incorrect state */
  217. *reg &= SIGP_STAT_INCORRECT_STATE;
  218. kfree(inti);
  219. goto out_fi;
  220. }
  221. spin_lock_bh(&li->lock);
  222. /* cpu must be in stopped state */
  223. if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
  224. rc = 1; /* incorrect state */
  225. *reg &= SIGP_STAT_INCORRECT_STATE;
  226. kfree(inti);
  227. goto out_li;
  228. }
  229. inti->type = KVM_S390_SIGP_SET_PREFIX;
  230. inti->prefix.address = address;
  231. list_add_tail(&inti->list, &li->list);
  232. atomic_set(&li->active, 1);
  233. if (waitqueue_active(&li->wq))
  234. wake_up_interruptible(&li->wq);
  235. rc = 0; /* order accepted */
  236. VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
  237. out_li:
  238. spin_unlock_bh(&li->lock);
  239. out_fi:
  240. spin_unlock(&fi->lock);
  241. return rc;
  242. }
  243. static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
  244. u64 *reg)
  245. {
  246. int rc;
  247. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  248. if (cpu_addr >= KVM_MAX_VCPUS)
  249. return 3; /* not operational */
  250. spin_lock(&fi->lock);
  251. if (fi->local_int[cpu_addr] == NULL)
  252. rc = 3; /* not operational */
  253. else {
  254. if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
  255. & CPUSTAT_RUNNING) {
  256. /* running */
  257. rc = 1;
  258. } else {
  259. /* not running */
  260. *reg &= 0xffffffff00000000UL;
  261. *reg |= SIGP_STAT_NOT_RUNNING;
  262. rc = 0;
  263. }
  264. }
  265. spin_unlock(&fi->lock);
  266. VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
  267. rc);
  268. return rc;
  269. }
  270. static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
  271. {
  272. int rc = 0;
  273. struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
  274. struct kvm_s390_local_interrupt *li;
  275. if (cpu_addr >= KVM_MAX_VCPUS)
  276. return 3; /* not operational */
  277. spin_lock(&fi->lock);
  278. li = fi->local_int[cpu_addr];
  279. if (li == NULL) {
  280. rc = 3; /* not operational */
  281. goto out;
  282. }
  283. spin_lock_bh(&li->lock);
  284. if (li->action_bits & ACTION_STOP_ON_STOP)
  285. rc = 2; /* busy */
  286. else
  287. VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
  288. cpu_addr);
  289. spin_unlock_bh(&li->lock);
  290. out:
  291. spin_unlock(&fi->lock);
  292. return rc;
  293. }
/*
 * Intercept handler for the SIGP instruction: decode the operands from
 * the SIE block, dispatch on the order code, and fold the resulting
 * sigp condition code into the guest PSW.
 *
 * Returns 0 on success, a negative error, or -EOPNOTSUPP for orders
 * that must be completed in userspace (e.g. restart, start).
 */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	/* RS-format: r1 = status/parameter pair, r3 = cpu address register */
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	/* second-operand address (base + displacement) is the order code */
	order_code = disp2;
	if (base2)
		order_code += vcpu->run->s.regs.gprs[base2];

	/* the parameter lives in the odd register of the r1 pair (r1 | 1) */
	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
					 ACTION_STOP_ON_STOP);
		break;
	case SIGP_SET_ARCH:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = __sigp_restart(vcpu, cpu_addr);
		if (rc == 2) /* busy */
			break;
		/* user space must know about restart */
		/* deliberate fallthrough into default when not busy */
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	/* write the sigp condition code into PSW bits 44-45 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
	return 0;
}