/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * Copyright (c) 2007 Intel Corporation KVM support.
 *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kvm_host.h>

#include "vcpu.h"

static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
        VLSAPIC_XTP(v) = val;
}

/*
 * LSAPIC OFFSET
 */
#define PIB_LOW_HALF(ofst)      !(ofst & (1 << 20))
#define PIB_OFST_INTA           0x1E0000
#define PIB_OFST_XTP            0x1E0008
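
/*
 * Note on the layout implied by the macros above: offsets with bit 20
 * clear form the "low half" of the PIB and are the IPI-delivery range,
 * while INTA and XTP sit at fixed offsets in the upper half.  PIB_SIZE
 * itself is defined elsewhere (presumably alongside these macros in
 * vcpu.h).
 */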

/*
 * execute write IPI op.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
                              uint64_t addr, uint64_t data)
{
        struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
        unsigned long psr;

        local_irq_save(psr);

        /* Record the IPI in the exit data and hand control to the VMM. */
        p->exit_reason = EXIT_REASON_IPI;
        p->u.ipi_data.addr.val = addr;
        p->u.ipi_data.data.val = data;

        vmm_transition(current_vcpu);

        local_irq_restore(psr);
}

void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
                  unsigned long length, unsigned long val)
{
        addr &= (PIB_SIZE - 1);

        switch (addr) {
        case PIB_OFST_INTA:
                panic_vm(v, "Undefined write on PIB INTA\n");
                break;
        case PIB_OFST_XTP:
                if (length == 1)
                        vlsapic_write_xtp(v, val);
                else
                        panic_vm(v, "Undefined write on PIB XTP\n");
                break;
        default:
                if (PIB_LOW_HALF(addr)) {
                        /* Lower half */
                        if (length != 8)
                                panic_vm(v, "Can't LHF write with size %ld!\n",
                                         length);
                        else
                                vlsapic_write_ipi(v, addr, val);
                } else {
                        /* Upper half */
                        panic_vm(v, "IPI-UHF write %lx\n", addr);
                }
                break;
        }
}
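
/*
 * Reads from the PIB: a one-byte INTA load returns 0 (no i8259 is
 * emulated, so there is nothing to acknowledge), and a one-byte XTP
 * load returns the vcpu's current external task priority.
 */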
unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
                          unsigned long length)
{
        uint64_t result = 0;

        addr &= (PIB_SIZE - 1);

        switch (addr) {
        case PIB_OFST_INTA:
                if (length == 1)        /* 1 byte load */
                        ;               /* There is no i8259, there is no INTA access */
                else
                        panic_vm(v, "Undefined read on PIB INTA\n");
                break;
        case PIB_OFST_XTP:
                if (length == 1)
                        result = VLSAPIC_XTP(v);
                else
                        panic_vm(v, "Undefined read on PIB XTP\n");
                break;
        default:
                panic_vm(v, "Undefined addr access for lsapic!\n");
                break;
        }
        return result;
}
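
/*
 * Common back end for emulated guest physical accesses: src_pa is the
 * guest physical address, dest points at the data buffer, s is the
 * access size in bytes, ma is the memory attribute (passed through by
 * callers but not consulted here), and dir is IOREQ_READ or IOREQ_WRITE.
 */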
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
                        u16 s, int ma, int dir)
{
        unsigned long iot;
        struct exit_ctl_data *p = &vcpu->arch.exit_data;
        unsigned long psr;

        iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

        local_irq_save(psr);

        /* Intercept the access for PIB range */
        if (iot == GPFN_PIB) {
                if (!dir)
                        lsapic_write(vcpu, src_pa, s, *dest);
                else
                        *dest = lsapic_read(vcpu, src_pa, s);
                goto out;
        }
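
        /*
         * Anything outside the PIB becomes an ioreq handed to the VMM:
         * fill in the exit data, transition out of guest context, and
         * expect the request to be in STATE_IORESP_READY on return.
         */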
        p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
        p->u.ioreq.addr = src_pa;
        p->u.ioreq.size = s;
        p->u.ioreq.dir = dir;
        if (dir == IOREQ_WRITE)
                p->u.ioreq.data = *dest;
        p->u.ioreq.state = STATE_IOREQ_READY;

        vmm_transition(vcpu);

        if (p->u.ioreq.state == STATE_IORESP_READY) {
                if (dir == IOREQ_READ)
                        /* it's necessary to ensure zero extending */
                        *dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));
        } else
                panic_vm(vcpu, "Unhandled mmio access returned!\n");
out:
        local_irq_restore(psr);
}

/*
 * dir:        1 = read,    0 = write
 * inst_type:  0 = integer, 1 = floating point
 */
#define SL_INTEGER      0       /* store/load integer */
#define SL_FLOATING     1       /* store/load floating point */
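
/*
 * emulate_io_inst() decodes the instruction that faulted on an MMIO
 * address: fetch the bundle at cr_iip, pick the slot from ipsr.ri, then
 * dispatch on the major opcode and the x6 extension to determine the
 * direction, access size and registers involved.  Until the final
 * "size = 1 << size", the size fields hold log2 of the byte count.
 */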
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
        struct kvm_pt_regs *regs;
        IA64_BUNDLE bundle;
        int slot, dir = 0;
        int inst_type = -1;
        u16 size = 0;
        u64 data, slot1a, slot1b, temp, update_reg;
        s32 imm;
        INST64 inst;

        regs = vcpu_regs(vcpu);

        if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
                /* if fetching the code fails, return and try again */
                return;
        }

        slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
        if (!slot)
                inst.inst = bundle.slot0;
        else if (slot == 1) {
                slot1a = bundle.slot1a;
                slot1b = bundle.slot1b;
                inst.inst = slot1a + (slot1b << 18);
        } else if (slot == 2)
                inst.inst = bundle.slot2;

        /* Integer Load/Store */
        if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
                inst_type = SL_INTEGER;
                size = (inst.M1.x6 & 0x3);
                if ((inst.M1.x6 >> 2) > 0xb) {
                        /* write */
                        dir = IOREQ_WRITE;
                        data = vcpu_get_gr(vcpu, inst.M4.r2);
                } else if ((inst.M1.x6 >> 2) < 0xb) {
                        /* read */
                        dir = IOREQ_READ;
                }
        } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
                /* Integer Load + Reg update */
                inst_type = SL_INTEGER;
                dir = IOREQ_READ;
                size = (inst.M2.x6 & 0x3);
                temp = vcpu_get_gr(vcpu, inst.M2.r3);
                update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
                temp += update_reg;
                vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
        } else if (inst.M3.major == 5) {
                /* Integer Load/Store + Imm update */
                inst_type = SL_INTEGER;
                size = (inst.M3.x6 & 0x3);
                if ((inst.M5.x6 >> 2) > 0xb) {
                        /* write */
                        dir = IOREQ_WRITE;
                        data = vcpu_get_gr(vcpu, inst.M5.r2);
                        temp = vcpu_get_gr(vcpu, inst.M5.r3);
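                        /*
                         * Reassemble the 9-bit immediate {s,i,imm7} in
                         * the top bits of a signed 32-bit word, then
                         * shift arithmetically right so the base-register
                         * update is properly sign-extended.
                         */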
                        imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
                                (inst.M5.imm7 << 23);
                        temp += imm >> 23;
                        vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
                } else if ((inst.M3.x6 >> 2) < 0xb) {
                        /* read */
                        dir = IOREQ_READ;
                        temp = vcpu_get_gr(vcpu, inst.M3.r3);
                        imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
                                (inst.M3.imm7 << 23);
                        temp += imm >> 23;
                        vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
                }
        } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
                        && inst.M9.m == 0 && inst.M9.x == 0) {
                /* Floating-point spill */
                struct ia64_fpreg v;

                inst_type = SL_FLOATING;
                dir = IOREQ_WRITE;
                vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
                /* Write high word. FIXME: this is a kludge! */
                v.u.bits[1] &= 0x3ffff;
                mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
                            ma, IOREQ_WRITE);
                data = v.u.bits[0];
                size = 3;
        } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
                /* Floating-point spill + Imm update */
                struct ia64_fpreg v;

                inst_type = SL_FLOATING;
                dir = IOREQ_WRITE;
                vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
                temp = vcpu_get_gr(vcpu, inst.M10.r3);
                imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
                        (inst.M10.imm7 << 23);
                temp += imm >> 23;
                vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

                /* Write high word. FIXME: this is a kludge! */
                v.u.bits[1] &= 0x3ffff;
                mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
                            8, ma, IOREQ_WRITE);
                data = v.u.bits[0];
                size = 3;
        } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
                /* Floating-point stf8 + Imm update */
                struct ia64_fpreg v;

                inst_type = SL_FLOATING;
                dir = IOREQ_WRITE;
                size = 3;
                vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
                data = v.u.bits[0];     /* Significand. */
                temp = vcpu_get_gr(vcpu, inst.M10.r3);
                imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
                        (inst.M10.imm7 << 23);
                temp += imm >> 23;
                vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
        } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
                        && inst.M15.x6 <= 0x2f) {
                /* lfetch + Imm update: no data access, just update r3. */
                temp = vcpu_get_gr(vcpu, inst.M15.r3);
                imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
                        (inst.M15.imm7 << 23);
                temp += imm >> 23;
                vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

                vcpu_increment_iip(vcpu);
                return;
        } else if (inst.M12.major == 6 && inst.M12.m == 1
                        && inst.M12.x == 1 && inst.M12.x6 == 1) {
                /* Floating-point Load Pair + Imm update (ldfp8, M12) */
                struct ia64_fpreg v;

                inst_type = SL_FLOATING;
                dir = IOREQ_READ;
                size = 8;       /* ldfd */
                mmio_access(vcpu, padr, &data, size, ma, dir);
                v.u.bits[0] = data;
                v.u.bits[1] = 0x1003E;
                vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
                padr += 8;
                mmio_access(vcpu, padr, &data, size, ma, dir);
                v.u.bits[0] = data;
                v.u.bits[1] = 0x1003E;
                vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
                padr += 8;
                vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);

                vcpu_increment_iip(vcpu);
                return;
        } else {
                inst_type = -1;
                panic_vm(vcpu, "Unsupported MMIO access instruction! "
                         "Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
                         bundle.i64[0], bundle.i64[1]);
        }
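
        /*
         * "size" still holds log2 of the byte count here (0..3 for 1..8
         * bytes), so convert it before issuing the access.  For reads the
         * result is written back to the target general register; the
         * floating-point loads returned earlier, so only integer reads
         * reach the else branch below.
         */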
        size = 1 << size;
        if (dir == IOREQ_WRITE) {
                mmio_access(vcpu, padr, &data, size, ma, dir);
        } else {
                mmio_access(vcpu, padr, &data, size, ma, dir);
                if (inst_type == SL_INTEGER)
                        vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
                else
                        panic_vm(vcpu, "Unsupported instruction type!\n");
        }

        vcpu_increment_iip(vcpu);
}