book3s_pr_papr.c

/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Paul Mackerras <paulus@samba.org>
 *
 * Description:
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

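/*
 * Translate a guest HPTE index into the guest-physical address of that
 * hash PTE.  The hash table base (HTABORG) and size (HTABSIZE) come from
 * the guest SDR1 value kept in the vcpu's book3s state: each HPTE is
 * 16 bytes and each PTEG holds eight HPTEs (128 bytes), so the index is
 * shifted into a byte offset, masked to the table size (keeping the slot
 * offset within the PTEG), and combined with the table base.
 */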
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        unsigned long pteg_addr;

        pte_index <<= 4;
        pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
        pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg_addr |= pte_index;

        return pteg_addr;
}
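
/*
 * H_ENTER: insert an HPTE into the guest hash table.  The guest passes
 * flags in r4, the HPTE index in r5 and the two HPTE doublewords in
 * r6/r7.  Unless H_EXACT is set, the first free slot in the PTEG is
 * used; on success the chosen slot index is returned in r4.
 */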
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
        long flags = kvmppc_get_gpr(vcpu, 4);
        long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long pteg[2 * 8];
        unsigned long pteg_addr, i, *hpte;

        pte_index &= ~7UL;
        pteg_addr = get_pteg_addr(vcpu, pte_index);

        copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
        hpte = pteg;

        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                for (i = 0; ; ++i) {
                        if (i == 8)
                                return H_PTEG_FULL;
                        if ((*hpte & HPTE_V_VALID) == 0)
                                break;
                        hpte += 2;
                }
        } else {
                i = kvmppc_get_gpr(vcpu, 5) & 7UL;
                hpte += i * 2;
        }

        hpte[0] = kvmppc_get_gpr(vcpu, 6);
        hpte[1] = kvmppc_get_gpr(vcpu, 7);

        copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));

        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
        kvmppc_set_gpr(vcpu, 4, pte_index | i);

        return EMULATE_DONE;
}
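
/*
 * H_REMOVE: invalidate and remove an HPTE.  Flags are passed in r4
 * (H_AVPN and H_ANDCOND select extra match conditions), the HPTE index
 * in r5 and the AVPN to match in r6.  On success the old HPTE contents
 * are returned in r4/r5 and the corresponding TLB entry is flushed.
 */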
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long v = 0, pteg, rb;
        unsigned long pte[2];

        pteg = get_pteg_addr(vcpu, pte_index);
        copy_from_user(pte, (void __user *)pteg, sizeof(pte));

        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
                kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
                return EMULATE_DONE;
        }

        copy_to_user((void __user *)pteg, &v, sizeof(v));

        rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
        kvmppc_set_gpr(vcpu, 4, pte[0]);
        kvmppc_set_gpr(vcpu, 5, pte[1]);

        return EMULATE_DONE;
}
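
/*
 * H_PROTECT: change the protection bits of an existing HPTE.  Flags in
 * r4 carry the new pp/N/key bits (and H_AVPN for matching), the HPTE
 * index is in r5 and the AVPN in r6.  The relevant TLB entry is flushed
 * before the updated HPTE is written back.
 */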
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long rb, pteg, r, v;
        unsigned long pte[2];

        pteg = get_pteg_addr(vcpu, pte_index);
        copy_from_user(pte, (void __user *)pteg, sizeof(pte));

        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
                kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
                return EMULATE_DONE;
        }

        v = pte[0];
        r = pte[1];
        r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
               HPTE_R_KEY_LO);
        r |= (flags << 55) & HPTE_R_PP0;
        r |= (flags << 48) & HPTE_R_KEY_HI;
        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
        pte[1] = r;

        rb = compute_tlbie_rb(v, r, pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
        copy_to_user((void __user *)pteg, pte, sizeof(pte));

        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
        return EMULATE_DONE;
}
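
/*
 * Top-level PAPR hypercall dispatcher for PR KVM.  'cmd' is the
 * hypercall number passed by the guest; hypercalls that are not handled
 * here return EMULATE_FAIL so the caller can deal with them (e.g. by
 * forwarding them to user space).
 */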
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
        switch (cmd) {
        case H_ENTER:
                return kvmppc_h_pr_enter(vcpu);
        case H_REMOVE:
                return kvmppc_h_pr_remove(vcpu);
        case H_PROTECT:
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
                /* We just flush all PTEs, so user space can
                   handle the HPT modifications */
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                break;
        case H_CEDE:
                kvm_vcpu_block(vcpu);
                vcpu->stat.halt_wakeup++;
                return EMULATE_DONE;
        }

        return EMULATE_FAIL;
}