vgic-v2.c

/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
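
/*
 * The GICH list registers are 32 bits wide and laid out contiguously
 * from GICH_LR0, hence the (lr * 4) offset below.
 */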
static inline void vgic_v2_write_lr(int lr, u32 val)
{
        void __iomem *base = kvm_vgic_global_state.vctrl_base;

        writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

void vgic_v2_init_lrs(void)
{
        int i;

        for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
                vgic_v2_write_lr(i, 0);
}
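
/*
 * NPIE ("no pending") and UIE ("underflow") in GICH_HCR both arm the
 * maintenance interrupt so that we get a chance to refill the list
 * registers: NPIE fires when no LR is in the pending state, UIE when
 * none or only one LR holds a valid entry.
 */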
void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

        cpuif->vgic_hcr |= GICH_HCR_NPIE;
}

void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

        cpuif->vgic_hcr |= GICH_HCR_UIE;
}
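
/*
 * An LR that is now invalid (no state bits set), requested an EOI
 * maintenance interrupt and is not HW-mapped has just been EOI'd by
 * the guest, so its input level may need resampling.
 */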
static bool lr_signals_eoi_mi(u32 lr_val)
{
        return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
               !(lr_val & GICH_LR_HW);
}
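
/*
 * For reference, the GICv2 architecture defines the GICH_LR layout as:
 *
 *   [31] HW   [30] Grp1   [29] Active   [28] Pending   [27:23] Priority
 *   [19:10] physical INTID (HW=1), or EOI bit + source CPUID (HW=0)
 *   [9:0]  virtual INTID
 */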

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
        int lr;

        cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);

        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                u32 val = cpuif->vgic_lr[lr];
                u32 intid = val & GICH_LR_VIRTUALID;
                struct vgic_irq *irq;

                /* Notify fds when the guest EOI'ed a level-triggered SPI */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

                spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & GICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (vgic_irq_is_sgi(intid)) {
                                u32 cpuid = val & GICH_LR_PHYSID_CPUID;

                                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
                                irq->source |= (1 << cpuid);
                        }
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 * Always regenerate the pending state.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        if (!(val & GICH_LR_PENDING_BIT))
                                irq->pending_latch = false;
                }

                spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        vgic_cpu->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 val = irq->intid;

        if (irq_is_pending(irq)) {
                val |= GICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid)) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));

                        if (irq->source)
                                irq->pending_latch = true;
                }
        }

        if (irq->active)
                val |= GICH_LR_ACTIVE_BIT;

        if (irq->hw) {
                val |= GICH_LR_HW;
                val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */
                if (irq->active && irq_is_pending(irq))
                        val &= ~GICH_LR_PENDING_BIT;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL)
                        val |= GICH_LR_EOI;
        }

        /* The GICv2 LR only holds five bits of priority. */
        val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
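
/* Invalidate an LR by zeroing its shadow copy (all state bits clear). */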
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}
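
/*
 * GICH_VMCR packs the guest's view of the GICC_CTLR, GICC_PMR, GICC_BPR
 * and GICC_ABPR CPU interface registers; the two helpers below convert
 * between that packed format and struct vgic_vmcr.
 */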
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
                GICH_VMCR_ENABLE_GRP0_MASK;
        vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
                GICH_VMCR_ENABLE_GRP1_MASK;
        vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
                GICH_VMCR_ACK_CTL_MASK;
        vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
                GICH_VMCR_FIQ_EN_MASK;
        vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
                GICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
                GICH_VMCR_EOI_MODE_MASK;
        vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
        vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
                 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

        cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
                GICH_VMCR_ENABLE_GRP0_SHIFT;
        vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
                GICH_VMCR_ENABLE_GRP1_SHIFT;
        vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
                GICH_VMCR_ACK_CTL_SHIFT;
        vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
                GICH_VMCR_FIQ_EN_SHIFT;
        vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
                GICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
                GICH_VMCR_EOI_MODE_SHIFT;
        vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
                GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
                GICH_VMCR_BINPOINT_SHIFT;
        vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
                      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}
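
/* Bring this vcpu's virtual CPU interface to its reset state and enable it. */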
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
        vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;

        /* Get the show on the road... */
        vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
        if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
                return false;
        if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
                return false;

        if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
                return true;
        if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
                return true;

        return false;
}
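
/*
 * Wire up the guest-visible GICv2: register the distributor MMIO device
 * and, unless CPU interface accesses have to be trapped, map the
 * hardware GICV region straight into the guest at the address that
 * userspace chose.
 */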
int vgic_v2_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
                kvm_err("VGIC CPU and dist frames overlap\n");
                ret = -EINVAL;
                goto out;
        }

        /*
         * Initialize the vgic if this hasn't already been done on demand by
         * accessing the vgic state from userspace.
         */
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to initialize VGIC dynamic data structures\n");
                goto out;
        }

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        if (ret) {
                kvm_err("Unable to register VGIC MMIO regions\n");
                goto out;
        }

        if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                            kvm_vgic_global_state.vcpu_base,
                                            KVM_VGIC_V2_CPU_SIZE, true);
                if (ret) {
                        kvm_err("Unable to remap VGIC CPU to VCPU\n");
                        goto out;
                }
        }

        dist->ready = true;

out:
        return ret;
}
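
/*
 * Enabled when the GICV region cannot safely be exposed to the guest
 * (bad page alignment or size); guest CPU interface accesses are then
 * trapped and emulated instead of mapped through.
 */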
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC description provided by the irqchip layer
 *
 * Returns 0 if a GICv2 has been found, returns an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
        int ret;
        u32 vtr;

        if (!info->vctrl.start) {
                kvm_err("GICH not present in the firmware table\n");
                return -ENXIO;
        }

        if (!PAGE_ALIGNED(info->vcpu.start) ||
            !PAGE_ALIGNED(resource_size(&info->vcpu))) {
                kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
                kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start,
                                                             resource_size(&info->vcpu));
                if (!kvm_vgic_global_state.vcpu_base_va) {
                        kvm_err("Cannot ioremap GICV\n");
                        return -ENOMEM;
                }

                ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va,
                                             kvm_vgic_global_state.vcpu_base_va +
                                                     resource_size(&info->vcpu),
                                             info->vcpu.start);
                if (ret) {
                        kvm_err("Cannot map GICV into hyp\n");
                        goto out;
                }

                static_branch_enable(&vgic_v2_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
                                                   resource_size(&info->vctrl));
        if (!kvm_vgic_global_state.vctrl_base) {
                kvm_err("Cannot ioremap GICH\n");
                ret = -ENOMEM;
                goto out;
        }
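
        /* GICH_VTR bits [5:0] encode the number of implemented LRs, minus one. */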
        vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
        kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

        ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
                                     kvm_vgic_global_state.vctrl_base +
                                             resource_size(&info->vctrl),
                                     info->vctrl.start);
        if (ret) {
                kvm_err("Cannot map VCTRL into hyp\n");
                goto out;
        }

        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
        if (ret) {
                kvm_err("Cannot register GICv2 KVM device\n");
                goto out;
        }

        kvm_vgic_global_state.can_emulate_gicv2 = true;
        kvm_vgic_global_state.vcpu_base = info->vcpu.start;
        kvm_vgic_global_state.type = VGIC_V2;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

        kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

        return 0;
out:
        if (kvm_vgic_global_state.vctrl_base)
                iounmap(kvm_vgic_global_state.vctrl_base);
        if (kvm_vgic_global_state.vcpu_base_va)
                iounmap(kvm_vgic_global_state.vcpu_base_va);
        return ret;
}
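
/*
 * The guest can modify its virtual CPU interface state (GICH_VMCR,
 * GICH_APR) while it runs, so the helpers below write that state out
 * when a vcpu is loaded and read it back when the vcpu is put.
 */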
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

        writel_relaxed(cpu_if->vgic_vmcr, vgic->vctrl_base + GICH_VMCR);
}

void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

        cpu_if->vgic_vmcr = readl_relaxed(vgic->vctrl_base + GICH_VMCR);
}

void vgic_v2_put(struct kvm_vcpu *vcpu)
{
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

        vgic_v2_vmcr_sync(vcpu);
        cpu_if->vgic_apr = readl_relaxed(vgic->vctrl_base + GICH_APR);
}