/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
/*
 * Call this function to convert a u64 value to an unsigned long * bitmask
 * in a way that works on both 32-bit and 64-bit LE and BE platforms.
 *
 * Warning: Calling this function may modify *val.
 */
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
	*val = (*val >> 32) | (*val << 32);
#endif
	return (unsigned long *)val;
}
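
/*
 * Illustrative sketch (editor's example, not part of the original file;
 * compiled out): how the helper above feeds for_each_set_bit(). On a
 * 32-bit big-endian host the word swap makes bit 33 of the u64 land in
 * the second unsigned long, so the walk visits the same LR numbers on
 * every platform.
 */
#if 0 /* example only */
static void u64_to_bitmask_example(void)
{
	u64 eisr = BIT_ULL(0) | BIT_ULL(33);
	unsigned long *bmap = u64_to_bitmask(&eisr);	/* may modify eisr */
	int lr;

	for_each_set_bit(lr, bmap, 64)			/* visits 0, then 33 */
		pr_info("LR%d signalled an EOI\n", lr);
}
#endif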

void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	if (cpuif->vgic_misr & GICH_MISR_EOI) {
		u64 eisr = cpuif->vgic_eisr;
		unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
		int lr;

		for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
			u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;

			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);

			/* Only SPIs require notification */
			if (vgic_valid_spi(vcpu->kvm, intid))
				kvm_notify_acked_irq(vcpu->kvm, 0,
						     intid - VGIC_NR_PRIVATE_IRQS);
		}
	}

	/* check and disable underflow maintenance IRQ */
	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	/*
	 * In the next iterations of the vcpu loop, if we sync the
	 * vgic state after flushing it, but before entering the guest
	 * (this happens for pending signals and vmid rollovers), then
	 * make sure we don't pick up any old maintenance interrupts
	 * here.
	 */
	cpuif->vgic_eisr = 0;
}

void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
	int lr;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending = true;

			if (vgic_irq_is_sgi(intid)) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & GICH_LR_PENDING_BIT))
				irq->soft_pending = false;

			irq->pending = irq->line_level || irq->soft_pending;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
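
/*
 * Worked sketch (editor's example, not part of the original file;
 * compiled out): folding an LR for a level-sensitive interrupt that
 * the guest has just ACKed. The LR value and intid 27 are made up for
 * illustration.
 */
#if 0 /* example only */
static void fold_level_irq_example(struct vgic_irq *irq)
{
	u32 val = 27 | GICH_LR_ACTIVE_BIT;	/* active, pending bit cleared */

	irq->active = !!(val & GICH_LR_ACTIVE_BIT);	/* true */
	if (!(val & GICH_LR_PENDING_BIT))
		irq->soft_pending = false;	/* MMIO-set pending latch drops */
	/* pending now tracks the resampled device line again */
	irq->pending = irq->line_level || irq->soft_pending;
}
#endif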

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;

	if (irq->pending) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source)
				irq->pending = true;
		}
	}

	if (irq->active)
		val |= GICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq->pending)
			val &= ~GICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= GICH_LR_EOI;
	}

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
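
/*
 * Worked sketch (editor's example, not part of the original file;
 * compiled out): an SGI pending from two source VCPUs. ffs() picks the
 * lowest-numbered source; the bit left in the bitmap keeps the IRQ
 * pending so the second copy is injected on a later entry.
 */
#if 0 /* example only */
static void sgi_two_sources_example(void)
{
	u8 source = BIT(1) | BIT(2);	/* SGI sent by VCPUs 1 and 2 */
	u32 src = ffs(source);		/* == 2 (bit index 1, plus one) */
	u32 val = 0;

	val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;	/* CPUID field = 1 */
	source &= ~(1 << (src - 1));	/* BIT(2) remains: stays pending */
}
#endif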

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
		GICH_VMCR_PRIMASK_MASK;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
			GICH_VMCR_CTRL_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT;
}
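
/*
 * Round-trip sketch (editor's example, not part of the original file;
 * compiled out): each VMCR field is masked and shifted symmetrically,
 * so any value that fits its field survives a set/get round trip. The
 * PRIMASK field is five bits wide, hence the 0x18 below.
 */
#if 0 /* example only */
static void vmcr_roundtrip_example(struct kvm_vcpu *vcpu)
{
	struct vgic_vmcr in = { .pmr = 0x18 }, out;

	vgic_v2_set_vmcr(vcpu, &in);
	vgic_v2_get_vmcr(vcpu, &out);
	WARN_ON(out.pmr != in.pmr);	/* never fires for in-range values */
}
#endif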

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
	vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}
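
/*
 * Worked sketch (editor's example, not part of the original file;
 * compiled out): hypothetical layouts run through the checks above.
 * With the distributor at 0x08000000, dist_base + KVM_VGIC_V2_DIST_SIZE
 * sits well below a CPU interface at 0x08010000, so disjoint frames are
 * accepted; a CPU interface placed inside the distributor frame fails
 * both "<=" tests and is rejected. Neither sum wraps, so the overflow
 * checks pass in both cases.
 */
#if 0 /* example only */
static void check_base_example(void)
{
	WARN_ON(!vgic_v2_check_base(0x08000000, 0x08010000)); /* disjoint: ok */
	WARN_ON(vgic_v2_check_base(0x08000000, 0x08000100));  /* overlap: no */
}
#endif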

int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}
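
/*
 * Userspace-side sketch (editor's example, not part of the original
 * file; compiled out): how a VMM supplies the distributor and CPU
 * interface bases that vgic_v2_map_resources() insists on. The
 * addresses are hypothetical and error handling is omitted.
 */
#if 0 /* example only */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void set_vgic_v2_addrs(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };
	__u64 dist = 0x08000000, cpu = 0x08010000;
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr  = (__u64)(unsigned long)&dist,
	};

	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		/* fills cd.fd */
	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	/* distributor base */

	attr.attr = KVM_VGIC_V2_ADDR_TYPE_CPU;
	attr.addr = (__u64)(unsigned long)&cpu;
	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	/* CPU interface base */
}
#endif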

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC description, retrieved from DT or ACPI
 *
 * Returns 0 if a GICv2 has been found, returns an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
		kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start,
							     resource_size(&info->vcpu));
		if (!kvm_vgic_global_state.vcpu_base_va) {
			kvm_err("Cannot ioremap GICV\n");
			return -ENOMEM;
		}

		ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va,
					     kvm_vgic_global_state.vcpu_base_va +
						resource_size(&info->vcpu),
					     info->vcpu.start);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
						   resource_size(&info->vctrl));
	if (!kvm_vgic_global_state.vctrl_base) {
		kvm_err("Cannot ioremap GICH\n");
		ret = -ENOMEM;
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
				     kvm_vgic_global_state.vctrl_base +
					resource_size(&info->vctrl),
				     info->vctrl.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_info("vgic-v2@%llx\n", info->vctrl.start);

	return 0;

out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}