vgic-mmio-v2.c

/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
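
/*
 * Handles reads of the distributor control/ID registers. GICD_TYPER
 * advertises (nr_irqs / 32) - 1 in bits [4:0] and the number of
 * vCPUs minus one in bits [7:5], as the GICv2 architecture specifies.
 */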
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}
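
/*
 * Only GICD_CTLR is writable here; GICD_TYPER and GICD_IIDR are
 * read-only. Enabling the distributor kicks all vCPUs so that any
 * already-pending interrupts get forwarded.
 */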
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}
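
/*
 * GICD_SGIR layout: bits [3:0] hold the SGI number, bits [23:16] the
 * CPU target list and bits [25:24] the target list filter, which
 * selects between an explicit list, all-but-self and self only.
 */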
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}
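
/*
 * GICD_ITARGETSR holds one byte per interrupt; each byte is a bitmap
 * of the CPU interfaces the interrupt is routed to.
 */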
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock(&irq->irq_lock);

		irq->targets = (val >> (i * 8)) & 0xff;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
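
/*
 * GICD_SPENDSGIR/GICD_CPENDSGIR hold one byte per SGI; each bit in a
 * byte flags a source CPU with a pending SGI from it. Both banks read
 * back the same per-source state.
 */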
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}
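
/*
 * Writing GICD_CPENDSGIR clears the given source bits; the SGI stops
 * being pending once no source bits remain. Writing GICD_SPENDSGIR
 * sets source bits and queues the SGI if any bit ends up set.
 */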
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending = false;

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			spin_unlock(&irq->irq_lock);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}
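
/*
 * The VMCR accessors dispatch on the host GIC hardware: a GICv2 host
 * uses the GICH_VMCR format, while a GICv3 host emulating a v2 guest
 * goes through the ICH_VMCR accessors instead.
 */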
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.ctlr;
		break;
	case GIC_CPU_PRIMASK:
		val = vmcr.pmr;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.ctlr = val;
		break;
	case GIC_CPU_PRIMASK:
		vmcr.pmr = val;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}
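
/*
 * Distributor register map: each entry pairs a register offset with
 * its read/write handlers, its length (fixed, or derived from the
 * number of bits per interrupt) and the access widths it accepts.
 */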
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};
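
/*
 * Sets up the distributor MMIO device; the returned SZ_4K is the size
 * of the window the generic VGIC code registers on the KVM MMIO bus,
 * dispatching accesses through kvm_io_gic_ops to the regions above.
 */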
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}
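
/*
 * Checks whether a userspace-supplied device attribute offset falls
 * within any distributor or CPU interface register region, taking the
 * variable length of bits_per_irq regions into account.
 */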
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	const struct vgic_register_region *regions;
	gpa_t addr;
	int nr_regions, i, len;

	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		regions = vgic_v2_dist_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		regions = vgic_v2_cpu_registers;
		nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	for (i = 0; i < nr_regions; i++) {
		if (regions[i].bits_per_irq)
			len = (regions[i].bits_per_irq * nr_irqs) / 8;
		else
			len = regions[i].len;

		if (regions[i].reg_offset <= addr &&
		    regions[i].reg_offset + len > addr)
			return 0;
	}

	return -ENXIO;
}

/*
 * When userland tries to access the VGIC register handlers, we need to
 * create a usable struct vgic_io_device to be passed to the handlers and we
 * have to set up a buffer similar to what would have happened if a guest MMIO
 * access occurred, including doing endian conversions on BE systems.
 */
static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
			bool is_write, int offset, u32 *val)
{
	unsigned int len = 4;
	u8 buf[4];
	int ret;

	if (is_write) {
		vgic_data_host_to_mmio_bus(buf, len, *val);
		ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
	} else {
		ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
		if (!ret)
			*val = vgic_data_mmio_bus_to_host(buf, len);
	}

	return ret;
}
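
/*
 * The uaccess entry points build a temporary vgic_io_device on the
 * stack so that userspace register accesses go through exactly the
 * same region tables and handlers as guest MMIO traps.
 */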
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}