vgic-mmio.c

/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
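
/*
 * Trivial accessors: read-as-zero (RAZ), read-as-ones (RAO) and
 * write-ignore (WI).  The per-GIC-version register tables use these for
 * reserved or otherwise unimplemented register ranges.
 */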
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
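
/*
 * GICD_ISENABLER/GICD_ICENABLER writes: each bit set in the written value
 * enables resp. disables the corresponding interrupt.  Enabling may make an
 * already-pending interrupt deliverable, so the set path hands the IRQ to
 * vgic_queue_irq_unlock(), which also drops the irq_lock.
 */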
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->enabled = false;
		spin_unlock(&irq->irq_lock);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
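
/*
 * Helper to identify SGIs on a GICv2 model: their pending bits in
 * GICD_ISPENDR0/GICD_ICPENDR0 are treated as write-ignore below.
 */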
static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		spin_lock(&irq->irq_lock);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		spin_lock(&irq->irq_lock);
		irq->pending_latch = false;
		spin_unlock(&irq->irq_lock);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	struct kvm_vcpu *requester_vcpu;

	spin_lock(&irq->irq_lock);

	/*
	 * The vcpu parameter here can mean multiple things depending on how
	 * this function is called; when handling a trap from the kernel it
	 * depends on the GIC version, and these functions are also called as
	 * part of save/restore from userspace.
	 *
	 * Therefore, we have to figure out the requester in a reliable way.
	 *
	 * When accessing VGIC state from user space, the requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	requester_vcpu = kvm_arm_get_running_vcpu();

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	irq->active = new_active_state;
	if (new_active_state)
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	else
		spin_unlock(&irq->irq_lock);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}
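
/*
 * GICD_ICACTIVER/GICD_ISACTIVER: the guest-facing handlers below may halt
 * the VM around the update (see vgic_change_active_prepare()) so that no
 * list register can overwrite the new active state, while the *_uaccess_*
 * variants rely on userspace having already stopped all VCPUs.
 */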
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
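/*
 * The narrowing below keeps only the top VGIC_PRI_BITS bits of the
 * guest-supplied priority.  For example, with VGIC_PRI_BITS == 5 the mask
 * is GENMASK(7, 3) == 0xf8, so a guest write of 0x7f is stored (and read
 * back) as 0x78.
 */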
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock(&irq->irq_lock);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
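
/*
 * GICD_ICFGR encoding: each interrupt gets two bits, with bit[1] selecting
 * edge (1) or level (0) triggering; bit[0] is not modelled and reads as
 * zero.  This is why the read above uses '2U << (i * 2)' and the write
 * below only tests bit 'i * 2 + 1'.
 */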
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
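
/*
 * Save/restore helpers for the line level of up to 32 interrupts starting
 * at intid, used by the userspace save/restore interface.  SGIs have no
 * line level and interrupts beyond the last allocated one are skipped.
 */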
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock(&irq->irq_lock);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		else
			spin_unlock(&irq->irq_lock);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
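
/*
 * Register regions are looked up with bsearch(), so the per-device region
 * tables handed to vgic_find_mmio_region() must be sorted by reg_offset.
 */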
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
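/*
 * For example, a 4-byte guest store of 0x01020304 arrives as the LE byte
 * sequence 04 03 02 01; le32_to_cpu() below turns that back into the
 * integer 0x01020304 on both little- and big-endian hosts.
 */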
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}
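
/*
 * Sanity-check an access against the matched region: reject sizes and
 * alignments the region does not allow, and, for per-IRQ registers, reject
 * accesses beyond the last allocated interrupt.
 */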
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}
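
/*
 * Userspace accesses via the KVM device API are always 32 bits wide and
 * never fault: an access to an unhandled offset reads as zero and writes
 * are ignored.  A region can provide dedicated uaccess handlers that
 * override the guest-facing ones.
 */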
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}
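
/*
 * MMIO dispatch for trapping guest accesses: look up the region for this
 * device and forward to its handler, picking the target VCPU (or ITS) from
 * the iodev type.  Accesses to unhandled offsets read as zero and writes to
 * them are ignored.
 */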
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};
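
/*
 * Register the distributor as a device on the KVM MMIO bus at
 * dist_base_address.  The per-GIC-version init helper fills in the register
 * table for this iodev and returns the length of the distributor region.
 */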
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}