vgic-kvm-device.c

/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment)
{
        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (!IS_ALIGNED(addr, alignment))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;

        return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
        if (kvm->arch.vgic.vgic_model != type_needed)
                return -ENODEV;
        else
                return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t *addr_ptr, alignment;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                addr_ptr = &vgic->vgic_cpu_base;
                alignment = SZ_4K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                addr_ptr = &vgic->vgic_dist_base;
                alignment = SZ_64K;
                break;
        case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
                if (r)
                        break;
                if (write) {
                        r = vgic_v3_set_redist_base(kvm, *addr);
                        goto out;
                }
                addr_ptr = &vgic->vgic_redist_base;
                break;
        default:
                r = -ENODEV;
        }

        if (r)
                goto out;

        if (write) {
                r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
                if (!r)
                        *addr_ptr = *addr;
        } else {
                *addr = *addr_ptr;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
}
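
The address groups above are reached from userspace through KVM_SET_DEVICE_ATTR on the VGIC device fd. A minimal sketch (not part of this file), assuming vgic_fd came from a prior KVM_CREATE_DEVICE call and using 0x08000000 purely as an example guest address:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: place the GICv2 distributor at an example guest address. */
static int set_vgic_v2_dist_base(int vgic_fd, uint64_t guest_addr)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
                /* attr.addr carries a pointer to the 64-bit address value */
                .addr  = (uint64_t)(uintptr_t)&guest_addr,
        };

        /* Ends up in kvm_vgic_addr() above with write == true. */
        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}

vgic_set_common_attr() below copies the value from that pointer before calling kvm_vgic_addr().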
static int vgic_set_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 val;
                int ret = 0;

                if (get_user(val, uaddr))
                        return -EFAULT;

                /*
                 * We require:
                 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
                 * - at most 1024 interrupts
                 * - a multiple of 32 interrupts
                 */
                if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
                    val > VGIC_MAX_RESERVED ||
                    (val & 31))
                        return -EINVAL;

                mutex_lock(&dev->kvm->lock);

                if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
                        ret = -EBUSY;
                else
                        dev->kvm->arch.vgic.nr_spis =
                                val - VGIC_NR_PRIVATE_IRQS;

                mutex_unlock(&dev->kvm->lock);

                return ret;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        mutex_lock(&dev->kvm->lock);
                        r = vgic_init(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return r;
                }
                break;
        }
        }

        return -ENXIO;
}
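
The KVM_DEV_ARM_VGIC_GRP_NR_IRQS and KVM_DEV_ARM_VGIC_GRP_CTRL groups handled above are likewise driven from userspace. A hedged sketch (the helper name and the value 128 are illustrative; 128 satisfies the constraints in the comment above, being a multiple of 32 with at least 32 SPIs on top of the 32 private interrupts):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: size the VGIC to 128 interrupts, then initialize it. */
static int configure_and_init_vgic(int vgic_fd)
{
        uint32_t nr_irqs = 128;
        struct kvm_device_attr nr = {
                .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                .addr  = (uint64_t)(uintptr_t)&nr_irqs,
        };
        struct kvm_device_attr init = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
        };

        if (ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &nr))
                return -1;
        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &init);
}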
static int vgic_get_common_attr(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;

                r = put_user(dev->kvm->arch.vgic.nr_spis +
                             VGIC_NR_PRIVATE_IRQS, uaddr);
                break;
        }
        }

        return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
        int ret = -ENODEV;

        switch (type) {
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V2);
                break;
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
                                              KVM_DEV_TYPE_ARM_VGIC_V3);
                if (ret)
                        break;
                ret = kvm_vgic_register_its_device();
                break;
        }

        return ret;
}
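
kvm_register_vgic_device() is what makes KVM_CREATE_DEVICE accept the VGIC device types. From userspace the device fd is obtained roughly like this (a sketch, assuming vm_fd was returned by KVM_CREATE_VM):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns the GICv3 device fd, or -1 on failure. */
static int create_vgic_v3(int vm_fd)
{
        struct kvm_create_device cd = {
                .type = KVM_DEV_TYPE_ARM_VGIC_V3,
        };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
                return -1;
        /* cd.fd is filled in by the kernel; further ioctls target this fd. */
        return cd.fd;
}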
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        int cpuid;

        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                 KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
                return -EINVAL;

        reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/* unlocks vcpus from @vcpu_lock_idx and smaller */
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
        struct kvm_vcpu *tmp_vcpu;

        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&tmp_vcpu->mutex);
        }
}

void unlock_all_vcpus(struct kvm *kvm)
{
        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *tmp_vcpu;
        int c;

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab the
         * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
         * that no other VCPUs are run and fiddle with the vgic state while we
         * access it.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
                if (!mutex_trylock(&tmp_vcpu->mutex)) {
                        unlock_vcpus(kvm, c - 1);
                        return false;
                }
        }

        return true;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address the value is read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u32 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

        ret = vgic_init(dev->kvm);
        if (ret)
                goto out;

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v2_attr_regs_access(dev, attr, &reg, true);
        }
        }

        return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                ret = vgic_v2_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        }

        return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                return vgic_v2_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic-v2",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v2_set_attr,
        .get_attr = vgic_v2_get_attr,
        .has_attr = vgic_v2_has_attr,
};
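
The attr encoding parsed by vgic_v2_parse_attr() packs a vcpu index and a GICD register byte offset into attr->attr. A hedged userspace sketch of a read through kvm_arm_vgic_v2_ops (the helper name is illustrative; an offset of 0 would select GICD_CTLR):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: read a 32-bit GICv2 distributor register. */
static int read_v2_dist_reg(int vgic_fd, uint64_t cpuid, uint64_t offset,
                            uint32_t *val)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                .attr  = (cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
                         (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
                .addr  = (uint64_t)(uintptr_t)val,
        };

        /* Lands in vgic_v2_attr_regs_access() with is_write == false. */
        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}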
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
{
        unsigned long vgic_mpidr, mpidr_reg;

        /*
         * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
         * attr might not hold MPIDR. Hence assume vcpu0.
         */
        if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
                vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
                              KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

                mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
                reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
        } else {
                reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
        }

        if (!reg_attr->vcpu)
                return -EINVAL;

        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

        return 0;
}

/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      address the value is read or written
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
                                    struct kvm_device_attr *attr,
                                    u64 *reg, bool is_write)
{
        struct vgic_reg_attr reg_attr;
        gpa_t addr;
        struct kvm_vcpu *vcpu;
        int ret;
        u32 tmp32;

        ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        mutex_lock(&dev->kvm->lock);

        if (unlikely(!vgic_initialized(dev->kvm))) {
                ret = -EBUSY;
                goto out;
        }

        if (!lock_all_vcpus(dev->kvm)) {
                ret = -EBUSY;
                goto out;
        }

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                if (is_write)
                        tmp32 = *reg;
                ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
                if (is_write)
                        tmp32 = *reg;
                ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
                if (!is_write)
                        *reg = tmp32;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 regid;

                regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
                ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
                                                  regid, reg);
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                unsigned int info, intid;

                info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                        KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
                if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
                        intid = attr->attr &
                                KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
                        ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
                                                              intid, reg);
                } else {
                        ret = -EINVAL;
                }
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }

        unlock_all_vcpus(dev->kvm);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_set_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 tmp32;
                u64 reg;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                if (get_user(tmp32, uaddr))
                        return -EFAULT;

                reg = tmp32;
                return vgic_v3_attr_regs_access(dev, attr, &reg, true);
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL: {
                int ret;

                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        mutex_lock(&dev->kvm->lock);

                        if (!lock_all_vcpus(dev->kvm)) {
                                mutex_unlock(&dev->kvm->lock);
                                return -EBUSY;
                        }
                        ret = vgic_v3_save_pending_tables(dev->kvm);
                        unlock_all_vcpus(dev->kvm);
                        mutex_unlock(&dev->kvm->lock);
                        return ret;
                }
                break;
        }
        }
        return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        int ret;

        ret = vgic_get_common_attr(dev, attr);
        if (ret != -ENXIO)
                return ret;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 reg;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                return put_user(reg, uaddr);
        }
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u64 reg;
                u32 tmp32;

                ret = vgic_v3_attr_regs_access(dev, attr, &reg, false);
                if (ret)
                        return ret;
                tmp32 = reg;
                return put_user(tmp32, uaddr);
        }
        }
        return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
                            struct kvm_device_attr *attr)
{
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V3_ADDR_TYPE_DIST:
                case KVM_VGIC_V3_ADDR_TYPE_REDIST:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
                return vgic_v3_has_attr_regs(dev, attr);
        case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
                return 0;
        case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
                if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
                      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
                      VGIC_LEVEL_INFO_LINE_LEVEL)
                        return 0;
                break;
        }
        case KVM_DEV_ARM_VGIC_GRP_CTRL:
                switch (attr->attr) {
                case KVM_DEV_ARM_VGIC_CTRL_INIT:
                        return 0;
                case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
                        return 0;
                }
        }
        return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
        .name = "kvm-arm-vgic-v3",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_v3_set_attr,
        .get_attr = vgic_v3_get_attr,
        .has_attr = vgic_v3_has_attr,
};
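
Finally, the KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES control handled in vgic_v3_set_attr() is typically issued by userspace before migration so that pending interrupt state is written out to the guest's pending tables. A sketch, assuming vgic_fd is the GICv3 device fd from KVM_CREATE_DEVICE:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: flush GICv3 pending tables to guest memory. */
static int save_vgic_v3_pending_tables(int vgic_fd)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES,
        };

        /* Reaches vgic_v3_save_pending_tables() with all vcpus locked. */
        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}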