/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/iommu.h>

#include "assigned-dev.h"

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
	"Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

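/*
 * Pin @npages of guest memory starting at @gfn and return the pfn of the
 * first page.  The results of the remaining gfn_to_pfn_memslot() calls are
 * deliberately discarded: the caller only pins ranges that lie within a
 * single host page, so the pfns are contiguous and kvm_unpin_pages() can
 * later release them as pfn, pfn + 1, ..., pfn + npages - 1.
 */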
static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			       unsigned long npages)
{
	gfn_t end_gfn;
	kvm_pfn_t pfn;

	pfn = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + npages;
	gfn += 1;

	if (is_error_noslot_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}

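/* Release the references taken by kvm_pin_pages() on a contiguous pfn range. */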
static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
			    unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

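/*
 * Map a memslot's guest memory into the IOMMU domain.  Each chunk is
 * mapped with the largest page size the host backing allows, shrunk
 * until it fits inside the memslot and both the gfn and the hva are
 * aligned to it.  If any chunk fails to map, everything mapped so far
 * is torn down again via kvm_iommu_put_pages().
 */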
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4KiB steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_pages: "
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}

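/*
 * Map every memslot into the IOMMU domain, walking the memslot array
 * under SRCU.  Also registers the VM as doing noncoherent DMA when the
 * mappings are created without IOMMU_CACHE.  Stops at the first slot
 * that fails to map.
 */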
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_register_noncoherent_dma(kvm);

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

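/*
 * Attach an assigned PCI device to the VM's IOMMU domain.  If the
 * device's cache-coherency capability differs from what the existing
 * mappings assumed, all memslots are unmapped and remapped so the
 * IOMMU_CACHE flag matches the new requirement.
 */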
int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r;
	bool noncoherent;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		dev_err(&pdev->dev, "kvm assign device failed ret %d\n", r);
		return r;
	}

	noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

	/* Check if we need to update the IOMMU page table for guest memory */
	if (noncoherent != kvm->arch.iommu_noncoherent) {
		kvm_iommu_unmap_memslots(kvm);
		kvm->arch.iommu_noncoherent = noncoherent;
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	kvm_arch_start_assignment(kvm);
	pci_set_dev_assigned(pdev);

	dev_info(&pdev->dev, "kvm assign device\n");

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

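/* Detach an assigned PCI device from the VM's IOMMU domain. */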
int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pci_clear_dev_assigned(pdev);
	kvm_arch_end_assignment(kvm);

	dev_info(&pdev->dev, "kvm deassign device\n");

	return 0;
}

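/*
 * Allocate the VM's IOMMU domain and map all guest memory into it.
 * Unless allow_unsafe_assigned_interrupts is set, assignment is
 * refused on hosts without interrupt remapping, since an assigned
 * device could otherwise be used to inject arbitrary interrupts
 * into the host.
 */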
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

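/*
 * Unmap and unpin @npages of guest memory starting at @base_gfn.
 * iommu_unmap() may tear down more than the single 4KiB page requested
 * when a larger page backs the iova, so the loop advances by the size
 * actually unmapped.
 */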
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	kvm_pfn_t pfn;
	u64 phys;

	domain = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

		if (!phys) {
			gfn++;
			continue;
		}

		pfn = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;

		cond_resched();
	}
}

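/* Unmap and unpin a single memslot's guest memory. */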
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

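/*
 * Unmap every memslot under SRCU and drop the noncoherent-DMA
 * registration taken by kvm_iommu_map_memslots(), if any.
 */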
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_unregister_noncoherent_dma(kvm);

	return 0;
}

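/*
 * Tear down the VM's IOMMU domain: unmap and unpin all guest memory,
 * clear the domain pointer under slots_lock so concurrent mappers see
 * it as gone, then free the domain itself.
 */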
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	kvm->arch.iommu_noncoherent = false;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}