passthrough.c

// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Provides restricted access to the real PCI bus topology
 *               to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include "pciback.h"

struct passthrough_dev_data {
	/* Access to dev_list must be protected by lock */
	struct list_head dev_list;
	struct mutex lock;
};

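/*
 * Look up an exported device by domain/bus/devfn. Walks the per-pdev
 * device list under dev_data->lock and returns the matching pci_dev,
 * or NULL if the device is not being passed through to this frontend.
 */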
static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
					       unsigned int domain,
					       unsigned int bus,
					       unsigned int devfn)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	struct pci_dev *dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
		    && bus == (unsigned int)dev_entry->dev->bus->number
		    && devfn == dev_entry->dev->devfn) {
			dev = dev_entry->dev;
			break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return dev;
}

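/*
 * Track a newly assigned device in the per-pdev list and report its
 * real domain/bus/devfn to the frontend through publish_cb.
 */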
static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev,
				   int devid, publish_pci_dev_cb publish_cb)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry;
	unsigned int domain, bus, devfn;
	int err;

	dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
	if (!dev_entry)
		return -ENOMEM;
	dev_entry->dev = dev;

	mutex_lock(&dev_data->lock);
	list_add_tail(&dev_entry->list, &dev_data->dev_list);
	mutex_unlock(&dev_data->lock);

	/* Publish this device. */
	domain = (unsigned int)pci_domain_nr(dev->bus);
	bus = (unsigned int)dev->bus->number;
	devfn = dev->devfn;

	err = publish_cb(pdev, domain, bus, devfn, devid);

	return err;
}

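/*
 * Drop a device from the per-pdev list and hand it back to pcistub.
 * The @lock argument tells us whether the device lock still needs to
 * be taken around pcistub_put_pci_dev().
 */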
static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
					struct pci_dev *dev, bool lock)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;
	struct pci_dev *found_dev = NULL;

	mutex_lock(&dev_data->lock);

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		if (dev_entry->dev == dev) {
			list_del(&dev_entry->list);
			found_dev = dev_entry->dev;
			kfree(dev_entry);
		}
	}

	mutex_unlock(&dev_data->lock);

	if (found_dev) {
		if (lock)
			device_lock(&found_dev->dev);
		pcistub_put_pci_dev(found_dev);
		if (lock)
			device_unlock(&found_dev->dev);
	}
}

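/*
 * Allocate and initialise the per-pdev bookkeeping (device list and
 * its lock) when this backend is attached to a xen_pcibk_device.
 */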
static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data;

	dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	mutex_init(&dev_data->lock);

	INIT_LIST_HEAD(&dev_data->dev_list);

	pdev->pci_dev_data = dev_data;

	return 0;
}

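/*
 * Tell the frontend which buses to scan. A bus is published as a root
 * only when none of the device's parent bridges are themselves
 * exported; buses behind an exported bridge are found by scanning that
 * bridge instead.
 */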
static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
					 publish_pci_root_cb publish_root_cb)
{
	int err = 0;
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *e;
	struct pci_dev *dev;
	int found;
	unsigned int domain, bus;

	mutex_lock(&dev_data->lock);

	list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
		/* Only publish this device as a root if none of its
		 * parent bridges are exported
		 */
		found = 0;
		dev = dev_entry->dev->bus->self;
		for (; !found && dev != NULL; dev = dev->bus->self) {
			list_for_each_entry(e, &dev_data->dev_list, list) {
				if (dev == e->dev) {
					found = 1;
					break;
				}
			}
		}

		domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
		bus = (unsigned int)dev_entry->dev->bus->number;

		if (!found) {
			err = publish_root_cb(pdev, domain, bus);
			if (err)
				break;
		}
	}

	mutex_unlock(&dev_data->lock);

	return err;
}

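/*
 * Release every device still tracked for this pdev and free the
 * per-pdev bookkeeping; used when the backend instance is torn down.
 */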
static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
{
	struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
	struct pci_dev_entry *dev_entry, *t;

	list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
		struct pci_dev *dev = dev_entry->dev;

		list_del(&dev_entry->list);
		device_lock(&dev->dev);
		pcistub_put_pci_dev(dev);
		device_unlock(&dev->dev);
		kfree(dev_entry);
	}

	kfree(dev_data);
	pdev->pci_dev_data = NULL;
}

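/*
 * Report where the frontend sees a given device. The passthrough
 * backend exposes the real topology, so the host domain/bus/devfn is
 * returned unchanged.
 */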
static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
					struct xen_pcibk_device *pdev,
					unsigned int *domain, unsigned int *bus,
					unsigned int *devfn)
{
	*domain = pci_domain_nr(pcidev->bus);
	*bus = pcidev->bus->number;
	*devfn = pcidev->devfn;

	return 1;
}

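/* Operations implementing the "passthrough" flavour of xen-pciback. */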
const struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
	.name		= "passthrough",
	.init		= __xen_pcibk_init_devices,
	.free		= __xen_pcibk_release_devices,
	.find		= __xen_pcibk_get_pcifront_dev,
	.publish	= __xen_pcibk_publish_pci_roots,
	.release	= __xen_pcibk_release_pci_dev,
	.add		= __xen_pcibk_add_pci_dev,
	.get		= __xen_pcibk_get_pci_dev,
};