vpci.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255
/*
 * PCI Backend - Provides a Virtual PCI bus (with real devices)
 * to the frontend
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */
  7. #include <linux/list.h>
  8. #include <linux/slab.h>
  9. #include <linux/pci.h>
  10. #include <linux/mutex.h>
  11. #include "pciback.h"
  12. #define PCI_SLOT_MAX 32
  13. struct vpci_dev_data {
  14. /* Access to dev_list must be protected by lock */
  15. struct list_head dev_list[PCI_SLOT_MAX];
  16. struct mutex lock;
  17. };
  18. static inline struct list_head *list_first(struct list_head *head)
  19. {
  20. return head->next;
  21. }
  22. static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
  23. unsigned int domain,
  24. unsigned int bus,
  25. unsigned int devfn)
  26. {
  27. struct pci_dev_entry *entry;
  28. struct pci_dev *dev = NULL;
  29. struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
  30. if (domain != 0 || bus != 0)
  31. return NULL;
  32. if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
  33. mutex_lock(&vpci_dev->lock);
  34. list_for_each_entry(entry,
  35. &vpci_dev->dev_list[PCI_SLOT(devfn)],
  36. list) {
  37. if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
  38. dev = entry->dev;
  39. break;
  40. }
  41. }
  42. mutex_unlock(&vpci_dev->lock);
  43. }
  44. return dev;
  45. }
  46. static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
  47. {
  48. if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
  49. && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
  50. return 1;
  51. return 0;
  52. }
  53. static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
  54. struct pci_dev *dev, int devid,
  55. publish_pci_dev_cb publish_cb)
  56. {
  57. int err = 0, slot, func = -1;
  58. struct pci_dev_entry *t, *dev_entry;
  59. struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
  60. if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
  61. err = -EFAULT;
  62. xenbus_dev_fatal(pdev->xdev, err,
  63. "Can't export bridges on the virtual PCI bus");
  64. goto out;
  65. }
  66. dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
  67. if (!dev_entry) {
  68. err = -ENOMEM;
  69. xenbus_dev_fatal(pdev->xdev, err,
  70. "Error adding entry to virtual PCI bus");
  71. goto out;
  72. }
  73. dev_entry->dev = dev;
  74. mutex_lock(&vpci_dev->lock);
  75. /* Keep multi-function devices together on the virtual PCI bus */
  76. for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
  77. if (!list_empty(&vpci_dev->dev_list[slot])) {
  78. t = list_entry(list_first(&vpci_dev->dev_list[slot]),
  79. struct pci_dev_entry, list);
  80. if (match_slot(dev, t->dev)) {
  81. pr_info(DRV_NAME ": vpci: %s: "
  82. "assign to virtual slot %d func %d\n",
  83. pci_name(dev), slot,
  84. PCI_FUNC(dev->devfn));
  85. list_add_tail(&dev_entry->list,
  86. &vpci_dev->dev_list[slot]);
  87. func = PCI_FUNC(dev->devfn);
  88. goto unlock;
  89. }
  90. }
  91. }
  92. /* Assign to a new slot on the virtual PCI bus */
  93. for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
  94. if (list_empty(&vpci_dev->dev_list[slot])) {
  95. printk(KERN_INFO DRV_NAME
  96. ": vpci: %s: assign to virtual slot %d\n",
  97. pci_name(dev), slot);
  98. list_add_tail(&dev_entry->list,
  99. &vpci_dev->dev_list[slot]);
  100. func = PCI_FUNC(dev->devfn);
  101. goto unlock;
  102. }
  103. }
  104. err = -ENOMEM;
  105. xenbus_dev_fatal(pdev->xdev, err,
  106. "No more space on root virtual PCI bus");
  107. unlock:
  108. mutex_unlock(&vpci_dev->lock);
  109. /* Publish this device. */
  110. if (!err)
  111. err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
  112. out:
  113. return err;
  114. }
  115. static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
  116. struct pci_dev *dev)
  117. {
  118. int slot;
  119. struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
  120. struct pci_dev *found_dev = NULL;
  121. mutex_lock(&vpci_dev->lock);
  122. for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
  123. struct pci_dev_entry *e;
  124. list_for_each_entry(e, &vpci_dev->dev_list[slot], list) {
  125. if (e->dev == dev) {
  126. list_del(&e->list);
  127. found_dev = e->dev;
  128. kfree(e);
  129. goto out;
  130. }
  131. }
  132. }
  133. out:
  134. mutex_unlock(&vpci_dev->lock);
  135. if (found_dev)
  136. pcistub_put_pci_dev(found_dev);
  137. }
  138. static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
  139. {
  140. int slot;
  141. struct vpci_dev_data *vpci_dev;
  142. vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
  143. if (!vpci_dev)
  144. return -ENOMEM;
  145. mutex_init(&vpci_dev->lock);
  146. for (slot = 0; slot < PCI_SLOT_MAX; slot++)
  147. INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
  148. pdev->pci_dev_data = vpci_dev;
  149. return 0;
  150. }
  151. static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
  152. publish_pci_root_cb publish_cb)
  153. {
  154. /* The Virtual PCI bus has only one root */
  155. return publish_cb(pdev, 0, 0);
  156. }
  157. static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
  158. {
  159. int slot;
  160. struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
  161. for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
  162. struct pci_dev_entry *e, *tmp;
  163. list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
  164. list) {
  165. list_del(&e->list);
  166. pcistub_put_pci_dev(e->dev);
  167. kfree(e);
  168. }
  169. }
  170. kfree(vpci_dev);
  171. pdev->pci_dev_data = NULL;
  172. }
  173. static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
  174. struct xen_pcibk_device *pdev,
  175. unsigned int *domain, unsigned int *bus,
  176. unsigned int *devfn)
  177. {
  178. struct pci_dev_entry *entry;
  179. struct pci_dev *dev = NULL;
  180. struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
  181. int found = 0, slot;
  182. mutex_lock(&vpci_dev->lock);
  183. for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
  184. list_for_each_entry(entry,
  185. &vpci_dev->dev_list[slot],
  186. list) {
  187. dev = entry->dev;
  188. if (dev && dev->bus->number == pcidev->bus->number
  189. && pci_domain_nr(dev->bus) ==
  190. pci_domain_nr(pcidev->bus)
  191. && dev->devfn == pcidev->devfn) {
  192. found = 1;
  193. *domain = 0;
  194. *bus = 0;
  195. *devfn = PCI_DEVFN(slot,
  196. PCI_FUNC(pcidev->devfn));
  197. }
  198. }
  199. }
  200. mutex_unlock(&vpci_dev->lock);
  201. return found;
  202. }
  203. const struct xen_pcibk_backend xen_pcibk_vpci_backend = {
  204. .name = "vpci",
  205. .init = __xen_pcibk_init_devices,
  206. .free = __xen_pcibk_release_devices,
  207. .find = __xen_pcibk_get_pcifront_dev,
  208. .publish = __xen_pcibk_publish_pci_roots,
  209. .release = __xen_pcibk_release_pci_dev,
  210. .add = __xen_pcibk_add_pci_dev,
  211. .get = __xen_pcibk_get_pci_dev,
  212. };