pci_stub.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653
  1. /*
  2. * PCI Stub Driver - Grabs devices in backend to be exported later
  3. *
  4. * Ryan Wilson <hap9@epoch.ncsc.mil>
  5. * Chris Bookholt <hap10@epoch.ncsc.mil>
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/rwsem.h>
  11. #include <linux/list.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/kref.h>
  14. #include <linux/pci.h>
  15. #include <linux/wait.h>
  16. #include <linux/sched.h>
  17. #include <linux/atomic.h>
  18. #include <xen/events.h>
  19. #include <asm/xen/pci.h>
  20. #include <asm/xen/hypervisor.h>
  21. #include <xen/interface/physdev.h>
  22. #include "pciback.h"
  23. #include "conf_space.h"
  24. #include "conf_space_quirks.h"
  25. #define PCISTUB_DRIVER_NAME "pciback"
/* List of devices to seize, supplied via the read-only "hide" module
 * parameter below (parsing happens outside this chunk).
 */
static char *pci_devs_to_hide;

/* Woken when pcifront acknowledges an AER request — see common_process(). */
wait_queue_head_t xen_pcibk_aer_wait_queue;

/* Semaphore to synchronize AER handling and xen_pcibk remove/reconfigure ops.
 * We want to avoid xen_pcibk devices being removed in the middle of AER ops.
 */
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);

/* One entry per device (or bridge) the user asked us to seize; linked on
 * pcistub_device_ids and protected by device_ids_lock.
 */
struct pcistub_device_id {
	struct list_head slot_list;
	int domain;		/* PCI domain (segment) number */
	unsigned char bus;
	unsigned int devfn;	/* encoded slot+function (see PCI_DEVFN) */
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

/* Per-device state for a seized PCI device, refcounted via 'kref'. */
struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;	/* on pcistub_devices or seized_devices */
	spinlock_t lock;		/* protects 'pdev' below */

	struct pci_dev *dev;
	struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);
  58. static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
  59. {
  60. struct pcistub_device *psdev;
  61. dev_dbg(&dev->dev, "pcistub_device_alloc\n");
  62. psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
  63. if (!psdev)
  64. return NULL;
  65. psdev->dev = pci_dev_get(dev);
  66. if (!psdev->dev) {
  67. kfree(psdev);
  68. return NULL;
  69. }
  70. kref_init(&psdev->kref);
  71. spin_lock_init(&psdev->lock);
  72. return psdev;
  73. }
/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the reset function which does not take lock as this
	 * is called from "unbind" which takes a device_lock mutex.
	 */
	__pci_reset_function_locked(dev);

	/* Restore the configuration space snapshot taken at bind time
	 * (pcistub_init_device); this also frees the saved state.
	 */
	if (dev_data &&
	    pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_info(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

	/* Undo the PHYSDEVOP_prepare_msix done at init time. */
	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		if (err && err != -ENOSYS)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	pci_clear_dev_assigned(dev);
	pci_dev_put(dev);	/* drop the reference taken in pcistub_device_alloc */

	kfree(psdev);
}
/* Take an additional reference on @psdev. */
static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}
/* Drop a reference on @psdev; the final put runs pcistub_device_release(). */
static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}
  125. static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
  126. int slot, int func)
  127. {
  128. struct pcistub_device *psdev;
  129. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  130. if (psdev->dev != NULL
  131. && domain == pci_domain_nr(psdev->dev->bus)
  132. && bus == psdev->dev->bus->number
  133. && slot == PCI_SLOT(psdev->dev->devfn)
  134. && func == PCI_FUNC(psdev->dev->devfn)) {
  135. return psdev;
  136. }
  137. }
  138. return NULL;
  139. }
  140. static struct pcistub_device *pcistub_device_find(int domain, int bus,
  141. int slot, int func)
  142. {
  143. struct pcistub_device *psdev;
  144. unsigned long flags;
  145. spin_lock_irqsave(&pcistub_devices_lock, flags);
  146. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  147. if (psdev)
  148. pcistub_device_get(psdev);
  149. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  150. return psdev;
  151. }
/* Try to hand psdev->dev out to the backend instance @pdev.
 *
 * Takes a reference on @psdev up front; on success the stub is marked
 * in-use (psdev->pdev set) and that reference is kept until the device
 * is returned via pcistub_put_pci_dev().  If the device is already
 * claimed by some backend, the reference is dropped again and NULL is
 * returned.
 */
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	/* Already in use — undo the reference taken above. */
	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}
  168. struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
  169. int domain, int bus,
  170. int slot, int func)
  171. {
  172. struct pcistub_device *psdev;
  173. struct pci_dev *found_dev = NULL;
  174. unsigned long flags;
  175. spin_lock_irqsave(&pcistub_devices_lock, flags);
  176. psdev = pcistub_device_find_locked(domain, bus, slot, func);
  177. if (psdev)
  178. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  179. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  180. return found_dev;
  181. }
  182. struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
  183. struct pci_dev *dev)
  184. {
  185. struct pcistub_device *psdev;
  186. struct pci_dev *found_dev = NULL;
  187. unsigned long flags;
  188. spin_lock_irqsave(&pcistub_devices_lock, flags);
  189. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  190. if (psdev->dev == dev) {
  191. found_dev = pcistub_device_get_pci_dev(pdev, psdev);
  192. break;
  193. }
  194. }
  195. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  196. return found_dev;
  197. }
/*
 * Called when:
 * - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device
 * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
 * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
 *
 * As such we have to be careful.
 *
 * To make this easier, the caller has to hold the device lock.
 */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;
	struct xen_pcibk_dev_data *dev_data;
	int ret;

	/* Look the device up on the main list. */
	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (WARN_ON(!found_psdev))
		return;

	/*hold this lock for avoiding breaking link between
	 * pcistub and xen_pcibk when AER is in processing
	 */
	down_write(&pcistub_sem);

	/* Cleanup our device
	 * (so it's ready for the next domain)
	 */
	device_lock_assert(&dev->dev);
	__pci_reset_function_locked(dev);

	dev_data = pci_get_drvdata(dev);
	ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
	if (!ret) {
		/*
		 * The usual sequence is pci_save_state & pci_restore_state
		 * but the guest might have messed the configuration space up.
		 * Use the initial version (when device was bound to us).
		 */
		pci_restore_state(dev);
	} else
		dev_info(&dev->dev, "Could not reload PCI state\n");

	/* This disables the device. */
	xen_pcibk_reset_device(dev);

	/* And cleanup up our emulated fields. */
	xen_pcibk_config_reset_dev(dev);
	xen_pcibk_config_free_dyn_fields(dev);

	xen_unregister_device_domain_owner(dev);

	/* Mark the stub free again and drop the reference taken in
	 * pcistub_device_get_pci_dev().
	 */
	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}
  257. static int pcistub_match_one(struct pci_dev *dev,
  258. struct pcistub_device_id *pdev_id)
  259. {
  260. /* Match the specified device by domain, bus, slot, func and also if
  261. * any of the device's parent bridges match.
  262. */
  263. for (; dev != NULL; dev = dev->bus->self) {
  264. if (pci_domain_nr(dev->bus) == pdev_id->domain
  265. && dev->bus->number == pdev_id->bus
  266. && dev->devfn == pdev_id->devfn)
  267. return 1;
  268. /* Sometimes topmost bridge links to itself. */
  269. if (dev == dev->bus->self)
  270. break;
  271. }
  272. return 0;
  273. }
  274. static int pcistub_match(struct pci_dev *dev)
  275. {
  276. struct pcistub_device_id *pdev_id;
  277. unsigned long flags;
  278. int found = 0;
  279. spin_lock_irqsave(&device_ids_lock, flags);
  280. list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
  281. if (pcistub_match_one(dev, pdev_id)) {
  282. found = 1;
  283. break;
  284. }
  285. }
  286. spin_unlock_irqrestore(&device_ids_lock, flags);
  287. return found;
  288. }
/* One-time setup of a seized device: allocate driver data, build the
 * emulated config space, enable the device, let Xen prepare MSI-X, and
 * snapshot the initial PCI state so it can be restored whenever the
 * device is handed back (see pcistub_put_pci_dev).
 */
static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices (yet). If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
			   + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Setup name for fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the pci device's true irq (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

	/* Ask the hypervisor to pre-allocate MSI-X resources. */
	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err && err != -ENOSYS)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
		/* NOTE: err is intentionally not fatal here */
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}

	/* Now disable the device (this also ensures some private device
	 * data is setup before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	pci_set_dev_assigned(dev);
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}
/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	/* Drain seized_devices, initializing each deferred device. */
	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		/* Drop the lock around init — pcistub_seize notes that
		 * pcistub_init_device must not run with irqs disabled.
		 */
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	/* From now on pcistub_seize() initializes devices immediately. */
	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}
  395. static void pcistub_device_id_add_list(struct pcistub_device_id *new,
  396. int domain, int bus, unsigned int devfn)
  397. {
  398. struct pcistub_device_id *pci_dev_id;
  399. unsigned long flags;
  400. int found = 0;
  401. spin_lock_irqsave(&device_ids_lock, flags);
  402. list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
  403. if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
  404. pci_dev_id->devfn == devfn) {
  405. found = 1;
  406. break;
  407. }
  408. }
  409. if (!found) {
  410. new->domain = domain;
  411. new->bus = bus;
  412. new->devfn = devfn;
  413. list_add_tail(&new->slot_list, &pcistub_device_ids);
  414. }
  415. spin_unlock_irqrestore(&device_ids_lock, flags);
  416. if (found)
  417. kfree(new);
  418. }
/* Take ownership of @dev.  If late-stage init has already run, initialize
 * the device immediately; otherwise park it on seized_devices for
 * pcistub_init_devices_late().  Consumes @pci_dev_id (may be NULL): on
 * success it is appended to the id list, on failure it is freed.
 */
static int pcistub_seize(struct pci_dev *dev,
			 struct pcistub_device_id *pci_dev_id)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev) {
		kfree(pci_dev_id);
		return -ENOMEM;
	}

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err) {
		kfree(pci_dev_id);
		pcistub_device_put(psdev);
	} else if (pci_dev_id)
		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
					   dev->bus->number, dev->devfn);

	return err;
}
/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0, match;
	struct pcistub_device_id *pci_dev_id = NULL;

	dev_dbg(&dev->dev, "probing...\n");

	match = pcistub_match(dev);

	/* Bind when the device is on the hide list, or when the user
	 * explicitly targeted this driver via sysfs driver_override.
	 */
	if ((dev->driver_override &&
	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
	    match) {
		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		if (!match) {
			/* driver_override path: record an id entry so the
			 * device also matches on later rebinds; freed or
			 * consumed by pcistub_seize().
			 */
			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
			if (!pci_dev_id) {
				err = -ENOMEM;
				goto out;
			}
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev, pci_dev_id);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}
/* Called when 'unbind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	/* Drop any config-space quirks registered for this device. */
	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove %s\n",
			found_psdev->pdev ? "- in-use" : "");

		if (found_psdev->pdev) {
			int domid = xen_find_device_domain_owner(dev);

			/* Device is being unbound while a guest still owns it
			 * — loudly warn the administrator.
			 */
			pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
				pci_name(found_psdev->dev), domid);
			pr_warn("****** driver domain may still access this device's i/o resources!\n");
			pr_warn("****** shutdown driver domain before binding device\n");
			pr_warn("****** to other drivers or domains\n");

			/* N.B. This ends up calling pcistub_put_pci_dev which ends up
			 * doing the FLR. */
			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev,
						  false /* caller holds the lock. */);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}
/* Wildcard match table: the driver is offered every PCI device;
 * pcistub_probe() decides whether to actually bind.
 */
static const struct pci_device_id pcistub_ids[] = {
	{
	 .vendor = PCI_ANY_ID,
	 .device = PCI_ANY_ID,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{0,},
};
  533. #define PCI_NODENAME_MAX 40
  534. static void kill_domain_by_device(struct pcistub_device *psdev)
  535. {
  536. struct xenbus_transaction xbt;
  537. int err;
  538. char nodename[PCI_NODENAME_MAX];
  539. BUG_ON(!psdev);
  540. snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
  541. psdev->pdev->xdev->otherend_id);
  542. again:
  543. err = xenbus_transaction_start(&xbt);
  544. if (err) {
  545. dev_err(&psdev->dev->dev,
  546. "error %d when start xenbus transaction\n", err);
  547. return;
  548. }
  549. /*PV AER handlers will set this flag*/
  550. xenbus_printf(xbt, nodename, "aerState" , "aerfail");
  551. err = xenbus_transaction_end(xbt, 0);
  552. if (err) {
  553. if (err == -EAGAIN)
  554. goto again;
  555. dev_err(&psdev->dev->dev,
  556. "error %d when end xenbus transaction\n", err);
  557. return;
  558. }
  559. }
/* For each aer recovery step error_detected, mmio_enabled, etc, front_end and
 * backend need to have cooperation. In xen_pcibk, those steps will do similar
 * jobs: send service request and waiting for front_end response.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	struct xen_pcibk_device *pdev = psdev->pdev;
	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
	int ret;

	/* Fill the AER request in the page shared with pcifront. */
	/*with PV AER drivers*/
	aer_op = &(sh_info->aer_op);
	aer_op->cmd = aer_cmd ;
	/*useful for error_detected callback*/
	aer_op->err = state;
	/*pcifront_end BDF*/
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus,
					 &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev,
			DRV_NAME ": failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();	/* publish the request before setting the flags below */

	dev_dbg(&psdev->dev->dev,
		DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);

	/*local flag to mark there's aer request, xen_pcibk callback will use
	 * this flag to judge whether we need to check pci-front give aer
	 * service ack signal
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/*It is possible that a pcifront conf_read_write ops request invokes
	 * the callback which cause the spurious execution of wake_up.
	 * Yet it is harmless and better than a spinlock here
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&sh_info->flags);
	wmb();	/* flags must be visible before the event is delivered */
	notify_remote_via_irq(pdev->evtchn_irq);

	/* Enable IRQ to signal "request done". */
	xen_pcibk_lateeoi(pdev, 0);

	/* Wait (up to 300s) for pcifront to clear _XEN_PCIB_active. */
	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &sh_info->flags)), 300*HZ);

	/* Enable IRQ for pcifront request if not already active. */
	if (!test_bit(_PDEVF_op_active, &pdev->flags))
		xen_pcibk_lateeoi(pdev, 0);

	if (!ret) {
		/* Timed out and pcifront never acked: give up on it. */
		if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
				  (unsigned long *)&sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/* pcifront wrote its verdict into aer_op->err. */
	res = (pci_ers_result_t)aer_op->err;
	return res;
}
/*
 * xen_pcibk_slot_reset: it will send the slot_reset request to pcifront in case
 * of the device driver could provide this service, and then wait for pcifront
 * ack.
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	/* Block pciback remove/reconfigure while AER is in progress. */
	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);	/* balance pcistub_device_find */
	up_write(&pcistub_sem);
	return result;
}
/*xen_pcibk_mmio_enabled: it will send the mmio_enabled request to pcifront
 * in case of the device driver could provide this service, and then wait
 * for pcifront ack
 * @dev: pointer to PCI devices
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	/* Block pciback remove/reconfigure while AER is in progress. */
	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER mmio_enabled service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);	/* balance pcistub_device_find */
	up_write(&pcistub_sem);
	return result;
}
/*xen_pcibk_error_detected: it will send the error_detected request to pcifront
 * in case of the device driver could provide this service, and then wait
 * for pcifront ack.
 * @dev: pointer to PCI devices
 * @error: the current PCI connection state
 * return value is used by aer_core do_recovery policy
 */
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
						 pci_channel_state_t error)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_CAN_RECOVER;
	dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	/* Block pciback remove/reconfigure while AER is in progress. */
	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	/*Guest owns the device yet no aer handler regiested, kill guest*/
	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER error_detected service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);	/* balance pcistub_device_find */
	up_write(&pcistub_sem);
	return result;
}
  774. /*xen_pcibk_error_resume: it will send the error_resume request to pcifront
  775. * in case of the device driver could provide this service, and then wait
  776. * for pcifront ack.
  777. * @dev: pointer to PCI devices
  778. */
  779. static void xen_pcibk_error_resume(struct pci_dev *dev)
  780. {
  781. struct pcistub_device *psdev;
  782. dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
  783. dev->bus->number, dev->devfn);
  784. down_write(&pcistub_sem);
  785. psdev = pcistub_device_find(pci_domain_nr(dev->bus),
  786. dev->bus->number,
  787. PCI_SLOT(dev->devfn),
  788. PCI_FUNC(dev->devfn));
  789. if (!psdev || !psdev->pdev) {
  790. dev_err(&dev->dev,
  791. DRV_NAME " device is not found/assigned\n");
  792. goto end;
  793. }
  794. if (!psdev->pdev->sh_info) {
  795. dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
  796. " by HVM, kill it\n");
  797. kill_domain_by_device(psdev);
  798. goto end;
  799. }
  800. if (!test_bit(_XEN_PCIB_AERHANDLER,
  801. (unsigned long *)&psdev->pdev->sh_info->flags)) {
  802. dev_err(&dev->dev,
  803. "guest with no AER driver should have been killed\n");
  804. kill_domain_by_device(psdev);
  805. goto end;
  806. }
  807. common_process(psdev, 1, XEN_PCI_OP_aer_resume,
  808. PCI_ERS_RESULT_RECOVERED);
  809. end:
  810. if (psdev)
  811. pcistub_device_put(psdev);
  812. up_write(&pcistub_sem);
  813. return;
  814. }
/* AER (Advanced Error Reporting) callbacks handed to the PCI core so that
 * errors on devices assigned to guests are relayed to pcifront.
 */
static const struct pci_error_handlers xen_pcibk_error_handler = {
	.error_detected = xen_pcibk_error_detected,
	.mmio_enabled = xen_pcibk_mmio_enabled,
	.slot_reset = xen_pcibk_slot_reset,
	.resume = xen_pcibk_error_resume,
};
/*
 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
 * for a normal device. I don't want it to be loaded automatically.
 */
static struct pci_driver xen_pcibk_pci_driver = {
	/* The name should be xen_pciback, but until the tools are updated
	 * we will keep it as pciback. */
	.name = PCISTUB_DRIVER_NAME,
	.id_table = pcistub_ids,
	.probe = pcistub_probe,
	.remove = pcistub_remove,
	.err_handler = &xen_pcibk_error_handler,
};
  835. static inline int str_to_slot(const char *buf, int *domain, int *bus,
  836. int *slot, int *func)
  837. {
  838. int parsed = 0;
  839. switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
  840. &parsed)) {
  841. case 3:
  842. *func = -1;
  843. sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
  844. break;
  845. case 2:
  846. *slot = *func = -1;
  847. sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
  848. break;
  849. }
  850. if (parsed && !buf[parsed])
  851. return 0;
  852. /* try again without domain */
  853. *domain = 0;
  854. switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
  855. case 2:
  856. *func = -1;
  857. sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
  858. break;
  859. case 1:
  860. *slot = *func = -1;
  861. sscanf(buf, " %x:*.* %n", bus, &parsed);
  862. break;
  863. }
  864. if (parsed && !buf[parsed])
  865. return 0;
  866. return -EINVAL;
  867. }
  868. static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
  869. *slot, int *func, int *reg, int *size, int *mask)
  870. {
  871. int parsed = 0;
  872. sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
  873. reg, size, mask, &parsed);
  874. if (parsed && !buf[parsed])
  875. return 0;
  876. /* try again without domain */
  877. *domain = 0;
  878. sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
  879. mask, &parsed);
  880. if (parsed && !buf[parsed])
  881. return 0;
  882. return -EINVAL;
  883. }
/* Add a (domain, bus, slot, func) entry to the list of devices pciback
 * should seize.  A negative @slot or @func is a wildcard and is expanded
 * recursively into all 32 slots / 8 functions.
 *
 * Returns 0 on success, -EINVAL for out-of-range values, -ENOMEM on
 * allocation failure.
 */
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id;
	int rc = 0, devfn = PCI_DEVFN(slot, func);

	/* Wildcard slot: one entry per possible slot. */
	if (slot < 0) {
		for (slot = 0; !rc && slot < 32; ++slot)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	/* Wildcard function: one entry per possible function. */
	if (func < 0) {
		for (func = 0; !rc && func < 8; ++func)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	/* Range checks.  When PCI domains are unsupported, any non-zero
	 * domain is invalid.  The PCI_SLOT/PCI_FUNC round-trip through
	 * devfn rejects slot/func values that overflow their bit fields.
	 */
	if ((
#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
    || !defined(CONFIG_PCI_DOMAINS)
	     !pci_domains_supported ? domain :
#endif
	     domain < 0 || domain > 0xffff)
	    || bus < 0 || bus > 0xff
	    || PCI_SLOT(devfn) != slot
	    || PCI_FUNC(devfn) != func)
		return -EINVAL;

	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
	if (!pci_dev_id)
		return -ENOMEM;

	pr_debug("wants to seize %04x:%02x:%02x.%d\n",
		 domain, bus, slot, func);

	/* Fills in the entry and links it onto pcistub_device_ids. */
	pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);

	return 0;
}
  916. static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
  917. {
  918. struct pcistub_device_id *pci_dev_id, *t;
  919. int err = -ENOENT;
  920. unsigned long flags;
  921. spin_lock_irqsave(&device_ids_lock, flags);
  922. list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
  923. slot_list) {
  924. if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
  925. && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
  926. && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
  927. /* Don't break; here because it's possible the same
  928. * slot could be in the list more than once
  929. */
  930. list_del(&pci_dev_id->slot_list);
  931. kfree(pci_dev_id);
  932. err = 0;
  933. pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
  934. domain, bus, slot, func);
  935. }
  936. }
  937. spin_unlock_irqrestore(&device_ids_lock, flags);
  938. return err;
  939. }
/* Register a config-space quirk field (offset @reg, @size bytes, writable
 * bits @mask) for the stub device identified by domain/bus/slot/func.
 *
 * Returns 0 on success, -EINVAL for an invalid reg/size/mask combination,
 * -ENODEV if the device is not owned by pciback, -ENOMEM on allocation
 * failure.
 */
static int pcistub_reg_add(int domain, int bus, int slot, int func,
			   unsigned int reg, unsigned int size,
			   unsigned int mask)
{
	int err = 0;
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct config_field *field;

	/* Reject offsets past extended config space (0xfff) and masks with
	 * bits set outside the field's size.
	 */
	if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
		return -EINVAL;

	/* Takes a reference on success; dropped at "out". */
	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}
	dev = psdev->dev;

	field = kzalloc(sizeof(*field), GFP_ATOMIC);
	if (!field) {
		err = -ENOMEM;
		goto out;
	}

	field->offset = reg;
	field->size = size;
	field->mask = mask;
	field->init = NULL;
	field->reset = NULL;
	field->release = NULL;
	field->clean = xen_pcibk_config_field_free;

	/* On success the quirk code owns @field; free it only on failure. */
	err = xen_pcibk_config_quirks_add_field(dev, field);
	if (err)
		kfree(field);
out:
	if (psdev)
		pcistub_device_put(psdev);
	return err;
}
  976. static ssize_t new_slot_store(struct device_driver *drv, const char *buf,
  977. size_t count)
  978. {
  979. int domain, bus, slot, func;
  980. int err;
  981. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  982. if (err)
  983. goto out;
  984. err = pcistub_device_id_add(domain, bus, slot, func);
  985. out:
  986. if (!err)
  987. err = count;
  988. return err;
  989. }
  990. static DRIVER_ATTR_WO(new_slot);
  991. static ssize_t remove_slot_store(struct device_driver *drv, const char *buf,
  992. size_t count)
  993. {
  994. int domain, bus, slot, func;
  995. int err;
  996. err = str_to_slot(buf, &domain, &bus, &slot, &func);
  997. if (err)
  998. goto out;
  999. err = pcistub_device_id_remove(domain, bus, slot, func);
  1000. out:
  1001. if (!err)
  1002. err = count;
  1003. return err;
  1004. }
  1005. static DRIVER_ATTR_WO(remove_slot);
  1006. static ssize_t slots_show(struct device_driver *drv, char *buf)
  1007. {
  1008. struct pcistub_device_id *pci_dev_id;
  1009. size_t count = 0;
  1010. unsigned long flags;
  1011. spin_lock_irqsave(&device_ids_lock, flags);
  1012. list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
  1013. if (count >= PAGE_SIZE)
  1014. break;
  1015. count += scnprintf(buf + count, PAGE_SIZE - count,
  1016. "%04x:%02x:%02x.%d\n",
  1017. pci_dev_id->domain, pci_dev_id->bus,
  1018. PCI_SLOT(pci_dev_id->devfn),
  1019. PCI_FUNC(pci_dev_id->devfn));
  1020. }
  1021. spin_unlock_irqrestore(&device_ids_lock, flags);
  1022. return count;
  1023. }
  1024. static DRIVER_ATTR_RO(slots);
/* sysfs "irq_handlers" (read-only): for every exported device that has
 * driver data, print "<pci name>:<on|off>:<ack|not ack>ing:<handled>".
 */
static ssize_t irq_handlers_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		/* Skip entries without an attached pci_dev or drvdata. */
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count,
			      "%s:%s:%sing:%ld\n",
			      pci_name(psdev->dev),
			      dev_data->isr_on ? "on" : "off",
			      dev_data->ack_intr ? "ack" : "not ack",
			      dev_data->handled);
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR_RO(irq_handlers);
/* sysfs "irq_handler_state" (write-only): toggle the fake IRQ handler for
 * the device named in @buf; turning it on also re-enables interrupt
 * acknowledgement.  Returns @count on success, negative errno otherwise.
 */
static ssize_t irq_handler_state_store(struct device_driver *drv,
				       const char *buf, size_t count)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		return err;

	/* Takes a reference on success; dropped at "out". */
	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENOENT;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	if (!dev_data) {
		err = -ENOENT;
		goto out;
	}

	dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
		dev_data->irq_name, dev_data->isr_on,
		!dev_data->isr_on);

	dev_data->isr_on = !(dev_data->isr_on);
	if (dev_data->isr_on)
		dev_data->ack_intr = 1;
out:
	if (psdev)
		pcistub_device_put(psdev);
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR_WO(irq_handler_state);
  1086. static ssize_t quirks_store(struct device_driver *drv, const char *buf,
  1087. size_t count)
  1088. {
  1089. int domain, bus, slot, func, reg, size, mask;
  1090. int err;
  1091. err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
  1092. &mask);
  1093. if (err)
  1094. goto out;
  1095. err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
  1096. out:
  1097. if (!err)
  1098. err = count;
  1099. return err;
  1100. }
/* sysfs "quirks" show: dump every registered quirk (device address and
 * PCI IDs) followed by the config fields attached to that device.
 */
static ssize_t quirks_show(struct device_driver *drv, char *buf)
{
	int count = 0;
	unsigned long flags;
	struct xen_pcibk_config_quirk *quirk;
	struct xen_pcibk_dev_data *dev_data;
	const struct config_field *field;
	const struct config_field_entry *cfg_entry;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
		if (count >= PAGE_SIZE)
			goto out;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
				   quirk->pdev->bus->number,
				   PCI_SLOT(quirk->pdev->devfn),
				   PCI_FUNC(quirk->pdev->devfn),
				   quirk->devid.vendor, quirk->devid.device,
				   quirk->devid.subvendor,
				   quirk->devid.subdevice);

		/* NOTE(review): dev_data is dereferenced below without a
		 * NULL check - presumably a quirk only exists for a bound
		 * device whose drvdata is set; confirm against the quirk
		 * registration path.
		 */
		dev_data = pci_get_drvdata(quirk->pdev);

		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
			field = cfg_entry->field;
			if (count >= PAGE_SIZE)
				goto out;

			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "\t\t%08x:%01x:%08x\n",
					   cfg_entry->base_offset +
					   field->offset, field->size,
					   field->mask);
		}
	}

out:
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR_RW(quirks);
/* sysfs "permissive" store: enable permissive config-space access for the
 * device named in @buf.  This is one-way - there is no code path here to
 * switch permissive mode off again.  Returns @count on success, negative
 * errno otherwise.
 */
static ssize_t permissive_store(struct device_driver *drv, const char *buf,
				size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	/* Takes a reference on success; dropped at "release". */
	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}
	if (!dev_data->permissive) {
		dev_data->permissive = 1;
		/* Let user know that what they're doing could be unsafe */
		dev_warn(&psdev->dev->dev, "enabling permissive mode "
			 "configuration space accesses!\n");
		dev_warn(&psdev->dev->dev,
			 "permissive mode is potentially unsafe!\n");
	}
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}
  1174. static ssize_t permissive_show(struct device_driver *drv, char *buf)
  1175. {
  1176. struct pcistub_device *psdev;
  1177. struct xen_pcibk_dev_data *dev_data;
  1178. size_t count = 0;
  1179. unsigned long flags;
  1180. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1181. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1182. if (count >= PAGE_SIZE)
  1183. break;
  1184. if (!psdev->dev)
  1185. continue;
  1186. dev_data = pci_get_drvdata(psdev->dev);
  1187. if (!dev_data || !dev_data->permissive)
  1188. continue;
  1189. count +=
  1190. scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
  1191. pci_name(psdev->dev));
  1192. }
  1193. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1194. return count;
  1195. }
  1196. static DRIVER_ATTR_RW(permissive);
  1197. static void pcistub_exit(void)
  1198. {
  1199. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
  1200. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1201. &driver_attr_remove_slot);
  1202. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
  1203. driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
  1204. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1205. &driver_attr_permissive);
  1206. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1207. &driver_attr_irq_handlers);
  1208. driver_remove_file(&xen_pcibk_pci_driver.driver,
  1209. &driver_attr_irq_handler_state);
  1210. pci_unregister_driver(&xen_pcibk_pci_driver);
  1211. }
/* Parse the "hide" parameter (pci_devs_to_hide), register the stub PCI
 * driver and create its sysfs control files.
 *
 * The parameter is a sequence of "(D:B:S.F)" groups (hexadecimal; slot
 * and function may be '*' wildcards, the domain may be omitted), e.g.
 * "(01:00.0)(0000:02:*.*)".
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __init pcistub_init(void)
{
	int pos = 0;
	int err = 0;
	int domain, bus, slot, func;
	int parsed;

	if (pci_devs_to_hide && *pci_devs_to_hide) {
		do {
			parsed = 0;

			err = sscanf(pci_devs_to_hide + pos,
				     " (%x:%x:%x.%x) %n",
				     &domain, &bus, &slot, &func, &parsed);
			switch (err) {
			case 3:
				/* "(D:B:S.*)" - wildcard function */
				func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:%x.*) %n",
				       &domain, &bus, &slot, &parsed);
				break;
			case 2:
				/* "(D:B:*.*)" - wildcard slot and function */
				slot = func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:*.*) %n",
				       &domain, &bus, &parsed);
				break;
			}

			if (!parsed) {
				/* No domain in this group: retry with 0. */
				domain = 0;
				err = sscanf(pci_devs_to_hide + pos,
					     " (%x:%x.%x) %n",
					     &bus, &slot, &func, &parsed);
				switch (err) {
				case 2:
					func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:%x.*) %n",
					       &bus, &slot, &parsed);
					break;
				case 1:
					slot = func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:*.*) %n",
					       &bus, &parsed);
					break;
				}
			}

			if (parsed <= 0)
				goto parse_error;

			err = pcistub_device_id_add(domain, bus, slot, func);
			if (err)
				goto out;

			/* Advance past the group just consumed. */
			pos += parsed;
		} while (pci_devs_to_hide[pos]);
	}

	/* If we're the first PCI Device Driver to register, we're the
	 * first one to get offered PCI devices as they become
	 * available (and thus we can be the first to grab them)
	 */
	err = pci_register_driver(&xen_pcibk_pci_driver);
	if (err < 0)
		goto out;

	err = driver_create_file(&xen_pcibk_pci_driver.driver,
				 &driver_attr_new_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_remove_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_slots);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_quirks);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_permissive);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handlers);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handler_state);

	/* Partial sysfs setup: unwind everything, including the driver. */
	if (err)
		pcistub_exit();

out:
	return err;

parse_error:
	pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
	       pci_devs_to_hide + pos);
	return -EINVAL;
}
#ifndef MODULE
/*
 * When built in, run pcistub_init() at fs_initcall time, which happens
 * before device_initcall, so xen_pcibk *should* get called first (b/c we
 * want to suck up any device before other drivers get a chance by being
 * the first pci device driver to register).  When built as a module,
 * xen_pcibk_init() calls pcistub_init() instead.
 */
fs_initcall(pcistub_init);
#endif
  1312. #ifdef CONFIG_PCI_IOV
  1313. static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
  1314. {
  1315. struct pcistub_device *psdev = NULL;
  1316. unsigned long flags;
  1317. bool found = false;
  1318. spin_lock_irqsave(&pcistub_devices_lock, flags);
  1319. list_for_each_entry(psdev, &pcistub_devices, dev_list) {
  1320. if (!psdev->pdev && psdev->dev != pdev
  1321. && pci_physfn(psdev->dev) == pdev) {
  1322. found = true;
  1323. break;
  1324. }
  1325. }
  1326. spin_unlock_irqrestore(&pcistub_devices_lock, flags);
  1327. if (found)
  1328. return psdev;
  1329. return NULL;
  1330. }
  1331. static int pci_stub_notifier(struct notifier_block *nb,
  1332. unsigned long action, void *data)
  1333. {
  1334. struct device *dev = data;
  1335. const struct pci_dev *pdev = to_pci_dev(dev);
  1336. if (action != BUS_NOTIFY_UNBIND_DRIVER)
  1337. return NOTIFY_DONE;
  1338. if (!pdev->is_physfn)
  1339. return NOTIFY_DONE;
  1340. for (;;) {
  1341. struct pcistub_device *psdev = find_vfs(pdev);
  1342. if (!psdev)
  1343. break;
  1344. device_release_driver(&psdev->dev->dev);
  1345. }
  1346. return NOTIFY_DONE;
  1347. }
  1348. static struct notifier_block pci_stub_nb = {
  1349. .notifier_call = pci_stub_notifier,
  1350. };
  1351. #endif
/* Driver entry point.  Only runs in the initial (dom0) domain: sets up
 * config-space handling, the stub driver (directly when built as a
 * module; built-in kernels ran pcistub_init() earlier via fs_initcall),
 * late device initialisation and the xenbus backend.
 */
static int __init xen_pcibk_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = xen_pcibk_config_init();
	if (err)
		return err;

#ifdef MODULE
	err = pcistub_init();
	if (err < 0)
		return err;
#endif

	pcistub_init_devices_late();
	err = xen_pcibk_xenbus_register();
	if (err)
		/* xenbus failed: unwind the stub driver and its sysfs files. */
		pcistub_exit();
#ifdef CONFIG_PCI_IOV
	else
		bus_register_notifier(&pci_bus_type, &pci_stub_nb);
#endif

	return err;
}
/* Module exit: undo xen_pcibk_init() in reverse order - notifier first,
 * then the xenbus backend, then the stub driver.
 */
static void __exit xen_pcibk_cleanup(void)
{
#ifdef CONFIG_PCI_IOV
	bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
#endif
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
}
module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);

MODULE_LICENSE("Dual BSD/GPL");
/* Allows the Xen toolstack to load this module as the "pci" backend. */
MODULE_ALIAS("xen-backend:pci");