/*
 * PCI Stub Driver - Grabs devices in backend to be exported later
 *
 * Ryan Wilson <hap9@epoch.ncsc.mil>
 * Chris Bookholt <hap10@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

#define PCISTUB_DRIVER_NAME "pciback"

static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;

/* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure
 * operations: we want to avoid a xen_pcibk device being removed in the
 * middle of an AER operation.
 */
static DECLARE_RWSEM(pcistub_sem);
module_param_named(hide, pci_devs_to_hide, charp, 0444);

struct pcistub_device_id {
	struct list_head slot_list;
	int domain;
	unsigned char bus;
	unsigned int devfn;
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;
	spinlock_t lock;
	struct pci_dev *dev;
	struct xen_pcibk_device *pdev; /* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);

static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "pcistub_device_alloc\n");

	psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
	if (!psdev)
		return NULL;

	psdev->dev = pci_dev_get(dev);
	if (!psdev->dev) {
		kfree(psdev);
		return NULL;
	}

	kref_init(&psdev->kref);
	spin_lock_init(&psdev->lock);

	return psdev;
}

/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the locked variant of the reset function: we get here via
	 * "unbind", which already holds the device_lock mutex.
	 */
	__pci_reset_function_locked(dev);
	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_info(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		if (err && err != -ENOSYS)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	pci_clear_dev_assigned(dev);
	pci_dev_put(dev);

	kfree(psdev);
}

static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}

static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}

static struct pcistub_device *pcistub_device_find_locked(int domain, int bus,
							 int slot, int func)
{
	struct pcistub_device *psdev;

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev != NULL
		    && domain == pci_domain_nr(psdev->dev->bus)
		    && bus == psdev->dev->bus->number
		    && slot == PCI_SLOT(psdev->dev->devfn)
		    && func == PCI_FUNC(psdev->dev->devfn)) {
			return psdev;
		}
	}

	return NULL;
}

static struct pcistub_device *pcistub_device_find(int domain, int bus,
						  int slot, int func)
{
	struct pcistub_device *psdev;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		pcistub_device_get(psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return psdev;
}

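/*
 * Mark a stub device as in use by @pdev and return its pci_dev with an
 * extra reference held, or NULL (dropping the reference again) if it is
 * already claimed by another xen_pcibk_device.
 */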
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}

struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
					    int domain, int bus,
					    int slot, int func)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	psdev = pcistub_device_find_locked(domain, bus, slot, func);
	if (psdev)
		found_dev = pcistub_device_get_pci_dev(pdev, psdev);

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
				    struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

/*
 * Called when:
 *  - XenBus state has been reconfigured (pci unplug). See xen_pcibk_remove_device
 *  - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove
 *  - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove
 *  - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
 *
 * As such we have to be careful.
 *
 * To make this easier, the caller has to hold the device lock.
 */
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;
	struct xen_pcibk_dev_data *dev_data;
	int ret;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (WARN_ON(!found_psdev))
		return;

	/* Hold this semaphore so that the link between pcistub and xen_pcibk
	 * is not broken while an AER operation is in progress.
	 */
	down_write(&pcistub_sem);

	/* Cleanup our device
	 * (so it's ready for the next domain)
	 */
	device_lock_assert(&dev->dev);
	__pci_reset_function_locked(dev);

	dev_data = pci_get_drvdata(dev);
	ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
	if (!ret) {
		/*
		 * The usual sequence is pci_save_state & pci_restore_state
		 * but the guest might have messed the configuration space up.
		 * Use the initial version (when device was bound to us).
		 */
		pci_restore_state(dev);
	} else
		dev_info(&dev->dev, "Could not reload PCI state\n");

	/* This disables the device. */
	xen_pcibk_reset_device(dev);

	/* And clean up our emulated fields. */
	xen_pcibk_config_reset_dev(dev);
	xen_pcibk_config_free_dyn_fields(dev);

	xen_unregister_device_domain_owner(dev);

	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}

static int pcistub_match_one(struct pci_dev *dev,
			     struct pcistub_device_id *pdev_id)
{
	/* Match the specified device by domain, bus, slot, func and also if
	 * any of the device's parent bridges match.
	 */
	for (; dev != NULL; dev = dev->bus->self) {
		if (pci_domain_nr(dev->bus) == pdev_id->domain
		    && dev->bus->number == pdev_id->bus
		    && dev->devfn == pdev_id->devfn)
			return 1;

		/* Sometimes topmost bridge links to itself. */
		if (dev == dev->bus->self)
			break;
	}

	return 0;
}

static int pcistub_match(struct pci_dev *dev)
{
	struct pcistub_device_id *pdev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
		if (pcistub_match_one(dev, pdev_id)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return found;
}

static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices) yet. If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
				+ strlen(pci_name(dev)) + 1, GFP_ATOMIC);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Setup name for fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the pci device's true irq (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

	if (dev->msix_cap) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err && err != -ENOSYS)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}

	/* Now disable the device (this also ensures some private device
	 * data is setup before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	pci_set_dev_assigned(dev);
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}

/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}

static void pcistub_device_id_add_list(struct pcistub_device_id *new,
				       int domain, int bus, unsigned int devfn)
{
	struct pcistub_device_id *pci_dev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);

	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus &&
		    pci_dev_id->devfn == devfn) {
			found = 1;
			break;
		}
	}

	if (!found) {
		new->domain = domain;
		new->bus = bus;
		new->devfn = devfn;
		list_add_tail(&new->slot_list, &pcistub_device_ids);
	}

	spin_unlock_irqrestore(&device_ids_lock, flags);

	if (found)
		kfree(new);
}

static int pcistub_seize(struct pci_dev *dev,
			 struct pcistub_device_id *pci_dev_id)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev) {
		kfree(pci_dev_id);
		return -ENOMEM;
	}

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err) {
		kfree(pci_dev_id);
		pcistub_device_put(psdev);
	} else if (pci_dev_id)
		pcistub_device_id_add_list(pci_dev_id, pci_domain_nr(dev->bus),
					   dev->bus->number, dev->devfn);

	return err;
}

/* Called on 'bind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0, match;
	struct pcistub_device_id *pci_dev_id = NULL;

	dev_dbg(&dev->dev, "probing...\n");

	match = pcistub_match(dev);

	if ((dev->driver_override &&
	     !strcmp(dev->driver_override, PCISTUB_DRIVER_NAME)) ||
	    match) {

		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		if (!match) {
			pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_ATOMIC);
			if (!pci_dev_id) {
				err = -ENOMEM;
				goto out;
			}
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev, pci_dev_id);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}

/* Called on 'unbind'. This means we must _NOT_ call pci_reset_function or
 * other functions that take the sysfs lock. */
static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove %s\n",
			found_psdev->pdev ? "- in-use" : "");

		if (found_psdev->pdev) {
			int domid = xen_find_device_domain_owner(dev);

			pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
				pci_name(found_psdev->dev), domid);
			pr_warn("****** driver domain may still access this device's i/o resources!\n");
			pr_warn("****** shutdown driver domain before binding device\n");
			pr_warn("****** to other drivers or domains\n");

			/* N.B. This ends up calling pcistub_put_pci_dev which ends up
			 * doing the FLR. */
			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev,
						  false /* caller holds the lock. */);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}

static const struct pci_device_id pcistub_ids[] = {
	{
		.vendor = PCI_ANY_ID,
		.device = PCI_ANY_ID,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{0,},
};

#define PCI_NODENAME_MAX 40

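/*
 * Flag an AER failure for the guest that owns this device by writing
 * "aerState" = "aerfail" under its pciback node in xenstore; the toolstack
 * is expected to react by shutting the guest down (hence the name).
 */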
static void kill_domain_by_device(struct pcistub_device *psdev)
{
	struct xenbus_transaction xbt;
	int err;
	char nodename[PCI_NODENAME_MAX];

	BUG_ON(!psdev);
	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
		 psdev->pdev->xdev->otherend_id);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		dev_err(&psdev->dev->dev,
			"error %d when start xenbus transaction\n", err);
		return;
	}
	/* PV AER handlers will set this flag */
	xenbus_printf(xbt, nodename, "aerState", "aerfail");
	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		dev_err(&psdev->dev->dev,
			"error %d when end xenbus transaction\n", err);
		return;
	}
}

/* For each AER recovery step (error_detected, mmio_enabled, etc.) the
 * frontend and the backend have to cooperate. In xen_pcibk these steps all
 * do a similar job: send a service request and wait for the frontend's
 * response.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	struct xen_pcibk_device *pdev = psdev->pdev;
	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
	int ret;

	/* with PV AER drivers */
	aer_op = &(sh_info->aer_op);
	aer_op->cmd = aer_cmd;
	/* useful for error_detected callback */
	aer_op->err = state;
	/* pcifront_end BDF */
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus, &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev,
			DRV_NAME ": failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();

	dev_dbg(&psdev->dev->dev,
		DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);

	/* Local flag to mark that an AER request is pending; the xen_pcibk
	 * callback uses it to decide whether to check for the AER service
	 * ack signal from pcifront.
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	/* It is possible that a pcifront conf_read_write ops request invokes
	 * the callback, which causes a spurious execution of wake_up.
	 * Yet it is harmless and better than a spinlock here.
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&sh_info->flags);
	wmb();
	notify_remote_via_irq(pdev->evtchn_irq);

	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &sh_info->flags)), 300*HZ);

	if (!ret) {
		if (test_bit(_XEN_PCIB_active,
			     (unsigned long *)&sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
				  (unsigned long *)&sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);

	if (test_bit(_XEN_PCIF_active,
		     (unsigned long *)&sh_info->flags)) {
		dev_dbg(&psdev->dev->dev,
			"schedule pci_conf service in " DRV_NAME "\n");
		xen_pcibk_test_and_schedule_op(psdev->pdev);
	}

	res = (pci_ers_result_t)aer_op->err;
	return res;
}

/*
 * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case the
 * device driver can provide this service, and then wait for the pcifront ack.
 * @dev: pointer to the PCI device
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront in case
 * the device driver can provide this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER mmio_enabled service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_detected: send the error_detected request to pcifront in
 * case the device driver can provide this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 * @error: the current PCI connection state
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
						 pci_channel_state_t error)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_CAN_RECOVER;
	dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	/* The guest owns the device yet no AER handler is registered;
	 * kill the guest.
	 */
	if (!test_bit(_XEN_PCIB_AERHANDLER,
		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);

	if (result == PCI_ERS_RESULT_NONE ||
	    result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER error_detected service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_resume: send the error_resume request to pcifront in case
 * the device driver can provide this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 */
static void xen_pcibk_error_resume(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		      (unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	common_process(psdev, 1, XEN_PCI_OP_aer_resume,
		       PCI_ERS_RESULT_RECOVERED);
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return;
}

/* xen_pcibk AER handling */
static const struct pci_error_handlers xen_pcibk_error_handler = {
	.error_detected = xen_pcibk_error_detected,
	.mmio_enabled = xen_pcibk_mmio_enabled,
	.slot_reset = xen_pcibk_slot_reset,
	.resume = xen_pcibk_error_resume,
};

/*
 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
 * for a normal device. I don't want it to be loaded automatically.
 */
static struct pci_driver xen_pcibk_pci_driver = {
	/* The name should be xen_pciback, but until the tools are updated
	 * we will keep it as pciback. */
	.name = PCISTUB_DRIVER_NAME,
	.id_table = pcistub_ids,
	.probe = pcistub_probe,
	.remove = pcistub_remove,
	.err_handler = &xen_pcibk_error_handler,
};

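/*
 * Parse a "[domain:]bus:slot.func" slot string (hex fields; '*' may be used
 * as a wildcard for slot and/or func). Wildcarded fields are returned as -1.
 * Returns 0 on a complete parse, -EINVAL otherwise.
 */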
static inline int str_to_slot(const char *buf, int *domain, int *bus,
			      int *slot, int *func)
{
	int parsed = 0;

	switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
		       &parsed)) {
	case 3:
		*func = -1;
		sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
		break;
	case 2:
		*slot = *func = -1;
		sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
	case 2:
		*func = -1;
		sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
		break;
	case 1:
		*slot = *func = -1;
		sscanf(buf, " %x:*.* %n", bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}

static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
			       *slot, int *func, int *reg, int *size, int *mask)
{
	int parsed = 0;

	sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
	       reg, size, mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
	       mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}

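/*
 * Add a device to the seize list. Negative slot/func values act as
 * wildcards and are expanded into all 32 slots / all 8 functions.
 */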
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id;
	int rc = 0, devfn = PCI_DEVFN(slot, func);

	if (slot < 0) {
		for (slot = 0; !rc && slot < 32; ++slot)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if (func < 0) {
		for (func = 0; !rc && func < 8; ++func)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if ((
#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
    || !defined(CONFIG_PCI_DOMAINS)
	     !pci_domains_supported ? domain :
#endif
	     domain < 0 || domain > 0xffff)
	    || bus < 0 || bus > 0xff
	    || PCI_SLOT(devfn) != slot
	    || PCI_FUNC(devfn) != func)
		return -EINVAL;

	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
	if (!pci_dev_id)
		return -ENOMEM;

	pr_debug("wants to seize %04x:%02x:%02x.%d\n",
		 domain, bus, slot, func);

	pcistub_device_id_add_list(pci_dev_id, domain, bus, devfn);

	return 0;
}

static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id, *t;
	int err = -ENOENT;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
				 slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
		    && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
		    && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
			/* Don't break; here because it's possible the same
			 * slot could be in the list more than once
			 */
			list_del(&pci_dev_id->slot_list);
			kfree(pci_dev_id);

			err = 0;

			pr_debug("removed %04x:%02x:%02x.%d from seize list\n",
				 domain, bus, slot, func);
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return err;
}

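/*
 * Register an extra config-space quirk field (offset/size/mask) for an
 * already seized device; used by the "quirks" driver attribute below.
 */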
static int pcistub_reg_add(int domain, int bus, int slot, int func,
			   unsigned int reg, unsigned int size,
			   unsigned int mask)
{
	int err = 0;
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct config_field *field;

	if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
		return -EINVAL;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}
	dev = psdev->dev;

	field = kzalloc(sizeof(*field), GFP_ATOMIC);
	if (!field) {
		err = -ENOMEM;
		goto out;
	}

	field->offset = reg;
	field->size = size;
	field->mask = mask;
	field->init = NULL;
	field->reset = NULL;
	field->release = NULL;
	field->clean = xen_pcibk_config_field_free;

	err = xen_pcibk_config_quirks_add_field(dev, field);
	if (err)
		kfree(field);
out:
	if (psdev)
		pcistub_device_put(psdev);
	return err;
}

static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
				size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_add(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);

static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
				   size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_remove(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);

static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device_id *pci_dev_id;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (count >= PAGE_SIZE)
			break;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%04x:%02x:%02x.%d\n",
				   pci_dev_id->domain, pci_dev_id->bus,
				   PCI_SLOT(pci_dev_id->devfn),
				   PCI_FUNC(pci_dev_id->devfn));
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);

static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count,
			      "%s:%s:%sing:%ld\n",
			      pci_name(psdev->dev),
			      dev_data->isr_on ? "on" : "off",
			      dev_data->ack_intr ? "ack" : "not ack",
			      dev_data->handled);
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);

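/*
 * Writing a slot address here toggles whether the fake IRQ handler is
 * active for that device (and re-enables interrupt acknowledgment when it
 * is switched on).
 */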
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
					  const char *buf,
					  size_t count)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		return err;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENOENT;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	if (!dev_data) {
		err = -ENOENT;
		goto out;
	}

	dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
		dev_data->irq_name, dev_data->isr_on,
		!dev_data->isr_on);

	dev_data->isr_on = !(dev_data->isr_on);
	if (dev_data->isr_on)
		dev_data->ack_intr = 1;
out:
	if (psdev)
		pcistub_device_put(psdev);
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
		   pcistub_irq_handler_switch);

static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
				 size_t count)
{
	int domain, bus, slot, func, reg, size, mask;
	int err;

	err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
			   &mask);
	if (err)
		goto out;

	err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);

out:
	if (!err)
		err = count;
	return err;
}

static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
{
	int count = 0;
	unsigned long flags;
	struct xen_pcibk_config_quirk *quirk;
	struct xen_pcibk_dev_data *dev_data;
	const struct config_field *field;
	const struct config_field_entry *cfg_entry;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
		if (count >= PAGE_SIZE)
			goto out;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
				   quirk->pdev->bus->number,
				   PCI_SLOT(quirk->pdev->devfn),
				   PCI_FUNC(quirk->pdev->devfn),
				   quirk->devid.vendor, quirk->devid.device,
				   quirk->devid.subvendor,
				   quirk->devid.subdevice);

		dev_data = pci_get_drvdata(quirk->pdev);

		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
			field = cfg_entry->field;
			if (count >= PAGE_SIZE)
				goto out;

			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "\t\t%08x:%01x:%08x\n",
					   cfg_entry->base_offset +
					   field->offset, field->size,
					   field->mask);
		}
	}

out:
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
		   pcistub_quirk_add);

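/*
 * Writing a slot address here puts the device into "permissive" mode,
 * which lets the guest write config-space fields that pciback would
 * otherwise filter.
 */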
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
			      size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}

	if (!dev_data->permissive) {
		dev_data->permissive = 1;
		/* Let user know that what they're doing could be unsafe */
		dev_warn(&psdev->dev->dev, "enabling permissive mode "
			 "configuration space accesses!\n");
		dev_warn(&psdev->dev->dev,
			 "permissive mode is potentially unsafe!\n");
	}
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}

static ssize_t permissive_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data || !dev_data->permissive)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
			      pci_name(psdev->dev));
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
		   permissive_add);

static void pcistub_exit(void)
{
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_remove_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_permissive);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handlers);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handler_state);
	pci_unregister_driver(&xen_pcibk_pci_driver);
}

static int __init pcistub_init(void)
{
	int pos = 0;
	int err = 0;
	int domain, bus, slot, func;
	int parsed;

	if (pci_devs_to_hide && *pci_devs_to_hide) {
		do {
			parsed = 0;

			err = sscanf(pci_devs_to_hide + pos,
				     " (%x:%x:%x.%x) %n",
				     &domain, &bus, &slot, &func, &parsed);
			switch (err) {
			case 3:
				func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:%x.*) %n",
				       &domain, &bus, &slot, &parsed);
				break;
			case 2:
				slot = func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:*.*) %n",
				       &domain, &bus, &parsed);
				break;
			}

			if (!parsed) {
				domain = 0;
				err = sscanf(pci_devs_to_hide + pos,
					     " (%x:%x.%x) %n",
					     &bus, &slot, &func, &parsed);
				switch (err) {
				case 2:
					func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:%x.*) %n",
					       &bus, &slot, &parsed);
					break;
				case 1:
					slot = func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:*.*) %n",
					       &bus, &parsed);
					break;
				}
			}

			if (parsed <= 0)
				goto parse_error;

			err = pcistub_device_id_add(domain, bus, slot, func);
			if (err)
				goto out;

			pos += parsed;
		} while (pci_devs_to_hide[pos]);
	}

	/* If we're the first PCI Device Driver to register, we're the
	 * first one to get offered PCI devices as they become
	 * available (and thus we can be the first to grab them)
	 */
	err = pci_register_driver(&xen_pcibk_pci_driver);
	if (err < 0)
		goto out;

	err = driver_create_file(&xen_pcibk_pci_driver.driver,
				 &driver_attr_new_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_remove_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_slots);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_quirks);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_permissive);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handlers);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handler_state);
	if (err)
		pcistub_exit();

out:
	return err;

parse_error:
	pr_err("Error parsing pci_devs_to_hide at \"%s\"\n",
	       pci_devs_to_hide + pos);
	return -EINVAL;
}

#ifndef MODULE
/*
 * fs_initcall happens before device_initcall
 * so xen_pcibk *should* get called first (b/c we
 * want to suck up any device before other drivers
 * get a chance by being the first pci device
 * driver to register)
 */
fs_initcall(pcistub_init);
#endif

#ifdef CONFIG_PCI_IOV
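/* Find a stub-owned, currently unassigned VF whose physical function is @pdev. */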
static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
{
	struct pcistub_device *psdev = NULL;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (!psdev->pdev && psdev->dev != pdev
		    && pci_physfn(psdev->dev) == pdev) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (found)
		return psdev;
	return NULL;
}

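/*
 * When a PF driver is being unbound, release any of its VFs that are still
 * bound to pciback so they can be cleaned up first.
 */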
static int pci_stub_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct device *dev = data;
	const struct pci_dev *pdev = to_pci_dev(dev);

	if (action != BUS_NOTIFY_UNBIND_DRIVER)
		return NOTIFY_DONE;

	if (!pdev->is_physfn)
		return NOTIFY_DONE;

	for (;;) {
		struct pcistub_device *psdev = find_vfs(pdev);
		if (!psdev)
			break;
		device_release_driver(&psdev->dev->dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block pci_stub_nb = {
	.notifier_call = pci_stub_notifier,
};
#endif

static int __init xen_pcibk_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = xen_pcibk_config_init();
	if (err)
		return err;

#ifdef MODULE
	err = pcistub_init();
	if (err < 0)
		return err;
#endif

	pcistub_init_devices_late();
	err = xen_pcibk_xenbus_register();
	if (err)
		pcistub_exit();
#ifdef CONFIG_PCI_IOV
	else
		bus_register_notifier(&pci_bus_type, &pci_stub_nb);
#endif

	return err;
}

static void __exit xen_pcibk_cleanup(void)
{
#ifdef CONFIG_PCI_IOV
	bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
#endif
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
}

module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:pci");