  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2016 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * The full GNU General Public License is included in this distribution in
  16. * the file called "COPYING".
  17. *
  18. * Adapted from:
  19. *
  20. * virtio for kvm on s390
  21. *
  22. * Copyright IBM Corp. 2008
  23. *
  24. * This program is free software; you can redistribute it and/or modify
  25. * it under the terms of the GNU General Public License (version 2 only)
  26. * as published by the Free Software Foundation.
  27. *
  28. * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
  29. *
  30. * Intel Virtio Over PCIe (VOP) driver.
  31. *
  32. */
  33. #include <linux/delay.h>
  34. #include <linux/module.h>
  35. #include <linux/sched.h>
  36. #include <linux/dma-mapping.h>
  37. #include "vop_main.h"
  38. #define VOP_MAX_VRINGS 4
/*
 * _vop_vdev - Allocated per virtio device instance injected by the peer.
 *
 * @vdev: Virtio device
 * @desc: Virtio device page descriptor (in remote/io memory)
 * @dc: Virtio device control, placed right after the aligned descriptor
 * @vpdev: VOP device which is the parent for this virtio device
 * @vr: ioremap'ed VRING buffers, one per virtqueue
 * @used: DMA addresses of the locally allocated used rings, one per virtqueue
 * @used_size: Size of each locally allocated used ring
 * @reset_done: Track whether VOP reset is complete
 * @virtio_cookie: Cookie returned upon requesting a interrupt
 * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
 * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
 * @dnode: The destination node
 */
struct _vop_vdev {
        struct virtio_device vdev;
        struct mic_device_desc __iomem *desc;
        struct mic_device_ctrl __iomem *dc;
        struct vop_device *vpdev;
        void __iomem *vr[VOP_MAX_VRINGS];
        dma_addr_t used[VOP_MAX_VRINGS];
        int used_size[VOP_MAX_VRINGS];
        struct completion reset_done;
        struct mic_irq *virtio_cookie;
        int c2h_vdev_db;
        int h2c_vdev_db;
        int dnode;
};
/* Map an embedded virtio_device back to its containing _vop_vdev */
#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)

/* Descriptor size rounded up to an 8 byte boundary */
#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)

/* Helper API to obtain the parent of the virtio device */
static inline struct device *_vop_dev(struct _vop_vdev *vdev)
{
        return vdev->vdev.dev.parent;
}
/*
 * Size of the whole device descriptor: fixed header, one vqconfig per
 * virtqueue, two feature bitmaps (device half and driver half) and the
 * device config space. All fields are read via io accessors.
 */
static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
{
        return sizeof(*desc)
                + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
                + ioread8(&desc->feature_len) * 2
                + ioread8(&desc->config_len);
}
/* The vqconfig array starts immediately after the descriptor header */
static inline struct mic_vqconfig __iomem *
_vop_vq_config(struct mic_device_desc __iomem *desc)
{
        return (struct mic_vqconfig __iomem *)(desc + 1);
}
/* Feature bitmaps follow the num_vq vqconfig entries */
static inline u8 __iomem *
_vop_vq_features(struct mic_device_desc __iomem *desc)
{
        return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
}
  93. static inline u8 __iomem *
  94. _vop_vq_configspace(struct mic_device_desc __iomem *desc)
  95. {
  96. return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
  97. }
/* Aligned descriptor plus the trailing mic_device_ctrl block */
static inline unsigned
_vop_total_desc_size(struct mic_device_desc __iomem *desc)
{
        return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
}
  103. /* This gets the device's feature bits. */
  104. static u64 vop_get_features(struct virtio_device *vdev)
  105. {
  106. unsigned int i, bits;
  107. u32 features = 0;
  108. struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
  109. u8 __iomem *in_features = _vop_vq_features(desc);
  110. int feature_len = ioread8(&desc->feature_len);
  111. bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
  112. for (i = 0; i < bits; i++)
  113. if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
  114. features |= BIT(i);
  115. return features;
  116. }
/*
 * Record the features accepted by the driver in the second half of the
 * feature bitmap in the device page. Always returns 0.
 */
static int vop_finalize_features(struct virtio_device *vdev)
{
        unsigned int i, bits;
        struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
        u8 feature_len = ioread8(&desc->feature_len);
        /* Second half of bitmap is features we accept. */
        u8 __iomem *out_features =
                _vop_vq_features(desc) + feature_len;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Clear the driver half before setting the accepted bits */
        memset_io(out_features, 0, feature_len);
        bits = min_t(unsigned, feature_len,
                     sizeof(vdev->features)) * 8;
        for (i = 0; i < bits; i++) {
                /* read-modify-write one byte at a time via io accessors */
                if (__virtio_test_bit(vdev, i))
                        iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
                                 &out_features[i / 8]);
        }
        return 0;
}
  137. /*
  138. * Reading and writing elements in config space
  139. */
  140. static void vop_get(struct virtio_device *vdev, unsigned int offset,
  141. void *buf, unsigned len)
  142. {
  143. struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
  144. if (offset + len > ioread8(&desc->config_len))
  145. return;
  146. memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
  147. }
  148. static void vop_set(struct virtio_device *vdev, unsigned int offset,
  149. const void *buf, unsigned len)
  150. {
  151. struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
  152. if (offset + len > ioread8(&desc->config_len))
  153. return;
  154. memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
  155. }
  156. /*
  157. * The operations to get and set the status word just access the status
  158. * field of the device descriptor. set_status also interrupts the host
  159. * to tell about status changes.
  160. */
  161. static u8 vop_get_status(struct virtio_device *vdev)
  162. {
  163. return ioread8(&to_vopvdev(vdev)->desc->status);
  164. }
/* Write the virtio status byte and ring the host doorbell. */
static void vop_set_status(struct virtio_device *dev, u8 status)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;

        /*
         * A zero status is only written through the reset path
         * (vop_reset_inform_host()), so ignore it here.
         */
        if (!status)
                return;
        iowrite8(status, &vdev->desc->status);
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
}
/* Inform host on a virtio device reset and wait for ack from host */
static void vop_reset_inform_host(struct virtio_device *dev)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct mic_device_ctrl __iomem *dc = vdev->dc;
        struct vop_device *vpdev = vdev->vpdev;
        int retry;

        /* Clear the previous ack, request the reset, then kick the host */
        iowrite8(0, &dc->host_ack);
        iowrite8(1, &dc->vdev_reset);
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);

        /* Wait till host completes all card accesses and acks the reset */
        for (retry = 100; retry--;) {
                if (ioread8(&dc->host_ack))
                        break;
                msleep(100);
        };

        dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);

        /* Reset status to 0 in case we timed out */
        iowrite8(0, &vdev->desc->status);
}
  194. static void vop_reset(struct virtio_device *dev)
  195. {
  196. struct _vop_vdev *vdev = to_vopvdev(dev);
  197. dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
  198. __func__, dev->id.device);
  199. vop_reset_inform_host(dev);
  200. complete_all(&vdev->reset_done);
  201. }
  202. /*
  203. * The virtio_ring code calls this API when it wants to notify the Host.
  204. */
  205. static bool vop_notify(struct virtqueue *vq)
  206. {
  207. struct _vop_vdev *vdev = vq->priv;
  208. struct vop_device *vpdev = vdev->vpdev;
  209. vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
  210. return true;
  211. }
/*
 * Tear down virtqueue @n: unmap and free the locally allocated used ring
 * that vop_find_vq() installed, delete the vring and release the io
 * mapping of the host-side ring.
 */
static void vop_del_vq(struct virtqueue *vq, int n)
{
        struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
        /*
         * Relies on struct vring living right after struct virtqueue
         * inside vring_virtqueue — same layout assumption documented in
         * vop_find_vq().
         */
        struct vring *vr = (struct vring *)(vq + 1);
        struct vop_device *vpdev = vdev->vpdev;

        dma_unmap_single(&vpdev->dev, vdev->used[n],
                         vdev->used_size[n], DMA_BIDIRECTIONAL);
        free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
        vring_del_virtqueue(vq);
        vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
        vdev->vr[n] = NULL;
}
/* Delete every virtqueue of this device, in list order. */
static void vop_del_vqs(struct virtio_device *dev)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct virtqueue *vq, *n;
        int idx = 0;

        dev_dbg(_vop_dev(vdev), "%s\n", __func__);

        /* safe iteration: vop_del_vq() removes vq from dev->vqs */
        list_for_each_entry_safe(vq, n, &dev->vqs, list)
                vop_del_vq(vq, idx++);
}
/*
 * This routine will assign vring's allocated in host/io memory. Code in
 * virtio_ring.c however continues to access this io memory as if it were local
 * memory without io accessors.
 *
 * Returns the new virtqueue or an ERR_PTR() on failure; on success the
 * used ring has been re-pointed at locally allocated, DMA-mapped pages
 * and the host has been told its DMA address via vqconfig->used_address.
 */
static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                                     unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name)
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;
        struct mic_vqconfig __iomem *vqconfig;
        struct mic_vqconfig config;
        struct virtqueue *vq;
        void __iomem *va;
        struct _mic_vring_info __iomem *info;
        void *used;
        int vr_size, _vr_size, err, magic;
        struct vring *vr;
        u8 type = ioread8(&vdev->desc->type);

        if (index >= ioread8(&vdev->desc->num_vq))
                return ERR_PTR(-ENOENT);

        if (!name)
                return ERR_PTR(-ENOENT);

        /* First assign the vring's allocated in host memory */
        vqconfig = _vop_vq_config(vdev->desc) + index;
        /* snapshot the io-memory config into a local copy */
        memcpy_fromio(&config, vqconfig, sizeof(config));
        _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
        /* map the ring plus the trailing _mic_vring_info (magic word) */
        vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
        va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
                        vr_size);
        if (!va)
                return ERR_PTR(-ENOMEM);
        vdev->vr[index] = va;
        memset_io(va, 0x0, _vr_size);
        vq = vring_new_virtqueue(
                                index,
                                le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
                                dev,
                                false,
                                (void __force *)va, vop_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
                goto unmap;
        }
        info = va + _vr_size;
        magic = ioread32(&info->magic);

        /* Sanity-check that the host laid out this ring for us */
        if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
                err = -EIO;
                goto unmap;
        }

        /* Allocate and reassign used ring now */
        vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
                                            sizeof(struct vring_used_elem) *
                                            le16_to_cpu(config.num));
        used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(vdev->used_size[index]));
        if (!used) {
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto del_vq;
        }
        vdev->used[index] = dma_map_single(&vpdev->dev, used,
                                           vdev->used_size[index],
                                           DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto free_used;
        }
        /* publish the DMA address of the new used ring to the host */
        writeq(vdev->used[index], &vqconfig->used_address);
        /*
         * To reassign the used ring here we are directly accessing
         * struct vring_virtqueue which is a private data structure
         * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
         * vring_new_virtqueue() would ensure that
         * (&vq->vring == (struct vring *) (&vq->vq + 1));
         */
        vr = (struct vring *)(vq + 1);
        vr->used = used;

        vq->priv = vdev;
        return vq;
free_used:
        free_pages((unsigned long)used,
                   get_order(vdev->used_size[index]));
del_vq:
        vring_del_virtqueue(vq);
unmap:
        vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
        return ERR_PTR(err);
}
/*
 * Create @nvqs virtqueues, then notify the host that the used rings
 * have been re-assigned and wait (polling, up to ~10s) for it to
 * acknowledge. All created vqs are torn down on any failure.
 */
static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
                        struct virtqueue *vqs[],
                        vq_callback_t *callbacks[],
                        const char * const names[])
{
        struct _vop_vdev *vdev = to_vopvdev(dev);
        struct vop_device *vpdev = vdev->vpdev;
        struct mic_device_ctrl __iomem *dc = vdev->dc;
        int i, err, retry;

        /* We must have this many virtqueues. */
        if (nvqs > ioread8(&vdev->desc->num_vq))
                return -ENOENT;

        for (i = 0; i < nvqs; ++i) {
                dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
                        __func__, i, names[i]);
                vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error;
                }
        }

        iowrite8(1, &dc->used_address_updated);
        /*
         * Send an interrupt to the host to inform it that used
         * rings have been re-assigned.
         */
        vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
        /* host clears used_address_updated once it has re-read the rings */
        for (retry = 100; --retry;) {
                if (!ioread8(&dc->used_address_updated))
                        break;
                msleep(100);
        };

        dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
        /* timed out waiting for the host ack */
        if (!retry) {
                err = -ENODEV;
                goto error;
        }

        return 0;
error:
        vop_del_vqs(dev);
        return err;
}
/*
 * The config ops structure as defined by virtio config
 */
static struct virtio_config_ops vop_vq_config_ops = {
        .get_features = vop_get_features,
        .finalize_features = vop_finalize_features,
        .get = vop_get,
        .set = vop_set,
        .get_status = vop_get_status,
        .set_status = vop_set_status,
        .reset = vop_reset,
        .find_vqs = vop_find_vqs,
        .del_vqs = vop_del_vqs,
};
/*
 * Doorbell interrupt from the host: ack it, then run the vring
 * interrupt path for every virtqueue of this virtio device.
 */
static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
{
        struct _vop_vdev *vdev = data;
        struct vop_device *vpdev = vdev->vpdev;
        struct virtqueue *vq;

        vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
        list_for_each_entry(vq, &vdev->vdev.vqs, list)
                vring_interrupt(0, vq);

        return IRQ_HANDLED;
}
static void vop_virtio_release_dev(struct device *_d)
{
        /*
         * No need for a release method similar to virtio PCI.
         * Provide an empty one to avoid getting a warning from core.
         */
}
/*
 * adds a new device and register it with virtio
 * appropriate drivers are loaded by the device model
 *
 * @d: descriptor in the remote device page describing the new device
 * @offset: descriptor offset, used only for log messages
 * @vpdev: parent VOP device
 * @dnode: destination node stored in the new vdev
 *
 * Returns 0 on success or a negative errno. On success the host is
 * given our vdev pointer via dc->vdev for later config-change and
 * removal handling.
 */
static int _vop_add_device(struct mic_device_desc __iomem *d,
                           unsigned int offset, struct vop_device *vpdev,
                           int dnode)
{
        struct _vop_vdev *vdev;
        int ret;
        u8 type = ioread8(&d->type);

        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev)
                return -ENOMEM;

        vdev->vpdev = vpdev;
        vdev->vdev.dev.parent = &vpdev->dev;
        vdev->vdev.dev.release = vop_virtio_release_dev;
        vdev->vdev.id.device = type;
        vdev->vdev.config = &vop_vq_config_ops;
        vdev->desc = d;
        /* the device control block sits right after the aligned descriptor */
        vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
        vdev->dnode = dnode;
        vdev->vdev.priv = (void *)(u64)dnode;
        init_completion(&vdev->reset_done);

        /* grab a doorbell and hook up the host->card interrupt first */
        vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
        vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
                        vop_virtio_intr_handler, "virtio intr",
                        vdev, vdev->h2c_vdev_db);
        if (IS_ERR(vdev->virtio_cookie)) {
                ret = PTR_ERR(vdev->virtio_cookie);
                goto kfree;
        }
        iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
        vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);

        ret = register_virtio_device(&vdev->vdev);
        if (ret) {
                dev_err(_vop_dev(vdev),
                        "Failed to register vop device %u type %u\n",
                        offset, type);
                goto free_irq;
        }
        /* let the host find us again on config change / removal */
        writeq((u64)vdev, &vdev->dc->vdev);
        dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
                __func__, offset, type, vdev);

        return 0;

free_irq:
        vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
        kfree(vdev);
        return ret;
}
  451. /*
  452. * match for a vop device with a specific desc pointer
  453. */
  454. static int vop_match_desc(struct device *dev, void *data)
  455. {
  456. struct virtio_device *_dev = dev_to_virtio(dev);
  457. struct _vop_vdev *vdev = to_vopvdev(_dev);
  458. return vdev->desc == (void __iomem *)data;
  459. }
/*
 * Forward a host-initiated config change to the virtio core and ack it
 * back to the host via dc->guest_ack. No-op unless the host actually
 * flagged MIC_VIRTIO_PARAM_CONFIG_CHANGED.
 */
static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
                                      unsigned int offset,
                                      struct vop_device *vpdev)
{
        struct mic_device_ctrl __iomem *dc
                = (void __iomem *)d + _vop_aligned_desc_size(d);
        /* vdev pointer previously stashed by _vop_add_device() */
        struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);

        if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
                return;

        dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
        virtio_config_changed(&vdev->vdev);
        iowrite8(1, &dc->guest_ack);
}
  473. /*
  474. * removes a virtio device if a hot remove event has been
  475. * requested by the host.
  476. */
  477. static int _vop_remove_device(struct mic_device_desc __iomem *d,
  478. unsigned int offset, struct vop_device *vpdev)
  479. {
  480. struct mic_device_ctrl __iomem *dc
  481. = (void __iomem *)d + _vop_aligned_desc_size(d);
  482. struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
  483. u8 status;
  484. int ret = -1;
  485. if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
  486. dev_dbg(&vpdev->dev,
  487. "%s %d config_change %d type %d vdev %p\n",
  488. __func__, __LINE__,
  489. ioread8(&dc->config_change), ioread8(&d->type), vdev);
  490. status = ioread8(&d->status);
  491. reinit_completion(&vdev->reset_done);
  492. unregister_virtio_device(&vdev->vdev);
  493. vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
  494. iowrite8(-1, &dc->h2c_vdev_db);
  495. if (status & VIRTIO_CONFIG_S_DRIVER_OK)
  496. wait_for_completion(&vdev->reset_done);
  497. kfree(vdev);
  498. iowrite8(1, &dc->guest_ack);
  499. dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
  500. __func__, __LINE__, ioread8(&dc->guest_ack));
  501. iowrite8(-1, &d->type);
  502. ret = 0;
  503. }
  504. return ret;
  505. }
#define REMOVE_DEVICES true

/*
 * Walk the descriptor table in the remote device page: handle config
 * changes and removals for devices we already created, and add any new
 * ones (unless @remove, in which case every device is torn down).
 */
static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
                              bool remove, int dnode)
{
        s8 type;
        unsigned int i;
        struct mic_device_desc __iomem *d;
        struct mic_device_ctrl __iomem *dc;
        struct device *dev;
        int ret;

        for (i = sizeof(struct mic_bootparam);
                i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
                d = dp + i;
                dc = (void __iomem *)d + _vop_aligned_desc_size(d);
                /*
                 * This read barrier is paired with the corresponding write
                 * barrier on the host which is inserted before adding or
                 * removing a virtio device descriptor, by updating the type.
                 */
                rmb();
                type = ioread8(&d->type);

                /* end of list */
                if (type == 0)
                        break;

                /* -1 marks a slot whose device was already removed */
                if (type == -1)
                        continue;

                /* device already exists */
                dev = device_find_child(&vpdev->dev, (void __force *)d,
                                        vop_match_desc);
                if (dev) {
                        if (remove)
                                iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
                                         &dc->config_change);
                        put_device(dev);
                        _vop_handle_config_change(d, i, vpdev);
                        ret = _vop_remove_device(d, i, vpdev);
                        if (remove) {
                                iowrite8(0, &dc->config_change);
                                iowrite8(0, &dc->guest_ack);
                        }
                        continue;
                }

                /* new device */
                dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
                        __func__, __LINE__, d);
                if (!remove)
                        _vop_add_device(d, i, vpdev, dnode);
        }
}
/* Serialized wrapper around _vop_scan_devices() for this VOP device. */
static void vop_scan_devices(struct vop_info *vi,
                             struct vop_device *vpdev, bool remove)
{
        void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);

        /* remote device page may not be available yet */
        if (!dp)
                return;
        mutex_lock(&vi->vop_mutex);
        _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
        mutex_unlock(&vi->vop_mutex);
}
  565. /*
  566. * vop_hotplug_device tries to find changes in the device page.
  567. */
  568. static void vop_hotplug_devices(struct work_struct *work)
  569. {
  570. struct vop_info *vi = container_of(work, struct vop_info,
  571. hotplug_work);
  572. vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
  573. }
/*
 * Interrupt handler for hot plug/config changes etc.
 * Acks the config doorbell and defers the actual device-page scan to
 * the hotplug workqueue.
 */
static irqreturn_t vop_extint_handler(int irq, void *data)
{
        struct vop_info *vi = data;
        struct mic_bootparam __iomem *bp;
        struct vop_device *vpdev = vi->vpdev;

        bp = vpdev->hw_ops->get_remote_dp(vpdev);
        dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
                __func__, __LINE__);
        vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
        schedule_work(&vi->hotplug_work);

        return IRQ_HANDLED;
}
/*
 * Probe: allocate per-device state; on the host node (dnode != 0) run
 * the host-side init, otherwise scan the remote device page and hook up
 * the config-change interrupt. Returns 0 or a negative errno.
 */
static int vop_driver_probe(struct vop_device *vpdev)
{
        struct vop_info *vi;
        int rc;

        vi = kzalloc(sizeof(*vi), GFP_KERNEL);
        if (!vi) {
                rc = -ENOMEM;
                goto exit;
        }
        dev_set_drvdata(&vpdev->dev, vi);
        vi->vpdev = vpdev;

        mutex_init(&vi->vop_mutex);
        INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
        if (vpdev->dnode) {
                rc = vop_host_init(vi);
                if (rc < 0)
                        goto free;
        } else {
                struct mic_bootparam __iomem *bootparam;

                /* pick up devices the host injected before we probed */
                vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);

                vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
                vi->cookie = vpdev->hw_ops->request_irq(vpdev,
                                                        vop_extint_handler,
                                                        "virtio_config_intr",
                                                        vi, vi->h2c_config_db);
                if (IS_ERR(vi->cookie)) {
                        rc = PTR_ERR(vi->cookie);
                        goto free;
                }
                /* publish our config doorbell to the host */
                bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
                iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
        }
        vop_init_debugfs(vi);
        return 0;
free:
        kfree(vi);
exit:
        return rc;
}
/*
 * Remove: mirror of vop_driver_probe() — host-side uninit on the host
 * node, otherwise retract the config doorbell, quiesce the hotplug work
 * and remove every virtio device from the remote device page.
 */
static void vop_driver_remove(struct vop_device *vpdev)
{
        struct vop_info *vi = dev_get_drvdata(&vpdev->dev);

        if (vpdev->dnode) {
                vop_host_uninit(vi);
        } else {
                struct mic_bootparam __iomem *bootparam =
                        vpdev->hw_ops->get_remote_dp(vpdev);
                /* tell the host our doorbell is gone before freeing the irq */
                if (bootparam)
                        iowrite8(-1, &bootparam->h2c_config_db);
                vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
                flush_work(&vi->hotplug_work);
                vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
        }
        vop_exit_debugfs(vi);
        kfree(vi);
}
/* Match any VOP transport device */
static struct vop_device_id id_table[] = {
        { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
        { 0 },
};

static struct vop_driver vop_driver = {
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = vop_driver_probe,
        .remove = vop_driver_remove,
};

module_vop_driver(vop_driver);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
MODULE_LICENSE("GPL v2");