virtio_ring.h

#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky.  Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers.  Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (eg. other heterogeneous
 * CPUs) we do need real barriers.  In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */
static inline void virtio_mb(bool weak_barriers)
{
        if (weak_barriers)
                virt_mb();
        else
                mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
        if (weak_barriers)
                virt_rmb();
        else
                rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
        if (weak_barriers)
                virt_wmb();
        else
                wmb();
}

static inline void virtio_store_mb(bool weak_barriers,
                                   __virtio16 *p, __virtio16 v)
{
        if (weak_barriers) {
                virt_store_mb(*p, v);
        } else {
                WRITE_ONCE(*p, v);
                mb();
        }
}
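
/*
 * Example (illustrative sketch, not part of this header): how a
 * split-ring producer might use the helpers above when publishing a
 * buffer.  "avail", "avail_idx", "head", "num", "vdev" and
 * "weak_barriers" are hypothetical stand-ins for state a driver would
 * keep; cpu_to_virtio16() comes from linux/virtio_config.h.
 *
 *      avail->ring[avail_idx & (num - 1)] = cpu_to_virtio16(vdev, head);
 *      virtio_wmb(weak_barriers);      (ring entry visible before index)
 *      avail->idx = cpu_to_virtio16(vdev, avail_idx + 1);
 *      virtio_mb(weak_barriers);       (index visible before we read the
 *                                       device's flags to decide whether
 *                                       to notify it)
 */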

struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size() to
 * learn the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
                                         unsigned int num,
                                         unsigned int vring_align,
                                         struct virtio_device *vdev,
                                         bool weak_barriers,
                                         bool may_reduce_num,
                                         bool (*notify)(struct virtqueue *vq),
                                         void (*callback)(struct virtqueue *vq),
                                         const char *name);
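
/*
 * Example (illustrative sketch): how a transport might create a queue.
 * "vdev", "my_notify" and "my_callback" are assumed to be supplied by
 * the caller; SMP_CACHE_BYTES is from linux/cache.h and
 * virtqueue_get_vring_size() is declared in linux/virtio.h.  Because
 * may_reduce_num is passed as true, the resulting ring may hold fewer
 * than the 256 entries requested, hence the size query afterwards.
 *
 *      struct virtqueue *vq;
 *
 *      vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *                                  true, true, my_notify, my_callback,
 *                                  "requests");
 *      if (!vq)
 *              return -ENOMEM;
 *      pr_info("ring size: %u\n", virtqueue_get_vring_size(vq));
 */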

/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        struct vring vring,
                                        struct virtio_device *vdev,
                                        bool weak_barriers,
                                        bool (*notify)(struct virtqueue *),
                                        void (*callback)(struct virtqueue *),
                                        const char *name);

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *vq),
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name);
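
/*
 * Example (illustrative sketch): with a caller-allocated ring the
 * transport owns the memory and must size and align it itself.
 * vring_size() is from uapi/linux/virtio_ring.h; the page-aligned
 * allocation below is only one possibility and assumes "num" and
 * "vring_align" are defined by the caller (a real transport often
 * needs device-visible/DMA-able memory instead).
 *
 *      void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *                              get_order(vring_size(num, vring_align)));
 *      struct virtqueue *vq;
 *
 *      if (!pages)
 *              return -ENOMEM;
 *      vq = vring_new_virtqueue(0, num, vring_align, vdev, true, pages,
 *                               my_notify, my_callback, "requests");
 */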

/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
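
/*
 * Example (illustrative sketch): vring_interrupt() matches the
 * irq_handler_t signature, so a transport with a dedicated per-queue
 * interrupt can register it directly and pass the virtqueue as the
 * cookie.  request_irq() is declared in linux/interrupt.h; "irq" and
 * "vq" are assumed to come from the transport.
 *
 *      err = request_irq(irq, vring_interrupt, 0, "my-virtio-vq", vq);
 */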

#endif /* _LINUX_VIRTIO_RING_H */