/* virtio.h — userspace stub header for building virtio_ring.c outside the kernel. */
#ifndef LINUX_VIRTIO_H
#define LINUX_VIRTIO_H
/* Libc headers backing the userspace stand-ins for kernel facilities below. */
#include <stdbool.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <linux/types.h>
#include <errno.h>
/* DMA addresses are plain integers here: no IOMMU in a userspace test build. */
typedef unsigned long long dma_addr_t;
/*
 * Minimal scatterlist entry mirroring the kernel layout: page_link holds
 * the struct page pointer with the two low bits stolen for the CHAIN (0x1)
 * and END (0x2) markers (see sg_mark_end()/sg_assign_page() below).
 */
struct scatterlist {
	unsigned long page_link;	/* page pointer | chain/end marker bits */
	unsigned int offset;		/* byte offset into the page */
	unsigned int length;		/* segment length in bytes */
	dma_addr_t dma_address;		/* "mapped" address (identity mapping here) */
};
/* Fake struct page: one per 4K of (virtual == physical) memory. */
struct page {
	unsigned long long dummy;	/* no real state; only the address matters */
};
  21. #define BUG_ON(__BUG_ON_cond) assert(!(__BUG_ON_cond))
  22. /* Physical == Virtual */
  23. #define virt_to_phys(p) ((unsigned long)p)
  24. #define phys_to_virt(a) ((void *)(unsigned long)(a))
  25. /* Page address: Virtual / 4K */
  26. #define virt_to_page(p) ((struct page*)((virt_to_phys(p) / 4096) * \
  27. sizeof(struct page)))
  28. #define offset_in_page(p) (((unsigned long)p) % 4096)
  29. #define sg_phys(sg) ((sg->page_link & ~0x3) / sizeof(struct page) * 4096 + \
  30. sg->offset)
  31. static inline void sg_mark_end(struct scatterlist *sg)
  32. {
  33. /*
  34. * Set termination bit, clear potential chain bit
  35. */
  36. sg->page_link |= 0x02;
  37. sg->page_link &= ~0x01;
  38. }
  39. static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
  40. {
  41. memset(sgl, 0, sizeof(*sgl) * nents);
  42. sg_mark_end(&sgl[nents - 1]);
  43. }
  44. static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
  45. {
  46. unsigned long page_link = sg->page_link & 0x3;
  47. /*
  48. * In order for the low bit stealing approach to work, pages
  49. * must be aligned at a 32-bit boundary as a minimum.
  50. */
  51. BUG_ON((unsigned long) page & 0x03);
  52. sg->page_link = page_link | (unsigned long) page;
  53. }
  54. static inline void sg_set_page(struct scatterlist *sg, struct page *page,
  55. unsigned int len, unsigned int offset)
  56. {
  57. sg_assign_page(sg, page);
  58. sg->offset = offset;
  59. sg->length = len;
  60. }
  61. static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  62. unsigned int buflen)
  63. {
  64. sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
  65. }
  66. static inline void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
  67. {
  68. sg_init_table(sg, 1);
  69. sg_set_buf(sg, buf, buflen);
  70. }
/* 16-bit alias matching the kernel's u16. */
typedef __u16 u16;
/* Allocation-context flags; only the two values virtio_ring.c needs. */
typedef enum {
	GFP_KERNEL,
	GFP_ATOMIC,
} gfp_t;
/* Interrupt handler return codes. */
typedef enum {
	IRQ_NONE,
	IRQ_HANDLED
} irqreturn_t;
  80. static inline void *kmalloc(size_t s, gfp_t gfp)
  81. {
  82. return malloc(s);
  83. }
  84. static inline void kfree(void *p)
  85. {
  86. free(p);
  87. }
/* Recover a pointer to the enclosing structure from a pointer to one of
 * its members (GNU statement expression + typeof). */
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
/* Silence "may be used uninitialized" warnings without changing behavior. */
#define uninitialized_var(x) x = x
/* Branch-prediction hints; purely advisory. */
# ifndef likely
# define likely(x) (__builtin_expect(!!(x), 1))
# endif
# ifndef unlikely
# define unlikely(x) (__builtin_expect(!!(x), 0))
# endif
/* Logging stubs: route the kernel print helpers to stderr. */
#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#ifdef DEBUG
#define pr_debug(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#else
/* Compiled out unless DEBUG is defined. */
#define pr_debug(format, ...) do {} while (0)
#endif
/* Device-prefixed variants; the dev argument is ignored. */
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)
  109. #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
  110. #define BITS_PER_BYTE 8
  111. #define BITS_PER_LONG (sizeof(long) * BITS_PER_BYTE)
  112. #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
  113. /* TODO: Not atomic as it should be:
  114. * we don't use this for anything important. */
  115. static inline void clear_bit(int nr, volatile unsigned long *addr)
  116. {
  117. unsigned long mask = BIT_MASK(nr);
  118. unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
  119. *p &= ~mask;
  120. }
  121. static inline int test_bit(int nr, const volatile unsigned long *addr)
  122. {
  123. return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
  124. }
/* The only feature we care to support */
/* Feature bits live in (dev)->features; reuse test_bit() to query them. */
#define virtio_has_feature(dev, feature) \
test_bit((feature), (dev)->features)
/* end of stubs */
/* Cut-down virtio device: an opaque device handle plus one feature word. */
struct virtio_device {
	void *dev;			/* opaque "struct device" stand-in */
	unsigned long features[1];	/* feature bitmap, read via virtio_has_feature() */
};
/* Cut-down virtqueue; the ring itself hangs off priv (see virtio_ring.c). */
struct virtqueue {
	/* TODO: commented as list macros are empty stubs for now.
	 * Broken but enough for virtio_ring.c
	 * struct list_head list; */
	void (*callback)(struct virtqueue *vq);	/* used-buffer notification hook */
	const char *name;			/* queue name for diagnostics */
	struct virtio_device *vdev;		/* owning device */
	void *priv;				/* transport/ring private data */
};
/* Expand export markers to a dummy function so kernel sources still parse. */
#define EXPORT_SYMBOL_GPL(__EXPORT_SYMBOL_GPL_name) \
void __EXPORT_SYMBOL_GPL##__EXPORT_SYMBOL_GPL_name() { \
}
/* Record the module license string in a global instead of module metadata. */
#define MODULE_LICENSE(__MODULE_LICENSE_value) \
const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value
#define CONFIG_SMP
/* Memory barriers: only x86 variants are provided here. */
#if defined(__i386__) || defined(__x86_64__)
/* Compiler-only reordering fence. */
#define barrier() asm volatile("" ::: "memory")
/* Full hardware memory barrier. */
#define mb() __sync_synchronize()
#define smp_mb() mb()
/* x86 is strongly ordered, so compiler barriers suffice for rmb/wmb. */
# define smp_rmb() barrier()
# define smp_wmb() barrier()
#else
#error Please fill in barrier macros
#endif
/* Interfaces exported by virtio_ring. */
/*
 * Queue the buffer chain @sg with @data as the caller cookie later
 * returned by virtqueue_get_buf().  NOTE(review): out_num/in_num
 * presumably split @sg into driver-to-device and device-to-driver
 * segments per the usual virtio convention — confirm in virtio_ring.c.
 */
int virtqueue_add_buf_gfp(struct virtqueue *vq,
			  struct scatterlist sg[],
			  unsigned int out_num,
			  unsigned int in_num,
			  void *data,
			  gfp_t gfp);
  164. static inline int virtqueue_add_buf(struct virtqueue *vq,
  165. struct scatterlist sg[],
  166. unsigned int out_num,
  167. unsigned int in_num,
  168. void *data)
  169. {
  170. return virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, GFP_ATOMIC);
  171. }
/* Notify the other side that new buffers were queued. */
void virtqueue_kick(struct virtqueue *vq);
/* Fetch the next used buffer's cookie; NOTE(review): *len presumably
 * receives the number of bytes written — confirm in virtio_ring.c. */
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
/* Suppress / re-enable used-buffer callbacks. */
void virtqueue_disable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb(struct virtqueue *vq);
/* Detach a buffer that was queued but never consumed (e.g. teardown). */
void *virtqueue_detach_unused_buf(struct virtqueue *vq);
/* Create a vring-backed virtqueue over caller-provided @pages. */
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
/* Destroy a virtqueue created by vring_new_virtqueue(). */
void vring_del_virtqueue(struct virtqueue *vq);
  185. #endif