ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

/* #defines */

/* Amount of free space available to write into the ring */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
	(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
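
/*
 * Worked example (illustrative figures, not from this file): with a data
 * area of z = 4096 bytes, read index r = 1000 and write index w = 3000,
 * w >= r, so the writer may use z - (w - r) = 4096 - 2000 = 2096 bytes.
 * If instead r = 3000 and w = 1000, the writable span is r - w = 2000
 * bytes. When r == w the macro yields z, i.e. an empty ring; note that
 * hv_ringbuffer_write() below never fills the ring completely, so
 * r == w always unambiguously means "empty".
 */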

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from, and the space
 * available to write to, the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc;

	smp_read_barrier_depends();

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;

	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
	*read = rbi->ring_datasize - *write;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer.
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer.
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer.
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over a leading header (e.g. a packet
 * descriptor) before copying out the payload.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer.
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}

/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer's data area.
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer's data area (excluding the control page).
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the indices of the specified ring buffer packed into a u64, with
 * the write index in the upper 32 bits. Note that only the write index
 * is filled in; the lower 32 bits (the read index slot) are left zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
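
/*
 * The value above is appended after each packet's payload by
 * hv_ringbuffer_write() below as a u64 trailer, recording the write
 * index at which the packet started. For example (illustrative figure),
 * a packet written while write_index == 0x140 is followed by the
 * trailer 0x0000014000000000.
 */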

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination buffer.
 * Assumes the caller has checked that enough data is available. Handles
 * wrap-around on the source (ring-buffer) side only!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;
		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes the caller has checked that enough room is available. Handles
 * wrap-around on the destination (ring-buffer) side only!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
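
/*
 * Worked example (illustrative figures): with ring_buffer_size = 4096,
 * start_write_offset = 4000 and srclen = 200, the copy wraps: frag_len =
 * 96 bytes land at offsets 4000..4095, the remaining 104 bytes land at
 * offsets 0..103, and the function returns (4000 + 200) % 4096 = 104 as
 * the next write offset.
 */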

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer.
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer.
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}
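
/*
 * Background (summarized from the Hyper-V ring-buffer design, not stated
 * in this file): interrupt_mask lives in the shared control page. When a
 * reader sets it to a non-zero value, the opposite endpoint suppresses
 * the interrupt it would otherwise signal after placing new data in the
 * ring.
 */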

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer.
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
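
/*
 * Resulting memory layout (sketch; assumes the control structure occupies
 * exactly one 4 KB page, as enforced by the PAGE_SIZE check above):
 *
 *	buffer ----------------> +---------------------------------+
 *	                         | struct hv_ring_buffer           |
 *	                         | (read_index, write_index,       |
 *	                         |  interrupt_mask, ...)           |
 *	buffer + PAGE_SIZE ----> +---------------------------------+
 *	                         | data area:                      |
 *	                         | ring_datasize =                 |
 *	                         |   buflen - PAGE_SIZE bytes      |
 *	buffer + buflen -------> +---------------------------------+
 */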

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer.
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	/* Nothing to tear down; the ring memory is owned and freed by the caller. */
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer.
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i) {
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Make sure we flush all writes before updating the write index */
	wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	return 0;
}
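
/*
 * Usage sketch (hypothetical caller, not part of this file): a VMBus
 * channel driver would typically gather its packet descriptor and
 * payload into a scatterlist, then retry on -EAGAIN once the host has
 * drained the ring:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &desc, sizeof(desc));
 *	sg_set_buf(&sg[1], payload, payload_len);
 *	ret = hv_ringbuffer_write(&channel->outbound, sg, 2);
 */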

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index.
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index.
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}