client-buffers.c

/*
 * ISHTP Ring Buffers
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include "client.h"

/**
 * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize RX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	struct ishtp_cl_rb *rb;
	int ret = 0;
	unsigned long flags;

	for (j = 0; j < cl->rx_ring_size; ++j) {
		rb = ishtp_io_rb_init(cl);
		if (!rb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = ishtp_io_rb_alloc_buf(rb, len);
		if (ret)
			goto out;
		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}

	return 0;

out:
	dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
	ishtp_cl_free_rx_ring(cl);
	return ret;
}

/**
 * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
 * @cl: client device instance
 *
 * Allocate and initialize TX ring buffers
 *
 * Return: 0 on success else -ENOMEM
 */
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
{
	size_t len = cl->device->fw_client->props.max_msg_length;
	int j;
	unsigned long flags;

	/* Allocate pool of free Tx bufs */
	for (j = 0; j < cl->tx_ring_size; ++j) {
		struct ishtp_cl_tx_ring *tx_buf;

		tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
		if (!tx_buf)
			goto out;

		tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
		if (!tx_buf->send_buf.data) {
			kfree(tx_buf);
			goto out;
		}

		spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
		list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
	}
	return 0;
out:
	dev_err(&cl->device->dev, "error in allocating Tx pool\n");
	/* Free any Tx buffers allocated so far */
	ishtp_cl_free_tx_ring(cl);
	return -ENOMEM;
}
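
/*
 * Usage sketch (illustrative, not part of this file): a client driver
 * typically sets the ring sizes and then allocates both rings while
 * setting up its connection. The ring-size constants and the "rv"
 * variable below are hypothetical.
 *
 *	cl->rx_ring_size = CL_RX_RING_SIZE;
 *	cl->tx_ring_size = CL_TX_RING_SIZE;
 *
 *	rv = ishtp_cl_alloc_rx_ring(cl);
 *	if (rv)
 *		return rv;
 *	rv = ishtp_cl_alloc_tx_ring(cl);
 *	if (rv) {
 *		ishtp_cl_free_rx_ring(cl);
 *		return rv;
 *	}
 */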

/**
 * ishtp_cl_free_rx_ring() - Free RX ring buffers
 * @cl: client device instance
 *
 * Free RX ring buffers
 */
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	/* release allocated memory - pass over free_rb_list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	while (!list_empty(&cl->free_rb_list.list)) {
		rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
				list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/* release allocated memory - pass over in_process_list */
	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_entry(cl->in_process_list.list.next,
				struct ishtp_cl_rb, list);
		list_del(&rb->list);
		kfree(rb->buffer.data);
		kfree(rb);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}

/**
 * ishtp_cl_free_tx_ring() - Free TX ring buffers
 * @cl: client device instance
 *
 * Free TX ring buffers
 */
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
{
	struct ishtp_cl_tx_ring *tx_buf;
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
	/* release allocated memory - pass over tx_free_list */
	while (!list_empty(&cl->tx_free_list.list)) {
		tx_buf = list_entry(cl->tx_free_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	/* release allocated memory - pass over tx_list */
	while (!list_empty(&cl->tx_list.list)) {
		tx_buf = list_entry(cl->tx_list.list.next,
				    struct ishtp_cl_tx_ring, list);
		list_del(&tx_buf->list);
		kfree(tx_buf->send_buf.data);
		kfree(tx_buf);
	}
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}
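
/*
 * Teardown sketch (illustrative): on disconnect or remove, a client
 * driver frees both rings. Each free routine drains its lists under the
 * matching spinlock, so buffers still queued on tx_list or
 * in_process_list are reclaimed as well.
 *
 *	ishtp_cl_free_tx_ring(cl);
 *	ishtp_cl_free_rx_ring(cl);
 */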

/**
 * ishtp_io_rb_free() - Free IO request block
 * @rb: IO request block
 *
 * Free io request block memory
 */
void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
{
	if (rb == NULL)
		return;

	kfree(rb->buffer.data);
	kfree(rb);
}

/**
 * ishtp_io_rb_init() - Allocate and init IO request block
 * @cl: client device instance
 *
 * Allocate and initialize request block
 *
 * Return: Allocated IO request block pointer, or NULL on allocation failure
 */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;

	rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
	if (!rb)
		return NULL;

	INIT_LIST_HEAD(&rb->list);
	rb->cl = cl;
	rb->buf_idx = 0;
	return rb;
}

/**
 * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
 * @rb: IO request block
 * @length: length of response buffer
 *
 * Allocate response buffer
 *
 * Return: 0 on success, -EINVAL if @rb is NULL, -ENOMEM on allocation failure
 */
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
{
	if (!rb)
		return -EINVAL;

	if (length == 0)
		return 0;

	rb->buffer.data = kmalloc(length, GFP_KERNEL);
	if (!rb->buffer.data)
		return -ENOMEM;

	rb->buffer.size = length;
	return 0;
}

/**
 * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
 * @rb: IO request block
 *
 * Re-append rb to its client's free list and send flow control if needed
 *
 * Return: 0 on success, -EFAULT if @rb or its client is NULL, otherwise
 * the result of ishtp_cl_read_start()
 */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
{
	struct ishtp_cl *cl;
	int rets = 0;
	unsigned long flags;

	if (!rb || !rb->cl)
		return -EFAULT;

	cl = rb->cl;

	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	list_add_tail(&rb->list, &cl->free_rb_list.list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	/*
	 * If the client has no outstanding flow-control credit, send one
	 * now that a free buffer is available again
	 */
	if (!cl->out_flow_ctrl_creds)
		rets = ishtp_cl_read_start(cl);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
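
/*
 * Recycle sketch (illustrative): after an Rx callback has consumed a
 * completed request block (rb->buffer.data, rb->buf_idx bytes of it),
 * it hands the block back for reuse; recycling may also restart reads
 * via flow control. "process_rx_data" is a hypothetical helper, and rb
 * is assumed to have been taken from the client's in_process list.
 *
 *	process_rx_data(rb->buffer.data, rb->buf_idx);
 *	ishtp_cl_io_rb_recycle(rb);
 */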