/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * @job: bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct bsg_job *job)
{
	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);
}

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *req = job->req;
	struct request *rsp = req->next_rq;
	int err;

	err = job->req->errors = result;
	if (err < 0)
		/* we're only returning the result field in the reply */
		job->req->sense_len = sizeof(u32);
	else
		job->req->sense_len = job->reply_len;
	/* we assume all request payload was transferred, residual == 0 */
	req->resid_len = 0;

	if (rsp) {
		WARN_ON(reply_payload_rcv_len > rsp->resid_len);

		/* set reply (bidi) residual */
		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
	}

	blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);

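/*
 * Illustrative sketch (editor's addition, not part of bsg-lib): a minimal
 * LLD bsg_job_fn handler showing where bsg_job_done() would be called, per
 * the comment above ("The LLD should call this when the bsg job has
 * completed").  "example_bsg_job_fn" and its immediate-completion behaviour
 * are assumptions for illustration only; a real LLD would queue the command
 * to its hardware and call bsg_job_done() from its completion path.
 */
#if 0
static int example_bsg_job_fn(struct bsg_job *job)
{
	/* job->request/job->request_len carry the LLD-specific command */
	if (job->request_len < 4) {
		/* reject malformed requests; the error ends up in req->errors */
		bsg_job_done(job, -EINVAL, 0);
		return 0;
	}

	/* pretend the command completed successfully with no reply payload */
	bsg_job_done(job, 0, 0);
	return 0;
}
#endif
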
/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;

	blk_end_request_all(rq, rq->errors);
	bsg_destroy_job(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct request_queue *q = req->q;
	struct bsg_job *job;
	int ret;

	BUG_ON(req->special);

	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	req->special = job;
	job->req = req;
	if (q->bsg_job_size)
		job->dd_data = (void *)&job[1];
	job->request = req->cmd;
	job->request_len = req->cmd_len;
	job->reply = req->sense;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
						 * allocated */
	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	return 0;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	kfree(job);
	return -ENOMEM;
}

/**
 * bsg_goose_queue - restart queue in case it was stopped
 * @q: request q to be restarted
 */
void bsg_goose_queue(struct request_queue *q)
{
	if (!q)
		return;

	blk_run_queue_async(q);
}
EXPORT_SYMBOL_GPL(bsg_goose_queue);

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_create_job() should return a -Exyz error value that will be
 * set in req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
void bsg_request_fn(struct request_queue *q)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			req->errors = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(bsg_request_fn);

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @q: request queue setup by caller
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 *
 * The caller should have setup the request queue with bsg_request_fn
 * as the request_fn.
 */
int bsg_setup_queue(struct device *dev, struct request_queue *q,
		    char *name, bsg_job_fn *job_fn, int dd_job_size)
{
	int ret;

	q->queuedata = dev;
	q->bsg_job_size = dd_job_size;
	q->bsg_job_fn = job_fn;
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
	blk_queue_softirq_done(q, bsg_softirq_done);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, NULL);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to "
		       "initialize - register queue\n", dev->kobj.name);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);

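/*
 * Illustrative sketch (editor's addition, not part of bsg-lib): how an LLD
 * might wire a queue to this library, per the comment above - initialise the
 * request_queue with bsg_request_fn as its request_fn, then hand it to
 * bsg_setup_queue().  "example_attach_bsg", "example_lock" and
 * "struct example_job_data" are hypothetical names; example_bsg_job_fn refers
 * to the earlier handler sketch.
 */
#if 0
static int example_attach_bsg(struct device *dev, spinlock_t *example_lock)
{
	struct request_queue *q;
	int ret;

	q = blk_init_queue(bsg_request_fn, example_lock);
	if (!q)
		return -ENOMEM;

	ret = bsg_setup_queue(dev, q, (char *)dev_name(dev),
			      example_bsg_job_fn,
			      sizeof(struct example_job_data));
	if (ret)
		blk_cleanup_queue(q);
	return ret;
}
#endif
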
/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue, empty any requests that are blocked
 */
void bsg_remove_queue(struct request_queue *q)
{
	struct request *req; /* block request */
	int counts; /* totals for request_list count and starved */

	if (!q)
		return;

	/* Stop taking in new requests */
	spin_lock_irq(q->queue_lock);
	blk_stop_queue(q);

	/* drain all requests in the queue */
	while (1) {
		/* need the lock to fetch a request
		 * this may fetch the same request as the previous pass
		 */
		req = blk_fetch_request(q);
		/* save requests in use and starved */
		counts = q->rq.count[0] + q->rq.count[1] +
			 q->rq.starved[0] + q->rq.starved[1];
		spin_unlock_irq(q->queue_lock);
		/* any requests still outstanding? */
		if (counts == 0)
			break;

		/* This may be the same req as the previous iteration,
		 * always send the blk_end_request_all after a prefetch.
		 * It is not okay to not end the request because the
		 * prefetch started the request.
		 */
		if (req) {
			/* return -ENXIO to indicate that this queue is
			 * going away
			 */
			req->errors = -ENXIO;
			blk_end_request_all(req, -ENXIO);
		}

		msleep(200); /* allow bsg to possibly finish */
		spin_lock_irq(q->queue_lock);
	}
	bsg_unregister_queue(q);
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
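
/*
 * Illustrative sketch (editor's addition, not part of bsg-lib): the matching
 * teardown for the setup sketch above.  bsg_remove_queue() drains and
 * unregisters the bsg device; releasing the request_queue afterwards with
 * blk_cleanup_queue() is an assumption about the caller's lifecycle, not
 * something this library requires.
 */
#if 0
static void example_detach_bsg(struct request_queue *q)
{
	bsg_remove_queue(q);
	blk_cleanup_queue(q);
}
#endif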