blk-exec.c

/*
 * Functions for executing prepared requests on a queue, with and without
 * waiting for their completion
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	__blk_put_request(rq->q, rq);

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	WARN_ON(irqs_disabled());

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * need to check this before __blk_run_queue(), because rq can
	 * be freed before that returns.
	 */
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dead(q))) {
		spin_unlock_irq(q->queue_lock);
		rq->errors = -ENXIO;
		if (rq->end_io)
			rq->end_io(rq, rq->errors);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/* the queue is stopped so it won't be run */
	if (is_pm_resume)
		q->request_fn(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
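
/*
 * Example (not part of blk-exec.c): a minimal sketch of fire-and-forget use
 * of blk_execute_rq_nowait(), assuming the caller has already built @rq
 * (e.g. via blk_get_request() with the cmd[] fields filled in).  my_done()
 * and my_submit() are hypothetical names.  The end_io callback runs with the
 * queue lock held, so the locked __blk_put_request() variant is used there.
 */
static void my_done(struct request *rq, int error)
{
	if (error || rq->errors)
		pr_err("passthrough command failed: %d\n", error ?: rq->errors);
	__blk_put_request(rq->q, rq);
}

static void my_submit(struct request_queue *q, struct gendisk *disk,
		      struct request *rq)
{
	/* tail insert (at_head == 0); my_done() frees the request on completion */
	blk_execute_rq_nowait(q, disk, rq, 0, my_done);
}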
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:	insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
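
/*
 * Example (not part of blk-exec.c): a minimal sketch of synchronous use in
 * the style of scsi_execute().  my_test_unit_ready() is a hypothetical
 * helper; TEST_UNIT_READY and COMMAND_SIZE() are assumed to come from
 * <scsi/scsi.h>.  A caller-owned sense buffer is supplied so that any sense
 * data is still valid after blk_execute_rq() returns (the fallback buffer
 * above lives on blk_execute_rq()'s own stack).
 */
static int my_test_unit_ready(struct request_queue *q, struct gendisk *disk)
{
	char sense[SCSI_SENSE_BUFFERSIZE] = { 0 };
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = TEST_UNIT_READY;
	rq->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
	rq->sense = sense;
	rq->sense_len = 0;
	rq->timeout = 10 * HZ;
	rq->retries = 3;

	/* blocks until blk_end_sync_rq() completes the on-stack completion */
	err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}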