recoverd.c
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"

/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}

static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);
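
	/* Hold back delivery of lock callbacks (asts) to the lockspace user
	   while recovery runs; dlm_callback_resume() below lets them flow
	   again once recovery completes. */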
	dlm_callback_suspend(ls);

	/*
	 * Free non-master tossed rsb's.  Master rsb's are kept on toss
	 * list and put on root list to be included in resdir recovery.
	 */

	dlm_clear_toss_list(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_debug(ls, "dlm_recover_members error %d", error);
		goto fail;
	}
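
	/* Recovery proceeds in phases: after each phase we record a local
	   status bit and wait until the other lockspace members report the
	   same status, so the *_wait() calls below act as cluster-wide
	   barriers. */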
	dlm_set_recover_status(ls, DLM_RS_NODES);

	error = dlm_recover_members_wait(ls);
	if (error) {
		log_debug(ls, "dlm_recover_members_wait error %d", error);
		goto fail;
	}

	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_debug(ls, "dlm_recover_directory error %d", error);
		goto fail;
	}

	dlm_set_recover_status(ls, DLM_RS_DIR);

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_debug(ls, "dlm_recover_directory_wait error %d", error);
		goto fail;
	}

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);
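
	/* Abort this recovery sequence if a newer stop has been requested
	   while we were working; a new recovery will follow. */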
	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_purge_locks(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_debug(ls, "dlm_recover_masters error %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_debug(ls, "dlm_recover_locks error %d", error);
			goto fail;
		}

		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_debug(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);

	error = dlm_recover_done_wait(ls);
	if (error) {
		log_debug(ls, "dlm_recover_done_wait error %d", error);
		goto fail;
	}
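
	/* Recovery of state held for the departed nodes is finished, so the
	   "gone" member list can be freed.  dlm_adjust_timeouts() compensates
	   lock timeout accounting for the time spent in recovery. */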
	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	dlm_callback_resume(ls);
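
	/* Turn normal locking back on, unless this recovery sequence has
	   been superseded by a newer stop (see enable_locking() above). */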
	error = enable_locking(ls, rv->seq);
	if (error) {
		log_debug(ls, "enable_locking error %d", error);
		goto fail;
	}
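
	/* Process the messages that dlm_recv saved on the requestqueue
	   while locking was stopped. */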
	error = dlm_process_requestqueue(ls);
	if (error) {
		log_debug(ls, "dlm_process_requestqueue error %d", error);
		goto fail;
	}
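
	/* Resend the outstanding operations that dlm_recover_waiters_pre()
	   marked above as needing to be resent after recovery. */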
	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_debug(ls, "dlm_recover_waiters_post error %d", error);
		goto fail;
	}
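
	/* Grant any locks that have become grantable now that locks held
	   by departed nodes have been purged. */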
	dlm_grant_after_purge(ls);

	log_debug(ls, "dlm_recover %llx generation %u done: %u ms",
		  (unsigned long long)rv->seq, ls->ls_generation,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	dlm_lsop_recover_done(ls);
	return 0;

 fail:
	dlm_release_root_list(ls);
	log_debug(ls, "dlm_recover %llx error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodes);
		kfree(rv);
	}
}
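
/* The per-lockspace recovery thread: sleep until dlm_recoverd_kick() sets
   LSFL_WORK, then run a recovery cycle, repeating until the thread is
   stopped by dlm_recoverd_stop(). */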
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_WORK, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	dlm_put_lockspace(ls);
	return 0;
}
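
/* Request a recovery run: set LSFL_WORK and wake the recovery thread. */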
void dlm_recoverd_kick(struct dlm_ls *ls)
{
	set_bit(LSFL_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
}
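
/* Create and start the per-lockspace "dlm_recoverd" kernel thread. */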
int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}
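
/* Block dlm_recoverd from starting a new recovery cycle by holding
   ls_recoverd_active; the wake_up on ls_wait_general nudges any recovery
   wait loop to re-evaluate its condition. */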
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}