/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"

static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel);

/* endio handlers:
 *   drbd_md_io_complete (defined here)
 *   drbd_endio_pri (defined here)
 *   drbd_endio_sec (defined here)
 *   bm_async_io_complete (defined in drbd_bitmap.c)
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 *
 */
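/* In practice this means the completion handlers below take mdev->req_lock
 * only in its irqsave form and limit themselves to list moves, counters and
 * wake-ups; the actual replies to the peer are not sent from here: read
 * completions queue their work item for the worker, write completions move
 * the epoch entry to done_ee and wake the asender. */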
/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync after dependencies, we grab a write lock, because
   we need stable states on all devices for that. */
rwlock_t global_state_lock;

/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;

	md_io = (struct drbd_md_io *)bio->bi_private;
	md_io->error = error;

	complete(&md_io->event);
}

/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;

	D_ASSERT(e->block_id != ID_VACANT);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->read_cnt += e->size >> 9;
	list_del(&e->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	drbd_queue_work(&mdev->data.work, &e->w);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage. */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;
	sector_t e_sector;
	int do_wake;
	int is_syncer_req;
	int do_al_complete_io;

	D_ASSERT(e->block_id != ID_VACANT);

	/* after we moved e to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = e->sector;
	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
	is_syncer_req = is_syncer_block_id(e->block_id);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->writ_cnt += e->size >> 9;
	list_del(&e->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&e->w.list, &mdev->done_ee);

	/* No hlist_del_init(&e->collision) here, we did not send the Ack yet,
	 * neither did we wake possibly waiting conflicting requests.
	 * done from "drbd_process_done_ee" within the appropriate w.cb
	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */

	do_wake = is_syncer_req
		? list_empty(&mdev->sync_ee)
		: list_empty(&mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, false);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (is_syncer_req)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev);
	put_ldev(mdev);
}

/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_epoch_entry *e = bio->bi_private;
	struct drbd_conf *mdev = e->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error && __ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)e->sector);
	if (!error && !uptodate) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
					is_write ? "write" : "read",
					(unsigned long long)e->sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &e->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&e->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(e);
		else
			drbd_endio_read_sec_final(e);
	}
}

/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}

int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, read_retry_remote_canceled);
		spin_unlock_irq(&mdev->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->req_lock);

	return w_send_read_req(mdev, w, 0);
}
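/* drbd_csum_ee()/drbd_csum_bio() below compute the configured hash (tfm)
 * over an epoch entry's page chain resp. over a bio's segments; the
 * resulting digest is what checksum-based resync and online verify
 * exchange instead of (or before) the full data block. */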
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = e->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = e->size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}

void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	__bio_for_each_segment(bvec, bio, i, 0) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}

/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int digest_size;
	void *digest;
	int ok = 1;

	D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);

	if (unlikely(cancel))
		goto out;
	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		sector_t sector = e->sector;
		unsigned int size = e->size;
		drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
		/* Free e and pages before send.
		 * In case we block on congestion, we could otherwise run into
		 * some distributed deadlock, if the other side blocks on
		 * congestion as well, because our receiver blocks in
		 * drbd_pp_alloc due to pp_in_use > max_buffers. */
		drbd_free_ee(mdev, e);
		e = NULL;
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, sector, size,
					     digest, digest_size,
					     P_CSUM_RS_REQUEST);
		kfree(digest);
	} else {
		dev_err(DEV, "kmalloc() of digest failed.\n");
		ok = 0;
	}

out:
	if (e)
		drbd_free_ee(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
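/* read_for_csum(): start the local read for one checksum-based resync
 * request: allocate an epoch entry, queue it on read_ee and submit the
 * read.  When the read completes, w_e_send_csum() (installed as w.cb
 * before submission) hashes the data and sends P_CSUM_RS_REQUEST to the
 * peer. */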
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_epoch_entry *e;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev, sector))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
	if (!e)
		goto defer;

	e->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* If it failed because of ENOMEM, retry should help.  If it failed
	 * because bio_add_page failed (probably broken lower level driver),
	 * retry may or may not help.
	 * If it does not, you may need to force disconnect. */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}

int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		w_make_ov_request(mdev, w, cancel);
		break;
	case C_SYNC_TARGET:
		w_make_resync_request(mdev, w, cancel);
		break;
	}

	return 1;
}

void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	if (list_empty(&mdev->resync_work.list))
		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
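/* The fifo_* helpers below operate on the fixed-size plan buffer
 * mdev->rs_plan_s used by the resync rate controller: fifo_push() stores
 * the newest planned correction and returns the value that drops out of
 * the window, fifo_add_val() adds a correction to every slot still in
 * the plan, fifo_set() initializes all slots to one value. */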
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size)
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}
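/* drbd_rs_controller(): feedback controller for the resync rate, invoked
 * from drbd_rs_number_requests() once per SLEEP_TIME step.  It compares
 * the amount of resync data currently in flight (plus what is already
 * planned) against the wanted fill level -- c_fill_target, or an amount
 * derived from c_delay_target and the rate at which replies came in --
 * and spreads the resulting correction over the plan fifo.  The return
 * value is the number of sectors to request in this step, capped by
 * c_max_rate. */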
static int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect;   /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy */
	int cps;        /* correction per invocation of drbd_rs_controller() */
	int steps;      /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;
	if (req_sect < 0)
		req_sect = 0;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	/*
	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);
	*/

	return req_sect;
}
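/* drbd_rs_number_requests(): translate the controller output (or the
 * static sync_conf.rate) into a number of bitmap-block sized requests for
 * this step.  The shift by (BM_BLOCK_SHIFT - 9) converts 512-byte sectors
 * into bitmap blocks; assuming the usual BM_BLOCK_SIZE of 4 KiB and
 * SLEEP_TIME of 100 ms (both defined outside this file), each request
 * covers 4 KiB and c_sync_rate works out to roughly number * 40 KiB/s. */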
static int drbd_rs_number_requests(struct drbd_conf *mdev)
{
	int number;

	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}

	/* ignore the amount of pending requests, the resync controller should
	 * throttle down to incoming reply rate soon enough anyways. */
	return number;
}

static int w_make_resync_request(struct drbd_conf *mdev,
				 struct drbd_work *w, int cancel)
{
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_bio_size;
	int number, rollback_i, size;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (mdev->rs_total == 0) {
		/* empty resync? */
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync a
		   get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
		   to continue resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		return 1;
	}

	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
	number = drbd_rs_number_requests(mdev);
	if (number == 0)
		goto requeue;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->data.mutex);
		if (mdev->data.socket) {
			queued = mdev->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == DRBD_END_OF_BITMAP) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_bio_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
					       sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		put_ldev(mdev);
		return 1;
	}

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}
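/* w_make_ov_request(): online-verify counterpart of the function above.
 * Instead of walking the dirty bitmap it advances ov_position linearly
 * over the device and sends P_OV_REQUEST packets, throttled by the same
 * drbd_rs_number_requests()/resync_timer mechanism. */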
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	number = drbd_rs_number_requests(mdev);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_rs_should_slow_down(mdev, sector) ||
		    drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}

void start_resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
}

int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
		dev_warn(DEV, "w_start_resync later...\n");
		mdev->start_resync_timer.expires = jiffies + HZ/10;
		add_timer(&mdev->start_resync_timer);
		return 1;
	}

	drbd_start_resync(mdev, C_SYNC_SOURCE);
	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
	return 1;
}

int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	ov_oos_print(mdev);
	drbd_resync_finished(mdev);

	return 1;
}

static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);

	drbd_resync_finished(mdev);

	return 1;
}

static void ping_peer(struct drbd_conf *mdev)
{
	clear_bit(GOT_PING_ACK, &mdev->flags);
	request_ping(mdev);
	wait_event(mdev->misc_wait,
		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}

int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;
	int verify_done = 0;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now). Retry in 100ms. */

		schedule_timeout_interruptible(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;

	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;

	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     verify_done ? "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
			const int ratio =
				(t == 0)     ? 0 :
				(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}

	if (mdev->rs_failed) {
		dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
			/* for verify runs, we don't update uuids here,
			 * so there would be nothing to report. */
			drbd_uuid_set_bm(mdev, 0UL);
			drbd_print_uuids(mdev, "updated UUIDs");
			if (mdev->p_uuid) {
				/* Now the two UUID sets are equal, update what we
				 * know of the peer. */
				int i;
				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
			}
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	if (verify_done)
		mdev->ov_start_sector = 0;

	drbd_md_sync(mdev);

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}

/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	if (drbd_ee_has_active_page(e)) {
		/* This might happen if sendpage() has not finished */
		int i = (e->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->req_lock);
		list_add_tail(&e->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, e);
}

/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	if (mdev->state.conn == C_AHEAD) {
		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, e->sector, e->size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
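/* w_e_end_csum_rs_req(): complete one checksum-based resync request.  The
 * peer sent a digest of its version of the block (kept in e->digest); we
 * hash our local copy and compare.  If the digests match we mark the
 * block in sync and answer with the cheap P_RS_IS_IN_SYNC ack, otherwise
 * we send the full block as P_RS_DATA_REPLY. */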
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, e->sector, e->size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
		} else {
			inc_rs_pending(mdev);
			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}

/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	sector_t sector = e->sector;
	unsigned int size = e->size;
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (!digest) {
		ok = 0;	/* terminate the connection in case the allocation failed */
		goto out;
	}

	if (likely(!(e->flags & EE_WAS_ERROR)))
		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
	else
		memset(digest, 0, digest_size);

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, e);
	e = NULL;
	inc_rs_pending(mdev);
	ok = drbd_send_drequest_csum(mdev, sector, size,
				     digest, digest_size,
				     P_OV_REPLY);
	if (!ok)
		dec_rs_pending(mdev);
	kfree(digest);

out:
	if (e)
		drbd_free_ee(mdev, e);
	dec_unacked(mdev);
	return ok;
}
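/* drbd_ov_oos_found(): record one out-of-sync block found by online
 * verify.  Adjacent ranges are coalesced into ov_last_oos_start/size so
 * that ov_oos_print() can report contiguous regions, and the block is
 * marked out of sync in the bitmap. */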
void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
}

int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	void *digest;
	sector_t sector = e->sector;
	unsigned int size = e->size;
	int digest_size;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	}

	/* Free e and pages before send.
	 * In case we block on congestion, we could otherwise run into
	 * some distributed deadlock, if the other side blocks on
	 * congestion as well, because our receiver blocks in
	 * drbd_pp_alloc due to pp_in_use > max_buffers. */
	drbd_free_ee(mdev, e);
	if (!eq)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	dec_unacked(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		ov_oos_print(mdev);
		drbd_resync_finished(mdev);
	}

	return ok;
}

int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
	complete(&b->done);
	return 1;
}

int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct p_barrier *p = &mdev->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
				(struct p_header80 *)p, sizeof(*p), 0);
	drbd_put_data_sock(mdev);

	return ok;
}

int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (cancel)
		return 1;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_oos(mdev, req);
	req_mod(req, oos_handed_to_network);

	return ok;
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}

/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}

int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it can not deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */
	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}
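/* _drbd_may_sync_now(): walk the "sync-after" dependency chain
 * (sync_conf.after names the minor we must wait for) and return 0 if any
 * device ahead of us is currently resyncing or has its resync paused;
 * only then may this device start or resume its own resync. */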
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		ERR_IF(!odev) return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}

/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}

/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO) ;
		}
	}
	return rv;
}

void resume_next_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_resume_next(mdev);
	write_unlock_irq(&global_state_lock);
}

void suspend_other_sg(struct drbd_conf *mdev)
{
	write_lock_irq(&global_state_lock);
	_drbd_pause_after(mdev);
	write_unlock_irq(&global_state_lock);
}

static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}
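/* drbd_alter_sa(): change the sync-after dependency of this device to
 * minor "na".  Under the global state lock we first validate that the new
 * setting does not create a cycle, then repeatedly pause and resume
 * resyncs until the aftr_isp flags of all devices have settled. */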
int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}

void drbd_rs_controller_reset(struct drbd_conf *mdev)
{
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	mdev->rs_in_flight = 0;
	mdev->rs_planed = 0;
	spin_lock(&mdev->peer_seq_lock);
	fifo_set(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
}

/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	if (mdev->state.conn < C_AHEAD) {
		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
		drbd_rs_cancel_all(mdev);
		/* This should be done when we abort the resync. We definitely do not
		   want to have this for connections going back and forth between
		   Ahead/Behind and SyncSource/SyncTarget */
	}

	if (side == C_SYNC_TARGET) {
		/* Since application IO was locked out during C_WF_BITMAP_T and
		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
		   we check that we might make the data inconsistent. */
		r = drbd_khelper(mdev, "before-resync-target");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			dev_info(DEV, "before-resync-target handler returned %d, "
			     "dropping connection.\n", r);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			return;
		}
	} else /* C_SYNC_SOURCE */ {
		r = drbd_khelper(mdev, "before-resync-source");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			if (r == 3) {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "ignoring. Old userland tools?", r);
			} else {
				dev_info(DEV, "before-resync-source handler returned %d, "
					 "dropping connection.\n", r);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return;
			}
		}
	}

	drbd_state_lock(mdev);

	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		drbd_state_unlock(mdev);
		return;
	}

	write_lock_irq(&global_state_lock);
	ns = mdev->state;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;

	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed    = 0;
		mdev->rs_paused    = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total     = tw;
		mdev->rs_start     = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);

		if (side == C_SYNC_TARGET)
			mdev->bm_resync_fo = 0;
		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
		 * with w_send_oos, or the sync target will get confused as to
		 * how many bits to resync.  We cannot do that always, because for an
		 * empty resync and protocol < 95, we need to do it here, as we call
		 * drbd_resync_finished from here in that case.
		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
		 * and from after_state_ch otherwise. */
		if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
			drbd_gen_and_send_sync_uuid(mdev);

		if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change.  Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->net_conf->ping_int * HZ +
					mdev->net_conf->ping_timeo*HZ/9);
			drbd_resync_finished(mdev);
		}

		drbd_rs_controller_reset(mdev);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);

		drbd_md_sync(mdev);
	}
	put_ldev(mdev);
	drbd_state_unlock(mdev);
}
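/* drbd_worker(): main loop of the per-device worker thread.  It sleeps on
 * the data.work semaphore; while it would block it uncorks the data
 * socket (so corked packets get flushed) and corks it again before
 * processing the next batch of work items.  Each work item's callback is
 * invoked with cancel set if the connection is already lost; a failing
 * callback forces the connection into C_NETWORK_FAILURE.  On shutdown the
 * remaining queue is drained with cancel=1 and the receiver is stopped
 * before drbd_mdev_cleanup(). */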
int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0, i;

	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);

		if (down_trylock(&mdev->data.work.s)) {
			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_uncork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);

			intr = down_interruptible(&mdev->data.work.s);

			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_cork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);
		}

		if (intr) {
			D_ASSERT(intr == -EINTR);
			flush_signals(current);
			ERR_IF (get_t_state(thi) == Running)
				continue;
			break;
		}

		if (get_t_state(thi) != Running)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&mdev->data.work.q_lock);
		ERR_IF(list_empty(&mdev->data.work.q)) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			spin_unlock_irq(&mdev->data.work.q_lock);
			continue;
		}
		w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (mdev->state.conn >= C_CONNECTED)
				drbd_force_state(mdev,
						NS(conn, C_NETWORK_FAILURE));
		}
	}
	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));

	spin_lock_irq(&mdev->data.work.q_lock);
	i = 0;
	while (!list_empty(&mdev->data.work.q)) {
		list_splice_init(&mdev->data.work.q, &work_list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(mdev, w, 1);
			i++; /* dead debugging code */
		}

		spin_lock_irq(&mdev->data.work.q_lock);
	}
	sema_init(&mdev->data.work.s, 0);
	/* DANGEROUS race: if someone did queue his work within the spinlock,
	 * but up() ed outside the spinlock, we could get an up() on the
	 * semaphore without corresponding list entry.
	 * So don't do that.
	 */
	spin_unlock_irq(&mdev->data.work.q_lock);

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the Exiting receiver. */
	drbd_thread_stop(&mdev->receiver);
	drbd_mdev_cleanup(mdev);

	dev_info(DEV, "worker terminated\n");

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}