dm-raid1.c

/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};
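/*
 * Note: mirror[0] above is an old-style flexible array member; one
 * struct mirror per configured leg is allocated along with the
 * containing mirror_set in alloc_context() below.
 */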
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}
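/*
 * delayed_wake() coalesces wakeups: the timer_pending bit guarantees
 * at most one timer is queued at a time, so callers may invoke it
 * freely and kmirrord is woken no more than once per HZ/5 interval.
 */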
static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}
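/*
 * Typical lifecycle of the bi_next stash: read_async_bio() and
 * do_write() call bio_set_m() just before handing the bio to dm_io,
 * and the completion callbacks retrieve the mirror with bio_get_m()
 * and reset the field to NULL before the bio is completed or requeued.
 */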
static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR enum values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
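/*
 * On return from the synchronous dm_io() above, error_bits holds one
 * bit per region passed in, so bit i being set means the flush failed
 * on leg i.  Pre-initialising it to -1 (all ones) means a request that
 * never completed is also treated as a failure on every leg.
 */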
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
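/*
 * The final-region computation above relies on region_size being a
 * power of two, so the bitwise AND is a cheap modulo.  For
 * illustration: with region_size = 1024 sectors and ti->len = 2500
 * sectors, the last region covers 2500 & 1023 = 452 sectors; were the
 * length an exact multiple, the AND would yield 0 and the full
 * region_size is used instead.
 */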
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}
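/*
 * choose_mirror() starts at the default leg and walks backwards
 * through the array (wrapping at the front) until it finds a leg with
 * no recorded errors, returning NULL only when every leg has failed.
 */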
static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
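/*
 * A single dm_io request fans the write out to every leg; the error
 * argument later handed to write_callback() is a bitmask with one bit
 * per leg, so individual failures can be attributed to specific
 * devices.
 */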
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}
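/*
 * For illustration (values are hypothetical): given argv of
 * { "core", "2", "1024", "nosync", ... }, param_count is 2, the log
 * is created with parameters "1024 nosync", and *args_used becomes 4,
 * telling the caller how far to advance through the table arguments.
 */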
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
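/*
 * For illustration only (device names and sizes are made up): a
 * two-leg mirror with a core log and 1024-sector regions might be
 * created with a table line such as
 *
 *     0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0
 *
 * optionally followed by "1 handle_errors".
 */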
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Re-process any bios on the hold list: with ms->suspend now
	 * set, hold_bio() will complete or requeue them instead of
	 * re-adding them, and no new bio can join the list.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush - A flush failure occurred
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *	U => Unclassified - An error was recorded but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);
		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
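/*
 * For illustration only (device numbers are made up): with two healthy
 * legs the INFO output takes roughly the form
 *
 *     2 253:3 253:4 928/1024 1 AA <dirty log status...>
 *
 * i.e. leg count and names, regions-in-sync/total regions, one status
 * parameter, the per-leg health characters, then whatever the dirty
 * log's own status method appends.
 */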
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");