badblocks.c

/*
 * Bad block management
 *
 * - Heavily based on MD badblocks code from Neil Brown
 *
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/badblocks.h>
#include <linux/seqlock.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/slab.h>

/**
 * badblocks_check() - check a given range for bad sectors
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		sector (start) at which to check for badblocks
 * @sectors:	number of sectors to check for badblocks
 * @first_bad:	pointer to store location of the first badblock
 * @bad_sectors: pointer to store number of badblocks after @first_bad
 *
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 * A 'shift' can be set so that larger blocks are tracked and
 * consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so badblocks_check
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 *
 * Return:
 *  0: there are no known bad blocks in the range
 *  1: there are known bad blocks which are all acknowledged
 * -1: there are bad blocks which have not yet been acknowledged in metadata.
 * plus the start/length of the first bad section we overlap.
 */
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
			sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not.
			 */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see if
		 * any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_check);

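/*
 * Example (illustrative sketch, not part of the original file): a caller
 * consulting the table before issuing I/O.  Entries are packed with
 * BB_MAKE(offset, len, ack) and unpacked with BB_OFFSET/BB_LEN/BB_ACK per
 * the 64-bit layout described above.  The example_* name and the
 * fail-with-EIO policy are hypothetical; real users (md, nvdimm) apply
 * their own policies.
 */
static int __maybe_unused example_check_before_io(struct badblocks *bb,
						  sector_t s, int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	switch (badblocks_check(bb, s, sectors, &first_bad, &bad_sectors)) {
	case 0:
		return 0;	/* no known bad blocks in the range */
	case 1:
		/* all overlapping bad blocks are acknowledged;
		 * first_bad/bad_sectors describe the first overlap
		 */
		return -EIO;
	default:	/* -1: unacknowledged bad blocks exist */
		return -EIO;
	}
}
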
/* Clear unacked_exist once no unacknowledged ranges remain in the table. */
static void badblocks_update_acked(struct badblocks *bb)
{
	u64 *p = bb->page;
	int i;
	bool unacked = false;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count ; i++) {
		if (!BB_ACK(p[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

/**
 * badblocks_set() - Add a range of bad blocks to the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to mark as bad
 * @sectors:	number of sectors to mark as bad
 * @acknowledged: whether to mark the bad sectors as acknowledged
 *
 * This might extend the table, or might contract it if two adjacent ranges
 * can be merged.  We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 *
 * Return:
 *  0: success
 *  1: failed to set badblocks (out of space)
 */
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
			int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 0;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 1;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;

		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);

		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range
		 */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);

		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);

		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);

			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi'
		 */
		if (bb->count >= MAX_BADBLOCKS) {
			/* No room for more */
			rv = 1;
			break;
		} else {
			int this_sectors = sectors;

			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;
			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	else
		badblocks_update_acked(bb);
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_set);

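/*
 * Example (illustrative sketch, not part of the original file): recording
 * a media error reported by the hardware.  badblocks_set() handles the
 * bb->shift rounding and splits ranges longer than BB_MAX_LEN internally;
 * the caller only needs to handle the table-full case.  The example_*
 * name is hypothetical.
 */
static void __maybe_unused example_record_media_error(struct badblocks *bb,
						      sector_t s, int sectors)
{
	/* acknowledged == 0: the range is not yet recorded in on-media
	 * metadata, so readers of the table see it as unacknowledged
	 */
	if (badblocks_set(bb, s, sectors, 0))
		pr_warn("badblocks: table full, error at sector %llu lost\n",
			(unsigned long long)s);
}
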
/**
 * badblocks_clear() - Remove a range of bad blocks from the table.
 * @bb:		the badblocks structure that holds all badblock information
 * @s:		first sector to clear
 * @sectors:	number of sectors to clear
 *
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 *
 * Return:
 *  0: success
 *  -ENOSPC: failed to clear (the table was full and a range needed splitting)
 */
int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
		    (BB_OFFSET(p[lo]) < target)) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
		       (BB_OFFSET(p[lo]) < target)) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);

				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	badblocks_update_acked(bb);
	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);

	return rv;
}
EXPORT_SYMBOL_GPL(badblocks_clear);

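/*
 * Example (illustrative sketch, not part of the original file): clearing
 * a range once its sectors have been successfully rewritten.  The
 * example_* name is hypothetical.
 */
static void __maybe_unused example_clear_after_rewrite(struct badblocks *bb,
						       sector_t s, int sectors)
{
	/* badblocks_clear() fails only with -ENOSPC, when an existing
	 * range needed splitting and the table was full; the range then
	 * stays marked bad, which is the safe direction to err in
	 */
	if (badblocks_clear(bb, s, sectors))
		pr_warn("badblocks: could not clear range at sector %llu\n",
			(unsigned long long)s);
}
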
/**
 * ack_all_badblocks() - Acknowledge all bad blocks in a list.
 * @bb:		the badblocks structure that holds all badblock information
 *
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(ack_all_badblocks);

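/*
 * Example (illustrative sketch, not part of the original file): the
 * write-out sequence a metadata manager might use.  ack_all_badblocks()
 * deliberately does nothing while ->changed is set, so acknowledgments
 * can never run ahead of the durable on-media copy.  example_write_table
 * is a hypothetical callback.
 */
static void __maybe_unused example_sync_badblocks(struct badblocks *bb,
				int (*example_write_table)(struct badblocks *))
{
	if (!bb->changed)
		return;

	bb->changed = 0;		/* about to write the table out */
	if (example_write_table(bb) == 0)
		ack_all_badblocks(bb);	/* durable: safe to acknowledge */
	else
		bb->changed = 1;	/* write failed: stay dirty */
}
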
/**
 * badblocks_show() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer to fill for sysfs
 * @unack:	whether to show only unacknowledged badblocks
 *
 * Return:
 *  Length of returned data
 */
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
EXPORT_SYMBOL_GPL(badblocks_show);

/**
 * badblocks_store() - sysfs access to bad-blocks list
 * @bb:		the badblocks structure that holds all badblock information
 * @page:	buffer received from sysfs
 * @len:	length of data received from sysfs
 * @unack:	whether to store the new badblocks as unacknowledged
 *
 * Return:
 *  Length of the buffer processed or -ve error.
 */
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
			int unack)
{
	unsigned long long sector;
	int length;
	char newline;

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (badblocks_set(bb, sector, length, !unack))
		return -ENOSPC;
	else
		return len;
}
EXPORT_SYMBOL_GPL(badblocks_store);

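/*
 * Example (illustrative sketch, not part of the original file): exposing
 * the table through a sysfs attribute pair, in the style of md's
 * bad_blocks files ("echo '<sector> <length>' > bad_blocks" to add a
 * range).  example_bb() is a hypothetical lookup from the device to its
 * badblocks structure.
 */
static struct badblocks *example_bb(struct device *dev);

static ssize_t bad_blocks_show(struct device *dev,
			       struct device_attribute *attr, char *page)
{
	return badblocks_show(example_bb(dev), page, 0);
}

static ssize_t bad_blocks_store(struct device *dev,
				struct device_attribute *attr,
				const char *page, size_t len)
{
	return badblocks_store(example_bb(dev), page, len, 0);
}
static DEVICE_ATTR_RW(bad_blocks);
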
/* Common init: allocate the table page (devm-managed when @dev is set)
 * and leave the table disabled (shift == -1) unless @enable.
 */
static int __badblocks_init(struct device *dev, struct badblocks *bb,
		int enable)
{
	bb->dev = dev;
	bb->count = 0;
	if (enable)
		bb->shift = 0;
	else
		bb->shift = -1;
	if (dev)
		bb->page = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
	else
		bb->page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bb->page) {
		bb->shift = -1;
		return -ENOMEM;
	}
	seqlock_init(&bb->lock);

	return 0;
}

/**
 * badblocks_init() - initialize the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 * @enable:	whether to enable badblocks accounting
 *
 * Return:
 *  0: success
 *  -ve errno: on error
 */
int badblocks_init(struct badblocks *bb, int enable)
{
	return __badblocks_init(NULL, bb, enable);
}
EXPORT_SYMBOL_GPL(badblocks_init);

/**
 * devm_init_badblocks() - device-managed badblocks initialization
 * @dev:	the device owning @bb; the table page is freed with @dev
 * @bb:		the badblocks structure that holds all badblock information
 *
 * Return:
 *  0: success
 *  -ve errno: on error
 */
int devm_init_badblocks(struct device *dev, struct badblocks *bb)
{
	if (!bb)
		return -EINVAL;
	return __badblocks_init(dev, bb, 1);
}
EXPORT_SYMBOL_GPL(devm_init_badblocks);

/**
 * badblocks_exit() - free the badblocks structure
 * @bb:		the badblocks structure that holds all badblock information
 */
void badblocks_exit(struct badblocks *bb)
{
	if (!bb)
		return;
	if (bb->dev)
		devm_kfree(bb->dev, bb->page);
	else
		kfree(bb->page);
	bb->page = NULL;
}
EXPORT_SYMBOL_GPL(badblocks_exit);
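
/*
 * Example (illustrative sketch, not part of the original file): the full
 * lifecycle for a non-devm user.  A devm-managed user would call
 * devm_init_badblocks(dev, bb) instead and let devres free the page.
 * The example_* name is hypothetical.
 */
static int __maybe_unused example_lifecycle(void)
{
	struct badblocks bb = {};	/* zeroed: ->changed/->unacked_exist start clear */
	sector_t first_bad;
	int bad_sectors;
	int rv;

	rv = badblocks_init(&bb, 1);	/* enable == 1: shift starts at 0 */
	if (rv)
		return rv;

	/* mark sectors 100..107 bad and already acknowledged */
	badblocks_set(&bb, 100, 8, 1);

	/* overlaps an acknowledged range, so this returns 1 */
	rv = badblocks_check(&bb, 96, 16, &first_bad, &bad_sectors);

	badblocks_exit(&bb);
	return rv == 1 ? 0 : -EINVAL;
}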