core.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
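
/*
 * Example (illustrative sketch, not part of the original file): code that
 * mutates bus-scoped state takes the reconfig lock through any device in
 * the bus hierarchy; example_reconfigure() is a hypothetical caller.
 *
 *	static void example_reconfigure(struct device *dev)
 *	{
 *		nvdimm_bus_lock(dev);
 *		... mutate state shared across the bus, e.g. mapping_list ...
 *		nvdimm_bus_unlock(dev);
 *	}
 */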

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;

	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);
	/* ->mem and ->iomem share storage in the union, so one check covers both */
	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
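
/*
 * Example (illustrative sketch, not part of the original file): a provider
 * that shares a register block across regions can map it once per device;
 * hint_base and hint_size are hypothetical.
 *
 *	void *hints = devm_nvdimm_memremap(dev, hint_base, hint_size,
 *			MEMREMAP_WB);
 *
 *	if (!hints)
 *		return -ENOMEM;
 *
 * Repeat callers for the same offset share one refcounted mapping, and the
 * devm action drops the reference when the requesting device goes away.
 */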

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
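
/*
 * Example (illustrative sketch, not part of the original file): the label
 * code checksums a namespace index block by zeroing the checksum field,
 * summing the block, then storing the result; nsindex and index_size are
 * hypothetical stand-ins for the real label structures.
 *
 *	nsindex->checksum = cpu_to_le64(0);
 *	checksum = nd_fletcher64(nsindex, index_size, 1);
 *	nsindex->checksum = cpu_to_le64(checksum);
 */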

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached).
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
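
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * sysfs store handler wraps nd_uuid_store() with the device lock it
 * expects; example_ns and its uuid field are hypothetical stand-ins for a
 * real container device's state.
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct example_ns *ns = to_example_ns(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &ns->uuid, buf, len);
 *		device_unlock(dev);
 *
 *		return rc ? rc : len;
 *	}
 */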

ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
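
/*
 * Example (illustrative sketch, not part of the original file): callers
 * provide a zero-terminated table of supported sizes; the values below are
 * hypothetical sector sizes.
 *
 *	static const unsigned long example_lbasize_supported[] = {
 *		512, 520, 528, 4096, 4104, 4160, 0,
 *	};
 *
 * nd_size_select_show() brackets the currently selected entry, e.g.
 * "512 520 528 [4096] ...", and nd_size_select_store() rejects any value
 * not present in the table with -EINVAL.
 */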

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64) INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
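
/*
 * Worked example (illustrative, not part of the original file): for
 * ns_offset = 1536 and len = 600, start_sector = 1536 / 512 = 3 and the
 * range ends at byte 2136, so end_sector rounds up from 4 (rem = 88) to 5;
 * sectors 3 and 4 are marked bad, covering every byte the poison touches.
 */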

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
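
/*
 * Example (illustrative sketch, not part of the original file): a
 * pmem-style region driver populates its disk's badblocks at probe time;
 * pmem and res are hypothetical stand-ins for the driver's state and its
 * namespace's resource range.
 *
 *	if (devm_init_badblocks(dev, &pmem->bb))
 *		return -ENOMEM;
 *	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 */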

static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
		struct nd_poison *pl, u64 addr, u64 length)
{
	lockdep_assert_held(&nvdimm_bus->poison_lock);
	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);
}

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
		gfp_t flags)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), flags);
	if (!pl)
		return -ENOMEM;

	append_poison_entry(nvdimm_bus, pl, addr, length);
	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl, *pl_new;

	/*
	 * A GFP_KERNEL allocation may sleep, so drop the spinlock for the
	 * kzalloc() and retake it before touching the list.
	 */
	spin_unlock(&nvdimm_bus->poison_lock);
	pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
	spin_lock(&nvdimm_bus->poison_lock);

	if (list_empty(&nvdimm_bus->poison_list)) {
		if (!pl_new)
			return -ENOMEM;
		append_poison_entry(nvdimm_bus, pl_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			kfree(pl_new);
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	if (!pl_new)
		return -ENOMEM;
	append_poison_entry(nvdimm_bus, pl_new, addr, length);

	return 0;
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	spin_lock(&nvdimm_bus->poison_lock);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	spin_unlock(&nvdimm_bus->poison_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);

void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
		unsigned int len)
{
	struct list_head *poison_list = &nvdimm_bus->poison_list;
	u64 clr_end = start + len - 1;
	struct nd_poison *pl, *next;

	spin_lock(&nvdimm_bus->poison_lock);
	WARN_ON_ONCE(list_empty(poison_list));

	/*
	 * [start, clr_end] is the poison interval being cleared.
	 * [pl->start, pl_end] is the poison_list entry we're comparing
	 * the above interval against. The poison list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics
	 */

	list_for_each_entry_safe(pl, next, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Skip intervals with no intersection */
		if (pl_end < start)
			continue;
		if (pl->start > clr_end)
			continue;
		/* Delete completely overlapped poison entries */
		if ((pl->start >= start) && (pl_end <= clr_end)) {
			list_del(&pl->list);
			kfree(pl);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= pl->start) && (clr_end > pl->start)) {
			pl->length -= clr_end - pl->start + 1;
			pl->start = clr_end + 1;
			continue;
		}
		/* Adjust pl->length for partial clearing at the tail end */
		if ((pl->start < start) && (pl_end <= clr_end)) {
			/* pl->start remains the same */
			pl->length = start - pl->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((pl->start < start) && (pl_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = pl_end - new_start + 1;

			/* Add new entry covering the right half */
			add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
			/* Adjust this entry to cover the left half */
			pl->length = start - pl->start;
			continue;
		}
	}
	spin_unlock(&nvdimm_bus->poison_lock);
}
EXPORT_SYMBOL_GPL(nvdimm_forget_poison);
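
/*
 * Worked example (illustrative, not part of the original file): with an
 * existing poison entry covering [1000, 1999] (start = 1000, length =
 * 1000), clearing [1500, 1599] takes the split branch above: the entry is
 * trimmed to [1000, 1499] (length = 500) and a new entry is added for
 * [1600, 1999] (new_start = 1600, new_len = 400).
 */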

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));
	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;

	rc = nvdimm_init();
	if (rc)
		goto err_dimm;

	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;

 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);