/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

#include "lightnvm.h"

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);
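
/*
 * Locking: nvm_tgtt_lock protects nvm_tgt_types; nvm_lock protects both
 * nvm_mgrs and nvm_devices. The locks are taken for writing even on pure
 * lookups, so list walkers never race with registration.
 */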

struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
{
	struct nvm_tgt_type *tmp, *tt = NULL;

	if (lock)
		down_write(&nvm_tgtt_lock);

	list_for_each_entry(tmp, &nvm_tgt_types, list)
		if (!strcmp(name, tmp->name)) {
			tt = tmp;
			break;
		}

	if (lock)
		up_write(&nvm_tgtt_lock);
	return tt;
}
EXPORT_SYMBOL(nvm_find_target_type);

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (nvm_find_target_type(tt->name, 0))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	/* the target type list is protected by nvm_tgtt_lock, not nvm_lock */
	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
							dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvmm_type *nvm_find_mgr_type(const char *name)
{
	struct nvmm_type *mt;

	list_for_each_entry(mt, &nvm_mgrs, list)
		if (!strcmp(name, mt->name))
			return mt;

	return NULL;
}
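
/*
 * Bind the device to the media manager named in its system block. Returns
 * the manager when register_mgr reports success (ret > 0), or NULL if no
 * manager matched or initialization failed. Caller must hold nvm_lock.
 */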
static struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
{
	struct nvmm_type *mt;
	int ret;

	lockdep_assert_held(&nvm_lock);

	list_for_each_entry(mt, &nvm_mgrs, list) {
		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
			continue;

		ret = mt->register_mgr(dev);
		if (ret < 0) {
			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
								ret, dev->name);
			return NULL; /* initialization failed */
		} else if (ret > 0)
			return mt;
	}

	return NULL;
}

int nvm_register_mgr(struct nvmm_type *mt)
{
	struct nvm_dev *dev;
	int ret = 0;

	down_write(&nvm_lock);
	if (nvm_find_mgr_type(mt->name)) {
		ret = -EEXIST;
		goto finish;
	} else {
		list_add(&mt->list, &nvm_mgrs);
	}

	/* try to register the media mgr on any device that has none configured */
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (dev->mt)
			continue;

		dev->mt = nvm_init_mgr(dev);
	}
finish:
	up_write(&nvm_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_mgr);

void nvm_unregister_mgr(struct nvmm_type *mt)
{
	if (!mt)
		return;

	down_write(&nvm_lock);
	list_del(&mt->list);
	up_write(&nvm_lock);
}
EXPORT_SYMBOL(nvm_unregister_mgr);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
							unsigned long flags)
{
	return dev->mt->get_blk(dev, lun, flags);
}
EXPORT_SYMBOL(nvm_get_blk);

/* Assumes that all valid pages have already been moved to the media manager
 * when the block is released.
 */
void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->put_blk(dev, blk);
}
EXPORT_SYMBOL(nvm_put_blk);

void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	return dev->mt->mark_blk(dev, ppa, type);
}
EXPORT_SYMBOL(nvm_mark_blk);

int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	return dev->mt->submit_io(dev, rqd);
}
EXPORT_SYMBOL(nvm_submit_io);

int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	return dev->mt->erase_blk(dev, blk, 0);
}
EXPORT_SYMBOL(nvm_erase_blk);

void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = dev_to_generic_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_addr_to_generic_mode);

void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int i;

	if (rqd->nr_ppas > 1) {
		for (i = 0; i < rqd->nr_ppas; i++)
			rqd->ppa_list[i] = generic_to_dev_addr(dev,
							rqd->ppa_list[i]);
	} else {
		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
	}
}
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
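
/*
 * Set up the PPA list for a request. With vblk set, each address is unfolded
 * across all planes and stored plane-major, i.e. ppa_list[pl * nr_ppas + i],
 * so one virtual block address covers its physical siblings on every plane.
 */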
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas, int vblk)
{
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("nvm: failed to allocate dma memory\n");
		return -ENOMEM;
	}

	if (!vblk) {
		for (i = 0; i < nr_ppas; i++)
			rqd->ppa_list[i] = ppas[i];
	} else {
		plane_cnt = dev->plane_mode;
		rqd->nr_ppas *= plane_cnt;

		for (i = 0; i < nr_ppas; i++) {
			for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
				ppa = ppas[i];
				ppa.g.pl = pl_idx;
				rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(nvm_set_rqd_ppalist);

void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
}
EXPORT_SYMBOL(nvm_free_rqd_ppalist);

int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_rq rqd;
	int ret;

	if (!dev->ops->erase_block)
		return 0;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
	if (ret)
		return ret;

	nvm_generic_to_addr_mode(dev, &rqd);

	ret = dev->ops->erase_block(dev, &rqd);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_erase_ppa);

void nvm_end_io(struct nvm_rq *rqd, int error)
{
	rqd->error = error;
	rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static void nvm_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->wait;

	rqd->wait = NULL;

	complete(waiting);
}
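
/*
 * Synchronous submission path: map the kernel buffer into a bio, hand the
 * request to the device and wait on an on-stack completion, polling in
 * hang_check-sized intervals so the hung task watchdog stays quiet.
 */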
static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
						int flags, void *buf, int len)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret;
	unsigned long hang_check;

	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
	if (IS_ERR_OR_NULL(bio))
		return -ENOMEM;

	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->opcode = opcode;
	rqd->flags = flags;
	rqd->bio = bio;
	rqd->wait = &wait;
	rqd->end_io = nvm_end_io_sync;

	ret = dev->ops->submit_io(dev, rqd);
	if (ret) {
		bio_put(bio);
		return ret;
	}

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&wait,
							hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&wait);

	return rqd->error;
}

/**
 * nvm_submit_ppa_list - submit a user-defined ppa list to the device. The
 *			 caller must take care to free the ppa list afterwards
 *			 if necessary.
 * @dev: device
 * @ppa_list: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
		int nr_ppas, int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;

	if (dev->ops->max_phys_sect < nr_ppas)
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.nr_ppas = nr_ppas;
	if (nr_ppas > 1)
		rqd.ppa_list = ppa_list;
	else
		rqd.ppa_addr = ppa_list[0];

	return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
}
EXPORT_SYMBOL(nvm_submit_ppa_list);

/**
 * nvm_submit_ppa - submit PPAs to the device. The PPAs are automatically
 *		    unfolded into single-, dual- or quad-plane PPAs depending
 *		    on the device's plane mode.
 * @dev: device
 * @ppa: user created ppa_list
 * @nr_ppas: length of ppa_list
 * @opcode: device opcode
 * @flags: device flags
 * @buf: data buffer
 * @len: data buffer length
 */
int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
				int opcode, int flags, void *buf, int len)
{
	struct nvm_rq rqd;
	int ret;

	memset(&rqd, 0, sizeof(struct nvm_rq));
	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
	if (ret)
		return ret;

	ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);

	nvm_free_rqd_ppalist(dev, &rqd);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);

/*
 * Folds a bad block list from its plane representation to its virtual block
 * representation. The fold is done in place and the reduced size is returned.
 *
 * If any of the planes is marked as a bad or grown bad block, the virtual
 * block is marked bad. Otherwise, the first plane's state acts as the block
 * state.
 */
int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
{
	int blk, offset, pl, blktype;

	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
		return -EINVAL;

	for (blk = 0; blk < dev->blks_per_lun; blk++) {
		offset = blk * dev->plane_mode;
		blktype = blks[offset];

		/* Bad blocks on any planes take precedence over other types */
		for (pl = 0; pl < dev->plane_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		blks[blk] = blktype;
	}

	return dev->blks_per_lun;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);

int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
{
	ppa = generic_to_dev_addr(dev, ppa);

	return dev->ops->get_bb_tbl(dev, ppa, blks);
}
EXPORT_SYMBOL(nvm_get_bb_tbl);

static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i;

	dev->lps_per_blk = dev->pgs_per_blk;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* Just a linear array */
	for (i = 0; i < dev->lps_per_blk; i++)
		dev->lptbl[i] = i;

	return 0;
}

static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
	int i, p;
	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;

	if (!mlc->num_pairs)
		return 0;

	dev->lps_per_blk = mlc->num_pairs;
	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
	if (!dev->lptbl)
		return -ENOMEM;

	/* The lower page table encoding is a list of bytes, each split into a
	 * lower and an upper half. The first half-byte holds the initial
	 * value; every half-byte after it is an increment added to the
	 * previous value.
	 */
	dev->lptbl[0] = mlc->pairs[0] & 0xF;
	for (i = 1; i < dev->lps_per_blk; i++) {
		p = mlc->pairs[i >> 1];
		if (i & 0x1) /* upper */
			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
		else /* lower */
			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
	}

	return 0;
}

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_id *id = &dev->identity;
	struct nvm_id_group *grp = &id->groups[0];
	int ret;

	/* device values */
	dev->nr_chnls = grp->num_ch;
	dev->luns_per_chnl = grp->num_lun;
	dev->pgs_per_blk = grp->num_pg;
	dev->blks_per_lun = grp->num_blk;
	dev->nr_planes = grp->num_pln;
	dev->fpg_size = grp->fpg_sz;
	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
	dev->sec_size = grp->csecs;
	dev->oob_size = grp->sos;
	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
	dev->mccap = grp->mccap;
	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));

	dev->plane_mode = NVM_PLANE_SINGLE;
	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
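
	/*
	 * mpos advertises the supported plane modes per operation; mask
	 * 0x020202 covers the dual-plane bit in each operation's byte,
	 * 0x040404 the quad-plane bit.
	 */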
	if (grp->mpos & 0x020202)
		dev->plane_mode = NVM_PLANE_DOUBLE;
	if (grp->mpos & 0x040404)
		dev->plane_mode = NVM_PLANE_QUAD;

	if (grp->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* calculated values */
	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;

	dev->total_secs = dev->nr_luns * dev->sec_per_lun;

	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	switch (grp->fmtype) {
	case NVM_ID_FMTYPE_SLC:
		if (nvm_init_slc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	case NVM_ID_FMTYPE_MLC:
		if (nvm_init_mlc_tbl(dev, grp)) {
			ret = -ENOMEM;
			goto err_fmtype;
		}
		break;
	default:
		pr_err("nvm: flash type not supported\n");
		ret = -EINVAL;
		goto err_fmtype;
	}

	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	blk_queue_logical_block_size(dev->q, dev->sec_size);

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free_mgr(struct nvm_dev *dev)
{
	if (!dev->mt)
		return;

	dev->mt->unregister_mgr(dev);
	dev->mt = NULL;
}

void nvm_free(struct nvm_dev *dev)
{
	if (!dev)
		return;

	nvm_free_mgr(dev);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	kfree(dev->lptbl);
	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	int ret = -EINVAL;

	if (!dev->q || !dev->ops)
		return ret;

	if (dev->ops->identity(dev, &dev->identity)) {
		pr_err("nvm: device could not be identified\n");
		goto err;
	}

	pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
			dev->identity.ver_id, dev->identity.vmnt,
			dev->identity.cgrps);

	if (dev->identity.ver_id != 1) {
		pr_err("nvm: device not supported by kernel.\n");
		goto err;
	}

	if (dev->identity.cgrps != 1) {
		pr_err("nvm: only one group configuration supported.\n");
		goto err;
	}

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("nvm: could not initialize core structures.\n");
		goto err;
	}

	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
			dev->name, dev->sec_per_pg, dev->nr_planes,
			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
			dev->nr_chnls);
	return 0;
err:
	pr_err("nvm: failed to initialize nvm\n");
	return ret;
}

static void nvm_exit(struct nvm_dev *dev)
{
	nvm_sysfs_unregister_dev(dev);
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
}
EXPORT_SYMBOL(nvm_alloc_dev);
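
/*
 * Registration path: identify the device and derive its geometry, create the
 * per-device DMA pool for PPA lists, expose the device through sysfs, read
 * the system block (when the device keeps bad block management on-media) and,
 * if that succeeds, try to bind a media manager.
 */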
int nvm_register(struct nvm_dev *dev)
{
	int ret;

	ret = nvm_init(dev);
	if (ret)
		goto err_init;

	if (dev->ops->max_phys_sect > 256) {
		pr_info("nvm: max sectors supported is 256.\n");
		ret = -EINVAL;
		goto err_init;
	}

	if (dev->ops->max_phys_sect > 1) {
		dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
		if (!dev->dma_pool) {
			pr_err("nvm: could not create dma pool\n");
			ret = -ENOMEM;
			goto err_init;
		}
	}

	ret = nvm_sysfs_register_dev(dev);
	if (ret)
		goto err_ppalist;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_get_sysblock(dev, &dev->sb);
		if (!ret)
			pr_err("nvm: device not initialized.\n");
		else if (ret < 0)
			pr_err("nvm: err (%d) on device initialization\n", ret);
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	if (ret > 0)
		dev->mt = nvm_init_mgr(dev);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
err_ppalist:
	dev->ops->destroy_dma_pool(dev->dma_pool);
err_init:
	kfree(dev->lun_map);
	return ret;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	nvm_exit(dev);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	struct nvm_ioctl_create_simple *s;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt) {
		pr_info("nvm: device has no media manager registered.\n");
		return -ENODEV;
	}

	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
		pr_err("nvm: config type not valid\n");
		return -EINVAL;
	}
	s = &create->conf.s;

	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
		pr_err("nvm: lun out of bounds (%u:%u > %u)\n",
			s->lun_begin, s->lun_end, dev->nr_luns);
		return -EINVAL;
	}

	return dev->mt->create_tgt(dev, create);
}

#ifdef CONFIG_NVM_DEBUG
static int nvm_configure_show(const char *val)
{
	struct nvm_dev *dev;
	char opcode, devname[DISK_NAME_LEN];
	int ret;

	/* width 31 leaves room for the terminating NUL in devname */
	ret = sscanf(val, "%c %31s", &opcode, devname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
		return -EINVAL;
	}

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(devname);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	if (!dev->mt)
		return 0;

	dev->mt->lun_info_print(dev);

	return 0;
}

static int nvm_configure_remove(const char *val)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	char opcode;
	int ret = 0;

	ret = sscanf(val, "%c %31s", &opcode, remove.tgtname);
	if (ret != 2) {
		pr_err("nvm: invalid command. Use \"d targetname\".\n");
		return -EINVAL;
	}

	remove.flags = 0;

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* skip devices without a media manager */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static int nvm_configure_create(const char *val)
{
	struct nvm_ioctl_create create;
	char opcode;
	int lun_begin, lun_end, ret;

	/* field widths are one less than the destination buffer sizes */
	ret = sscanf(val, "%c %31s %31s %47s %u:%u", &opcode, create.dev,
						create.tgtname, create.tgttype,
						&lun_begin, &lun_end);
	if (ret != 6) {
		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
		return -EINVAL;
	}

	create.flags = 0;
	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
	create.conf.s.lun_begin = lun_begin;
	create.conf.s.lun_end = lun_end;

	return __nvm_configure_create(&create);
}

/* Exposes the administrative interface through
 * /sys/module/lnvm/parameters/configure_debug
 */
static int nvm_configure_by_str_event(const char *val,
					const struct kernel_param *kp)
{
	char opcode;
	int ret;

	ret = sscanf(val, "%c", &opcode);
	if (ret != 1) {
		pr_err("nvm: string must have the format of \"cmd ...\"\n");
		return -EINVAL;
	}

	switch (opcode) {
	case 'a':
		return nvm_configure_create(val);
	case 'd':
		return nvm_configure_remove(val);
	case 's':
		return nvm_configure_show(val);
	default:
		pr_err("nvm: invalid command\n");
		return -EINVAL;
	}

	return 0;
}

static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
	int sz;
	struct nvm_dev *dev;

	sz = sprintf(buf, "available devices:\n");
	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		if (sz > 4095 - DISK_NAME_LEN - 2)
			break;
		sz += sprintf(buf + sz, " %32s\n", dev->name);
	}
	up_write(&nvm_lock);

	return sz;
}

static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
	.set	= nvm_configure_by_str_event,
	.get	= nvm_configure_get,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"lnvm."

module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
									0644);

#endif /* CONFIG_NVM_DEBUG */
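
/*
 * The ioctl handlers below back the /dev/lightnvm/control node registered
 * by the miscdevice at the bottom of this file; every command requires
 * CAP_SYS_ADMIN.
 */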
static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	/* the target type list is protected by nvm_tgtt_lock, not nvm_lock */
	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		sprintf(info->devname, "%s", dev->name);
		if (dev->mt) {
			info->bmversion[0] = dev->mt->version[0];
			info->bmversion[1] = dev->mt->version[1];
			info->bmversion[2] = dev->mt->version[2];
			sprintf(info->bmname, "%s", dev->mt->name);
		} else {
			sprintf(info->bmname, "none");
		}

		i++;
		if (i > 31) {
			pr_err("nvm: max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;
	struct nvm_dev *dev;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	list_for_each_entry(dev, &nvm_devices, devices) {
		/* skip devices without a media manager */
		if (!dev->mt)
			continue;

		ret = dev->mt->remove_tgt(dev, &remove);
		if (!ret)
			break;
	}

	return ret;
}

static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
{
	info->seqnr = 1;
	info->erase_cnt = 0;
	info->version = 1;
}

static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
{
	struct nvm_dev *dev;
	struct nvm_sb_info info;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(init->dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_setup_nvm_sb_info(&info);

	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
	info.fs_ppa.ppa = -1;

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
		ret = nvm_init_sysblock(dev, &info);
		if (ret)
			return ret;
	}

	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));

	down_write(&nvm_lock);
	dev->mt = nvm_init_mgr(dev);
	up_write(&nvm_lock);

	return 0;
}

static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("nvm: no flags supported\n");
		return -EINVAL;
	}

	init.dev[DISK_NAME_LEN - 1] = '\0';

	return __nvm_ioctl_dev_init(&init);
}

static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;
	struct nvm_dev *dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(fact.dev);
	up_write(&nvm_lock);
	if (!dev) {
		pr_err("nvm: device not found\n");
		return -EINVAL;
	}

	nvm_free_mgr(dev);

	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
		return nvm_dev_factory(dev, fact.flags);

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
module_misc_device(_nvm_misc);

MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");