aml_nftl_wl.c

/**
 * wl.c - nftl wear leveling & garbage collection
 */
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "aml_nftl.h"

#define list_to_node(l)		container_of(l, struct wl_list_t, list)
#define list_to_gc_node(l)	container_of(l, struct gc_blk_list, list)
/**
 * construct_lnode - construct list node
 * @vt_blk: logical block addr
 * @phy_blk: physical block addr
 *
 * 1. check valid block addr
 * 2. malloc node
 * 3. fill in logical & physical block
 *
 * If the logical block is not a valid value, NULL will be returned,
 * else return the list node.
 */
static inline struct wl_list_t *construct_lnode(struct aml_nftl_wl_t *wl, addr_blk_t vt_blk, addr_blk_t phy_blk)
{
	struct wl_list_t *lnode;

	if (vt_blk == BLOCK_INIT_VALUE)
		return NULL;

	lnode = aml_nftl_malloc(sizeof(struct wl_list_t));
	if (lnode) {
		lnode->vt_blk = vt_blk;
		lnode->phy_blk = phy_blk;
	}
	return lnode;
}
/**
 * construct_tnode - construct tree node
 * @blk: linear block addr
 *
 * 1. check valid block addr
 * 2. malloc node
 * 3. fill in linear block & ec
 *
 * If the linear block is not a valid value, NULL will be returned,
 * else return the tree node.
 */
static inline struct wl_rb_t *construct_tnode(struct aml_nftl_wl_t *wl, addr_blk_t blk)
{
	struct aml_nftl_info_t *aml_nftl_info = wl->aml_nftl_info;
	struct wl_rb_t *tnode;
	struct phyblk_node_t *phy_blk_node;

	if (blk == BLOCK_INIT_VALUE)
		return NULL;

	phy_blk_node = &aml_nftl_info->phypmt[blk];
	tnode = aml_nftl_malloc(sizeof(struct wl_rb_t));
	if (tnode) {
		tnode->blk = blk;
		tnode->ec = phy_blk_node->ec;
	}
	return tnode;
}
/**
 * del_from_tree - delete tree node from red-black tree
 * @tree: free / erased / used tree
 * @tnode: tree node
 *
 * Erase the tree node, decrement the tree count & free the node.
 */
static void del_from_tree(struct aml_nftl_wl_t *wl, struct wl_tree_t *tree, struct wl_rb_t *tnode)
{
	rb_erase(&tnode->rb_node, &tree->root);
	tree->count--;
	aml_nftl_free(tnode);
	return;
}

/**
 * _del_free - delete tree node from free tree
 */
static inline void _del_free(struct aml_nftl_wl_t *wl, struct wl_rb_t *tnode)
{
	del_from_tree(wl, &wl->free_root, tnode);
	return;
}

/**
 * _del_used - delete tree node from used tree
 */
static inline void _del_used(struct aml_nftl_wl_t *wl, struct wl_rb_t *tnode)
{
	del_from_tree(wl, &wl->used_root, tnode);
}

/**
 * _del_erased - delete tree node from erased tree
 */
static inline void _del_erased(struct aml_nftl_wl_t *wl, struct wl_rb_t *tnode)
{
	del_from_tree(wl, &wl->erased_root, tnode);
}
/**
 * add_tree - add tree node into red-black tree
 * @tree: free / erased / used tree
 * @tnode: tree node
 *
 * To avoid identical erase counts in the rb tree (which would make the
 * rb_left / rb_right choice ambiguous), the tree is keyed on a combined
 * value, ec<<16 + blk, without modifying the rb structure.
 * 1. make the ec_blk key
 * 2. find a suitable place in the tree
 * 3. link node & insert it at the place we just found
 * 4. increment the tree node count
 */
static void add_tree(struct aml_nftl_wl_t *wl, struct wl_tree_t *tree, struct wl_rb_t *tnode)
{
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct wl_rb_t *cur;
	uint32_t node_ec_blk = MAKE_EC_BLK(tnode->ec, tnode->blk);
	uint32_t cur_ec_blk;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct wl_rb_t, rb_node);
		cur_ec_blk = MAKE_EC_BLK(cur->ec, cur->blk);
		if (node_ec_blk < cur_ec_blk)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&tnode->rb_node, parent, p);
	rb_insert_color(&tnode->rb_node, &tree->root);
	tree->count++;
	return;
}
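
/*
 * Illustration only (a hedged sketch, not the definition from aml_nftl.h,
 * which provides the real MAKE_EC_BLK): going by the comment above, the
 * combined key presumably puts the erase count in the high 16 bits and the
 * block number in the low 16 bits, so two blocks with the same erase count
 * still produce distinct, ec-ordered tree keys.
 */
#if 0
#define MAKE_EC_BLK(ec, blk)	(((uint32_t)(ec) << 16) + (blk))
/*
 * Example: ec = 5, blk = 10 -> key 0x0005000a
 *          ec = 5, blk = 11 -> key 0x0005000b
 * Ordering by key is therefore ordering by erase count first, block second.
 */
#endif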
/**
 * search_tree - search for an ec_blk in the rb tree, get the tree node
 * @tree: tree searched in
 * @blk: linear block addr
 * @ec: erase count
 *
 * The tree is keyed on the same combined value, ec<<16 + blk, used by
 * add_tree, so identical erase counts never collide.
 * 1. make the ec_blk key
 * 2. search for the same ec_blk in the tree
 * 3. return the tree node or NULL
 */
static struct wl_rb_t *search_tree(struct aml_nftl_wl_t *wl, struct wl_tree_t *tree, addr_blk_t blk,
				   erase_count_t ec)
{
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct wl_rb_t *cur;
	uint32_t node_ec_blk = MAKE_EC_BLK(ec, blk);
	uint32_t cur_ec_blk;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct wl_rb_t, rb_node);
		cur_ec_blk = MAKE_EC_BLK(cur->ec, cur->blk);
		if (node_ec_blk < cur_ec_blk)
			p = &parent->rb_left;
		else if (node_ec_blk > cur_ec_blk)
			p = &parent->rb_right;
		else
			return cur;
	}
	return NULL;
}
static void add_used(struct aml_nftl_wl_t *wl, addr_blk_t blk)
{
	struct wl_rb_t *tnode;

	tnode = construct_tnode(wl, blk);
	if (tnode)
		add_tree(wl, &wl->used_root, tnode);
	return;
}

/**
 * add_erased - add erased free block into erased rb tree
 * @blk: linear block addr
 */
static void add_erased(struct aml_nftl_wl_t *wl, addr_blk_t blk)
{
	struct aml_nftl_info_t *aml_nftl_info = wl->aml_nftl_info;
	struct phyblk_node_t *phy_blk_node;
	struct wl_rb_t *tnode;

	phy_blk_node = &aml_nftl_info->phypmt[blk];
	tnode = search_tree(wl, &wl->used_root, blk, phy_blk_node->ec);
	if (tnode)
		_del_used(wl, tnode);
	tnode = construct_tnode(wl, blk);
	if (tnode)
		add_tree(wl, &wl->erased_root, tnode);
}
/**
 * add_free - add free block into free rb tree
 * @blk: linear block addr
 *
 * 1. construct tree node
 * 2. add node into free rb tree
 * 3. update current delta (current free block ec - coldest used block ec)
 */
static void add_free(struct aml_nftl_wl_t *wl, addr_blk_t blk)
{
	struct aml_nftl_info_t *aml_nftl_info = wl->aml_nftl_info;
	struct phyblk_node_t *phy_blk_node;
	struct wl_rb_t *tnode_cold;
	struct wl_rb_t *tnode;

	if (blk < 0)
		return;

	phy_blk_node = &aml_nftl_info->phypmt[blk];
	tnode = search_tree(wl, &wl->used_root, blk, phy_blk_node->ec);
	if (tnode)
		_del_used(wl, tnode);
	tnode = construct_tnode(wl, blk);
	if (tnode) {
		add_tree(wl, &wl->free_root, tnode);
		/* update current delta (current free block ec - coldest used block ec) */
		tnode_cold = rb_entry(rb_first(&wl->used_root.root), struct wl_rb_t, rb_node);
		if ((tnode->ec > tnode_cold->ec) && (tnode_cold->ec > 0) && (tnode->ec > 0))
			wl->cur_delta = tnode->ec - tnode_cold->ec;
	}
}
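
/*
 * Worked example (assuming wl_delta is initialised to WL_DELTA, as done in
 * aml_nftl_wl_init below): if the free block just added has ec = 120 and the
 * coldest used block (rb_first of used_root) has ec = 100, cur_delta becomes
 * 20. Once cur_delta >= wl_delta, gc_get_dirty_block() below triggers
 * staticwl_linear_blk() to move cold data onto a more-worn block.
 */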
/**
 * staticwl_linear_blk - static wear leveling for this linear block
 * @blk: root linear block
 *
 * The static wl target should be a logical block whose node holds only
 * the root block.
 *
 * 1. get root sector map
 * 2. get best free block
 * 3. calculate src, dest page addr base
 * 4. do copy_page from 0 to block end
 * 5. add this block to free tree
 */
static int32_t staticwl_linear_blk(struct aml_nftl_wl_t *aml_nftl_wl, addr_blk_t blk)
{
	struct aml_nftl_info_t *aml_nftl_info = aml_nftl_wl->aml_nftl_info;
	struct phyblk_node_t *phy_blk_node_src, *phy_blk_node_dest;
	struct vtblk_node_t *vt_blk_node;
	uint16_t i, retry_cnt = 0;
	addr_blk_t dest_blk, src_blk;
	addr_page_t dest_page;
	addr_page_t src_page;

WRITE_RETRY:
	if (aml_nftl_wl->get_best_free(aml_nftl_wl, &dest_blk)) {
		aml_nftl_dbg("%s line:%d nftl couldn't get best free block: %d %d\n", __func__, __LINE__, aml_nftl_wl->free_root.count, aml_nftl_wl->wait_gc_block);
		return -ENOENT;
	}
	aml_nftl_wl->add_used(aml_nftl_wl, dest_blk);
	vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + blk));
	src_blk = vt_blk_node->phy_blk_addr;
	phy_blk_node_src = &aml_nftl_info->phypmt[src_blk];
	phy_blk_node_dest = &aml_nftl_info->phypmt[dest_blk];
	phy_blk_node_dest->vtblk = phy_blk_node_src->vtblk;
	if (phy_blk_node_src->timestamp >= MAX_TIMESTAMP_NUM)
		phy_blk_node_dest->timestamp = 0;
	else
		phy_blk_node_dest->timestamp = (phy_blk_node_src->timestamp + 1);

	dest_page = 0;
	for (i = 0; i < aml_nftl_wl->pages_per_blk; i++) {
		src_page = phy_blk_node_src->phy_page_map[i];
		if (src_page < 0)
			continue;
		dest_page = phy_blk_node_dest->last_write + 1;
		if ((aml_nftl_info->copy_page(aml_nftl_info, dest_blk, dest_page, src_blk, src_page) == 2) && (retry_cnt++ < 3)) {
			/* write error: mark the destination bad and retry with a new free block */
			//aml_nftl_wl->add_free(aml_nftl_wl, dest_blk);
			aml_nftl_info->blk_mark_bad(aml_nftl_info, dest_blk);
			aml_nftl_dbg("%s: nftl write page failed blk: %d page: %d, retry_cnt:%d\n", __func__, dest_blk, dest_page, retry_cnt);
			goto WRITE_RETRY;
		}
	}
	aml_nftl_wl->add_free(aml_nftl_wl, src_blk);
	vt_blk_node->phy_blk_addr = dest_blk;
	return 0;
}
static void add_gc(struct aml_nftl_wl_t *aml_nftl_wl, addr_blk_t gc_blk_addr)
{
	struct gc_blk_list *gc_add_list, *gc_cur_list, *gc_prev_list = NULL;
	struct list_head *l, *n;

	if (gc_blk_addr < NFTL_FAT_TABLE_NUM)
		return;

	/* keep gc_blk_list sorted by block addr; bail out on duplicates */
	if (!list_empty(&aml_nftl_wl->gc_blk_list)) {
		list_for_each_safe(l, n, &aml_nftl_wl->gc_blk_list) {
			gc_cur_list = list_to_gc_node(l);
			if (gc_cur_list->gc_blk_addr == gc_blk_addr)
				return;
			else if (gc_blk_addr < gc_cur_list->gc_blk_addr) {
				gc_cur_list = gc_prev_list;
				break;
			} else
				gc_prev_list = gc_cur_list;
		}
	} else {
		gc_cur_list = NULL;
	}

	gc_add_list = aml_nftl_malloc(sizeof(struct gc_blk_list));
	if (!gc_add_list)
		return;
	gc_add_list->gc_blk_addr = gc_blk_addr;
	if (gc_cur_list != NULL)
		list_add(&gc_add_list->list, &gc_cur_list->list);
	else
		list_add(&gc_add_list->list, &aml_nftl_wl->gc_blk_list);
	return;
}
static int gc_get_dirty_block(struct aml_nftl_wl_t *aml_nftl_wl, uint8_t gc_flag)
{
	struct aml_nftl_info_t *aml_nftl_info = aml_nftl_wl->aml_nftl_info;
	struct gc_blk_list *gc_cur_list;
	struct list_head *l, *n;
	struct vtblk_node_t *vt_blk_node;
	struct phyblk_node_t *phy_blk_node;
	addr_blk_t dest_blk;
	int16_t vt_blk_num, start_free_num;
	int node_length, valid_page, k;

	start_free_num = aml_nftl_wl->free_root.count;
	if (!list_empty(&aml_nftl_wl->gc_blk_list)) {
		list_for_each_safe(l, n, &aml_nftl_wl->gc_blk_list) {
			gc_cur_list = list_to_gc_node(l);
			vt_blk_num = gc_cur_list->gc_blk_addr;
			if (vt_blk_num == aml_nftl_info->current_write_block)
				continue;
			if ((vt_blk_num >= aml_nftl_info->current_write_block - MAX_BLK_NUM_PER_NODE)
			    && (vt_blk_num <= aml_nftl_info->current_write_block + MAX_BLK_NUM_PER_NODE)
			    && (gc_flag != DO_COPY_PAGE))
				continue;

			vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk_num));
			node_length = aml_nftl_get_node_length(aml_nftl_info, vt_blk_node);
			if (node_length < BASIC_BLK_NUM_PER_NODE) {
				list_del(&gc_cur_list->list);
				aml_nftl_free(gc_cur_list);
				continue;
			}

			aml_nftl_check_node(aml_nftl_info, vt_blk_num);
			vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk_num));
			node_length = aml_nftl_get_node_length(aml_nftl_info, vt_blk_node);
			if (node_length < BASIC_BLK_NUM_PER_NODE) {
				list_del(&gc_cur_list->list);
				aml_nftl_free(gc_cur_list);
				continue;
			}

			if (aml_nftl_wl->free_root.count <= AML_LIMIT_FACTOR) {
				vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk_num));
				dest_blk = vt_blk_node->phy_blk_addr;
				phy_blk_node = &aml_nftl_info->phypmt[dest_blk];
				aml_nftl_info->get_phy_sect_map(aml_nftl_info, dest_blk);
				valid_page = 0;
				for (k = 0; k < aml_nftl_wl->pages_per_blk; k++) {
					if (phy_blk_node->phy_page_map[k] >= 0)
						valid_page++;
				}
				if ((valid_page < (phy_blk_node->last_write + 1)) && (gc_flag != DO_COPY_PAGE))
					continue;
			}

			aml_nftl_wl->wait_gc_block = vt_blk_num;
			list_del(&gc_cur_list->list);
			aml_nftl_free(gc_cur_list);
			break;
		}
	} else {
		/* gc list empty: scan all accessible blocks, static-wl single-block
		 * nodes and queue multi-block nodes for garbage collection */
		for (vt_blk_num = aml_nftl_info->accessibleblocks - 1; vt_blk_num >= 0; vt_blk_num--) {
			vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk_num));
			if (vt_blk_node == NULL)
				continue;
			node_length = aml_nftl_get_node_length(aml_nftl_info, vt_blk_node);
			if (node_length == 1) {
				if (aml_nftl_wl->cur_delta >= aml_nftl_wl->wl_delta)
					staticwl_linear_blk(aml_nftl_wl, vt_blk_num);
				continue;
			}
			aml_nftl_wl->add_gc(aml_nftl_wl, vt_blk_num);
		}
	}
	return (aml_nftl_wl->free_root.count - start_free_num);
}
/**
 * gc_copy_one - garbage collect one logical block
 * @vt_blk: logical block addr
 * @gc_flag: 0, DO_COPY_PAGE or DO_COPY_PAGE_AVERAGELY
 *
 * Copy still-valid pages from the older blocks of this logical block's
 * node chain into the newest (root) block, then release the obsolete
 * blocks back to the free tree.
 *
 * 1. for each page missing from the root block, walk the node chain for
 *    the newest valid, non-deleted copy
 * 2. copy_page it into the root block
 * 3. on write error, run bad block handling and retry
 * 4. free the now-obsolete blocks of the chain
 * With gc_flag == 0 only pages whose newest copy lives in the oldest
 * block are copied and only that block is freed; with gc_flag set, all
 * older blocks are collected, and DO_COPY_PAGE_AVERAGELY limits the pages
 * copied per call to page_copy_per_gc.
 */
static int32_t gc_copy_one(struct aml_nftl_wl_t *aml_nftl_wl, addr_blk_t vt_blk, uint8_t gc_flag)
{
	int gc_free = 0, node_length, node_length_cnt, k, writed_pages = 0, retry_cnt = 0;
	struct aml_nftl_info_t *aml_nftl_info = aml_nftl_wl->aml_nftl_info;
	struct phyblk_node_t *phy_blk_src_node, *phy_blk_node_dest;
	struct vtblk_node_t *vt_blk_node, *vt_blk_node_free;
	addr_blk_t dest_blk, src_blk;
	addr_page_t dest_page, src_page;

	vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk));
	if (vt_blk_node == NULL)
		return -ENOMEM;

WRITE_RETRY:
	dest_blk = vt_blk_node->phy_blk_addr;
	phy_blk_node_dest = &aml_nftl_info->phypmt[dest_blk];
	node_length = aml_nftl_get_node_length(aml_nftl_info, vt_blk_node);
	for (k = 0; k < aml_nftl_wl->pages_per_blk; k++) {
		if (phy_blk_node_dest->phy_page_map[k] >= 0)
			continue;
		node_length_cnt = 0;
		src_blk = -1;
		src_page = -1;
		vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk));
		while (vt_blk_node != NULL) {
			node_length_cnt++;
			src_blk = vt_blk_node->phy_blk_addr;
			phy_blk_src_node = &aml_nftl_info->phypmt[src_blk];
			if ((phy_blk_src_node->phy_page_map[k] >= 0) && (phy_blk_src_node->phy_page_delete[k>>3] & (1 << (k % 8)))) {
				src_page = phy_blk_src_node->phy_page_map[k];
				break;
			}
			vt_blk_node = vt_blk_node->next;
		}
		if ((src_page < 0) || (src_blk < 0) || ((node_length_cnt < node_length) && (gc_flag == 0)))
			continue;

		dest_page = phy_blk_node_dest->last_write + 1;
		if ((aml_nftl_info->copy_page(aml_nftl_info, dest_blk, dest_page, src_blk, src_page) == 2) && (retry_cnt++ < 3)) {
			/* write error */
			aml_nftl_dbg("%s, line %d, nftl write page failed blk: %d retry_cnt:%d\n", __func__, __LINE__, dest_blk, retry_cnt);
#if 1
			if (aml_nftl_badblock_handle(aml_nftl_info, dest_blk, vt_blk) >= 0) /* success */
				goto WRITE_RETRY;
			else {
				aml_nftl_dbg("%s, line %d, aml_nftl_badblock_handle failed blk: %d retry_cnt:%d\n", __func__, __LINE__, dest_blk, retry_cnt);
				return -ENOENT;
			}
#else
			aml_nftl_dbg("%s line:%d, nftl write page failed blk: %d page: %d , retry_cnt:%d\n", __func__, __LINE__, dest_blk, dest_page, retry_cnt);
			//aml_nftl_wl->add_free(aml_nftl_wl, dest_blk);
			aml_nftl_info->blk_mark_bad(aml_nftl_info, dest_blk);
			if (aml_nftl_wl->get_best_free(aml_nftl_wl, &dest_blk)) {
				aml_nftl_dbg("%s line:%d, nftl write page failed blk: %d page: %d , retry_cnt:%d\n", __func__, __LINE__, dest_blk, dest_page, retry_cnt);
				return -ENOENT;
			}
			goto WRITE_RETRY;
#endif
		}
		writed_pages++;
		if ((gc_flag == DO_COPY_PAGE_AVERAGELY) && (writed_pages >= aml_nftl_wl->page_copy_per_gc) && (k < (aml_nftl_wl->pages_per_blk - aml_nftl_wl->page_copy_per_gc)))
			goto exit;
	}

	node_length_cnt = 0;
	vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + vt_blk));
	while (vt_blk_node->next != NULL) {
		node_length_cnt++;
		vt_blk_node_free = vt_blk_node->next;
		if (((node_length_cnt == (node_length - 1)) && (gc_flag == 0))
		    || ((node_length_cnt > 0) && (gc_flag != 0))) {
			aml_nftl_wl->add_free(aml_nftl_wl, vt_blk_node_free->phy_blk_addr);
			vt_blk_node->next = vt_blk_node_free->next;
			aml_nftl_free(vt_blk_node_free);
			gc_free++;
			continue;
		}
		if (vt_blk_node->next != NULL)
			vt_blk_node = vt_blk_node->next;
	}
exit:
	return gc_free;
}
static int aml_nftl_garbage_collect(struct aml_nftl_wl_t *aml_nftl_wl, uint8_t gc_flag)
{
	struct vtblk_node_t *vt_blk_node;
	struct phyblk_node_t *phy_blk_node;
	addr_blk_t dest_blk, vt_blk_num;
	int gc_num = 0, copy_page_bounce_num, copy_page_total_num, valid_page, k;
	struct aml_nftl_info_t *aml_nftl_info = aml_nftl_wl->aml_nftl_info;

	if ((aml_nftl_info->fillfactor / 4) >= AML_LIMIT_FACTOR)
		copy_page_total_num = aml_nftl_info->fillfactor / 4;
	else
		copy_page_total_num = AML_LIMIT_FACTOR;
	copy_page_bounce_num = copy_page_total_num * 2;

	if ((aml_nftl_wl->wait_gc_block < 0) && (aml_nftl_wl->free_root.count < copy_page_bounce_num)) {
		gc_get_dirty_block(aml_nftl_wl, 0);
		if (aml_nftl_wl->wait_gc_block < 0)
			gc_get_dirty_block(aml_nftl_wl, DO_COPY_PAGE);
		if ((gc_flag == 0) && (aml_nftl_wl->free_root.count >= copy_page_total_num))
			return 0;
	}

	if (aml_nftl_wl->wait_gc_block >= 0) {
		/* decide how many pages to copy per gc pass based on free-block pressure */
		if ((aml_nftl_wl->free_root.count < copy_page_total_num) || (gc_flag == DO_COPY_PAGE))
			aml_nftl_wl->page_copy_per_gc = aml_nftl_wl->pages_per_blk;
		else if (aml_nftl_wl->free_root.count >= (copy_page_total_num + AML_LIMIT_FACTOR))
			aml_nftl_wl->page_copy_per_gc = 4;
		else
			aml_nftl_wl->page_copy_per_gc = 8;

		vt_blk_node = (struct vtblk_node_t *)(*(aml_nftl_info->vtpmt + aml_nftl_wl->wait_gc_block));
		dest_blk = vt_blk_node->phy_blk_addr;
		phy_blk_node = &aml_nftl_info->phypmt[dest_blk];
		aml_nftl_info->get_phy_sect_map(aml_nftl_info, dest_blk);
		valid_page = 0;
		for (k = 0; k < aml_nftl_wl->pages_per_blk; k++) {
			if (phy_blk_node->phy_page_map[k] >= 0)
				valid_page++;
		}

		if (valid_page < (phy_blk_node->last_write + 1)) {
			if (aml_nftl_wl->get_best_free(aml_nftl_wl, &dest_blk)) {
				aml_nftl_dbg("nftl garbage couldn't find free block: %d %d\n", aml_nftl_wl->wait_gc_block, aml_nftl_wl->free_root.count);
				vt_blk_num = aml_nftl_wl->wait_gc_block;
				aml_nftl_wl->wait_gc_block = BLOCK_INIT_VALUE;
				gc_get_dirty_block(aml_nftl_wl, 0);
				if (aml_nftl_wl->wait_gc_block < 0) {
					aml_nftl_dbg("nftl garbage couldn't find copy block: %d %d\n", aml_nftl_wl->wait_gc_block, aml_nftl_wl->free_root.count);
					return 0;
				}
				gc_num = aml_nftl_wl->garbage_one(aml_nftl_wl, aml_nftl_wl->wait_gc_block, DO_COPY_PAGE_AVERAGELY);
				if (gc_num > 0)
					aml_nftl_wl->wait_gc_block = BLOCK_INIT_VALUE;
				aml_nftl_wl->wait_gc_block = vt_blk_num;
				if (aml_nftl_wl->get_best_free(aml_nftl_wl, &dest_blk))
					return 0;
			}
			aml_nftl_add_node(aml_nftl_info, aml_nftl_wl->wait_gc_block, dest_blk);
			aml_nftl_wl->add_used(aml_nftl_wl, dest_blk);
		}

		gc_num = aml_nftl_wl->garbage_one(aml_nftl_wl, aml_nftl_wl->wait_gc_block, DO_COPY_PAGE_AVERAGELY);
		if (gc_num > 0)
			aml_nftl_wl->wait_gc_block = BLOCK_INIT_VALUE;
		return gc_num;
	}
	return 0;
}
/**
 * get_best_free - get best free block
 * @blk: returned linear block
 *
 * 1. get free block from erased free tree & remove node from tree
 * 2. else get free block from dirty free tree & remove node from tree
 * 3. update ec
 * 4. erase dirty free block
 */
static int32_t get_best_free(struct aml_nftl_wl_t *wl, addr_blk_t *blk)
{
	int error = 0;
	struct rb_node *p;
	struct wl_rb_t *tnode;
	struct aml_nftl_info_t *aml_nftl_info = wl->aml_nftl_info;

get_free:
	if (wl->erased_root.count) {
		p = rb_first(&wl->erased_root.root);
		tnode = rb_entry(p, struct wl_rb_t, rb_node);
		*blk = tnode->blk;
		_del_erased(wl, tnode);
	} else if (wl->free_root.count) {
		p = rb_first(&wl->free_root.root);
		tnode = rb_entry(p, struct wl_rb_t, rb_node);
		*blk = tnode->blk;
		_del_free(wl, tnode);
		error = aml_nftl_info->erase_block(aml_nftl_info, *blk);
		if (error) {
			aml_nftl_info->blk_mark_bad(aml_nftl_info, *blk);
			goto get_free;
		}
	} else {
		return -ENOENT;
	}
	return 0;
}
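
/*
 * Typical caller pattern (a sketch mirroring staticwl_linear_blk above,
 * not an exported helper of this driver): pick the best free block, then
 * immediately account for it in the used tree before writing to it.
 */
#if 0
static int32_t wl_take_free_block(struct aml_nftl_wl_t *wl, addr_blk_t *dest_blk)
{
	/* lowest-ec erased block if available, else lowest-ec dirty free block */
	if (wl->get_best_free(wl, dest_blk))
		return -ENOENT;
	/* track its erase count in the used tree so wear leveling can see it */
	wl->add_used(wl, *dest_blk);
	return 0;
}
#endif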
int aml_nftl_wl_init(struct aml_nftl_info_t *aml_nftl_info)
{
	struct aml_nftl_wl_t *aml_nftl_wl = aml_nftl_malloc(sizeof(struct aml_nftl_wl_t));

	if (!aml_nftl_wl)
		return -1;

	aml_nftl_wl->aml_nftl_info = aml_nftl_info;
	aml_nftl_info->aml_nftl_wl = aml_nftl_wl;
	aml_nftl_wl->wl_delta = WL_DELTA;
	aml_nftl_wl->pages_per_blk = aml_nftl_info->pages_per_blk;
	aml_nftl_wl->gc_start_block = aml_nftl_info->accessibleblocks - 1;
	aml_nftl_wl->erased_root.root = RB_ROOT;
	aml_nftl_wl->free_root.root = RB_ROOT;
	aml_nftl_wl->used_root.root = RB_ROOT;
	INIT_LIST_HEAD(&aml_nftl_wl->gc_blk_list);
	INIT_LIST_HEAD(&aml_nftl_wl->readerr_head.list);
	aml_nftl_wl->readerr_head.vt_blk = BLOCK_INIT_VALUE;
	aml_nftl_wl->wait_gc_block = BLOCK_INIT_VALUE;

	/* init function pointers */
	aml_nftl_wl->add_free = add_free;
	aml_nftl_wl->add_erased = add_erased;
	aml_nftl_wl->add_used = add_used;
	aml_nftl_wl->add_gc = add_gc;
	aml_nftl_wl->get_best_free = get_best_free;
	aml_nftl_wl->garbage_one = gc_copy_one;
	aml_nftl_wl->garbage_collect = aml_nftl_garbage_collect;
	return 0;
}
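
/*
 * Usage sketch (a hypothetical caller, not part of this file): after
 * aml_nftl_wl_init() has filled in the function pointers, the rest of the
 * NFTL core drives wear leveling and garbage collection through them, e.g.:
 */
#if 0
static void example_wl_usage(struct aml_nftl_info_t *aml_nftl_info, addr_blk_t blk)
{
	struct aml_nftl_wl_t *wl = aml_nftl_info->aml_nftl_wl;

	/* a block that has just been erased goes into the erased tree */
	wl->add_erased(wl, blk);

	/* a logical block with stale data is queued for gc, then one pass runs */
	wl->add_gc(wl, blk);
	wl->garbage_collect(wl, 0);
}
#endif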