- /*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
- #include "rrpc.h"
- static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
- static DECLARE_RWSEM(rrpc_lock);
- static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
- #define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
- static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
- {
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
- lockdep_assert_held(&rrpc->rev_lock);
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
- spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
- spin_unlock(&rblk->lock);
- rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
- }
- static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
- {
- sector_t i;
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
- }
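- /* Allocate a request and lock the logical address range [laddr, laddr + pages).
- * Returns ERR_PTR(-ENOMEM) if no request can be allocated and NULL if the
- * range is already inflight.
- */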
- static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
- {
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
- return rqd;
- }
- static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
- {
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
- rrpc_unlock_laddr(rrpc, inf);
- mempool_free(rqd, rrpc->rq_pool);
- }
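- /* Handle a discard bio: lock the logical range, invalidate the mapped pages
- * and release the range again.
- */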
- static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
- {
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
- schedule();
- }
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
- }
- static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- return (rblk->next_page == rrpc->dev->sec_per_blk);
- }
- /* Calculate relative addr for the given block, considering instantiated LUNs */
- static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- struct nvm_block *blk = rblk->parent;
- int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
- return lun_blk * rrpc->dev->sec_per_blk;
- }
- /* Calculate global addr for the given block */
- static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- struct nvm_block *blk = rblk->parent;
- return blk->id * rrpc->dev->sec_per_blk;
- }
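- /* Convert a linear device address into the generic ppa format (sector, page,
- * block, lun and channel fields).
- */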
- static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
- {
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = r.ppa;
- l.ppa = 0;
- div_u64_rem(ppa, dev->sec_per_pg, &secs);
- l.g.sec = secs;
- sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
- l.g.pg = pgs;
- sector_div(ppa, dev->pgs_per_blk);
- div_u64_rem(ppa, dev->blks_per_lun, &blks);
- l.g.blk = blks;
- sector_div(ppa, dev->blks_per_lun);
- div_u64_rem(ppa, dev->luns_per_chnl, &luns);
- l.g.lun = luns;
- sector_div(ppa, dev->luns_per_chnl);
- l.g.ch = ppa;
- return l;
- }
- static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
- {
- struct ppa_addr paddr;
- paddr.ppa = addr;
- return linear_to_generic_addr(dev, paddr);
- }
- /* requires lun->lock taken */
- static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
- {
- struct rrpc *rrpc = rlun->rrpc;
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
- }
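- /* Get a free block from the media manager and reset its rrpc bookkeeping
- * (invalid-page bitmap, write pointer and commit counter).
- */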
- static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
- {
- struct nvm_block *blk;
- struct rrpc_block *rblk;
- blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
- if (!blk) {
- pr_err("nvm: rrpc: cannot get new block from media manager\n");
- return NULL;
- }
- rblk = rrpc_get_rblk(rlun, blk->id);
- blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
- return rblk;
- }
- static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- nvm_put_blk(rrpc->dev, rblk->parent);
- }
- static void rrpc_put_blks(struct rrpc *rrpc)
- {
- struct rrpc_lun *rlun;
- int i;
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
- }
- static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
- {
- int next = atomic_inc_return(&rrpc->next_lun);
- return &rrpc->luns[next % rrpc->nr_luns];
- }
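- /* Kick garbage collection by queueing the per-LUN GC work items. */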
- static void rrpc_gc_kick(struct rrpc *rrpc)
- {
- struct rrpc_lun *rlun;
- unsigned int i;
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
- }
- /*
- * Timed GC: kick the per-LUN GC workers at every interval.
- */
- static void rrpc_gc_timer(unsigned long data)
- {
- struct rrpc *rrpc = (struct rrpc *)data;
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
- }
- static void rrpc_end_sync_bio(struct bio *bio)
- {
- struct completion *waiting = bio->bi_private;
- if (bio->bi_error)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
- complete(waiting);
- }
- /*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @rblk: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
- static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- struct request_queue *q = rrpc->dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = rrpc->dev->sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
- if (!page) {
- bio_put(bio);
- return -ENOMEM;
- }
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
- /* Lock laddr */
- phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
- try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
- spin_unlock(&rrpc->rev_lock);
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_error) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- bio_reset(bio);
- reinit_completion(&wait);
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_error)
- goto finished;
- bio_reset(bio);
- }
- finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
- return 0;
- }
- static void rrpc_block_gc(struct work_struct *work)
- {
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct nvm_dev *dev = rrpc->dev;
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
- if (nvm_erase_blk(dev, rblk->parent))
- goto put_back;
- rrpc_put_blk(rrpc, rblk);
- return;
- put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
- }
- /* the block with the highest number of invalid pages will be at the beginning
- * of the list
- */
- static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
- {
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
- }
- /* linearly find the block with the highest number of invalid pages;
- * requires lun->lock to be held
- */
- static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
- {
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblock, *max;
- BUG_ON(list_empty(prio_list));
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblock, prio_list, prio)
- max = rblock_max_invalid(max, rblock);
- return max;
- }
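- /* Per-LUN GC worker: while the LUN is low on free blocks, pick the blocks
- * with the most invalid pages from the priority list and queue them for
- * reclaim.
- */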
- static void rrpc_lun_gc(struct work_struct *work)
- {
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_lun *lun = rlun->parent;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
- nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
- spin_lock(&rlun->lock);
- while (nr_blocks_need > lun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblock = block_prio_find_max(rlun);
- struct nvm_block *block = rblock->parent;
- if (!rblock->nr_invalid_pages)
- break;
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
- list_del_init(&rblock->prio);
- BUG_ON(!block_is_full(rrpc, rblock));
- pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
- gcb->rrpc = rrpc;
- gcb->rblk = rblock;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
- /* TODO: Hint that request queue can be started again */
- }
- static void rrpc_gc_queue(struct work_struct *work)
- {
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
- rblk->parent->id);
- }
- static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
- };
- static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
- {
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
- if (!is_gc)
- return get_next_lun(rrpc);
- /* during GC we don't care about the round-robin order; instead we want to
- * make sure that we maintain evenness between the LUNs.
- */
- max_free = &rrpc->luns[0];
- /* prevent the GC-ing LUN from devouring pages of a LUN with
- * few free blocks. We don't take the lock as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->parent->nr_free_blocks >
- max_free->parent->nr_free_blocks)
- max_free = rlun;
- }
- return max_free;
- }
- static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
- {
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
- BUG_ON(laddr >= rrpc->nr_sects);
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
- gp->addr = paddr;
- gp->rblk = rblk;
- rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
- return gp;
- }
- static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- u64 addr = ADDR_EMPTY;
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
- addr = block_to_addr(rrpc, rblk) + rblk->next_page;
- rblk->next_page++;
- out:
- spin_unlock(&rblk->lock);
- return addr;
- }
- /* Map a logical address to a physical page. The mapping implements a
- * round-robin approach and allocates a page from the next available LUN.
- *
- * Returns an rrpc_addr with the physical address and block. Returns NULL if
- * no blocks in the next rlun are available.
- */
- static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
- {
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct nvm_lun *lun;
- u64 paddr;
- int gc_force = 0;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
- lun = rlun->parent;
- if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
- return NULL;
- /*
- * page allocation steps:
- * 1. Try to allocate a new page from the current rblk.
- * 2a. If that succeeds, proceed to map it in and return.
- * 2b. If it fails, first try to allocate a new block from the media
- * manager, then retry step 1. Retry until the normal block pool is
- * exhausted.
- * 3. If exhausted and the garbage collector is requesting the block,
- * retry step 1 with the reserved (emergency GC) block.
- * If this fails as well, or the request is not from GC, report that no
- * block could be retrieved and let the caller handle further
- * processing.
- */
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
- retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
- if (paddr != ADDR_EMPTY)
- goto done;
- if (!list_empty(&rlun->wblk_list)) {
- new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
- /*
- * another thread might already have added a new block;
- * therefore, make sure that one is used instead of the
- * one just added.
- */
- goto new_blk;
- }
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
- pr_err("rrpc: failed to allocate new block\n");
- return NULL;
- done:
- spin_unlock(&rlun->lock);
- return rrpc_update_map(rrpc, laddr, rblk, paddr);
- }
- static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- struct rrpc_block_gc *gcb;
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.");
- return;
- }
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
- }
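- /* On write completion, account the committed sectors per block; once a block
- * is fully written it becomes a candidate for garbage collection.
- */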
- static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
- {
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- struct nvm_lun *lun;
- int cmnt_size, i;
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
- lun = rblk->parent->lun;
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
- }
- static void rrpc_end_io(struct nvm_rq *rqd)
- {
- struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
- if (bio_data_dir(rqd->bio) == WRITE)
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- bio_put(rqd->bio);
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
- rrpc_unlock_rq(rrpc, rqd);
- if (npages > 1)
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
- mempool_free(rqd, rrpc->rq_pool);
- }
- static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
- {
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- gp->addr);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
- rqd->opcode = NVM_OP_HBREAD;
- return NVM_IO_OK;
- }
- static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
- {
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
- BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
- rqd->opcode = NVM_OP_HBREAD;
- rrqd->addr = gp;
- return NVM_IO_OK;
- }
- static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
- {
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (!p) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
- p->addr);
- }
- rqd->opcode = NVM_OP_HBWRITE;
- return NVM_IO_OK;
- }
- static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
- {
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- struct rrpc_addr *p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (!p) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
- rqd->opcode = NVM_OP_HBWRITE;
- rrqd->addr = p;
- return NVM_IO_OK;
- }
- static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
- {
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
- return rrpc_read_rq(rrpc, bio, rqd, flags);
- }
- static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
- {
- int err;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- if (bio_size < rrpc->dev->sec_size)
- return NVM_IO_ERR;
- else if (bio_size > rrpc->dev->max_rq_size)
- return NVM_IO_ERR;
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
- bio_get(bio);
- rqd->bio = bio;
- rqd->ins = &rrpc->instance;
- rqd->nr_ppas = nr_pages;
- rrq->flags = flags;
- err = nvm_submit_io(rrpc->dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(rrpc->dev,
- rqd->ppa_list, rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
- return NVM_IO_OK;
- }
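- /* make_request entry point: discards are handled directly, reads and writes
- * are submitted through rrpc_submit_io and requeued on transient failures.
- */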
- static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
- {
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- if (!rqd) {
- pr_err_ratelimited("rrpc: not able to queue bio.");
- bio_io_error(bio);
- return BLK_QC_T_NONE;
- }
- memset(rqd, 0, sizeof(struct nvm_rq));
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
- }
- static void rrpc_requeue(struct work_struct *work)
- {
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
- bio_list_init(&bios);
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
- }
- static void rrpc_gc_free(struct rrpc *rrpc)
- {
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
- }
- static int rrpc_gc_init(struct rrpc *rrpc)
- {
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
- setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
- return 0;
- }
- static void rrpc_map_free(struct rrpc *rrpc)
- {
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
- }
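- /* get_l2p_tbl callback: populate the forward and reverse translation maps
- * from the device-provided L2P entries.
- */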
- static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
- {
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- u64 elba = slba + nlb;
- u64 i;
- if (unlikely(elba > dev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- return -EINVAL;
- }
- for (i = 0; i < nlb; i++) {
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
- /* LNVM treats address spaces as silos: LBA and PBA are
- * equally large and zero-indexed.
- */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- return -EINVAL;
- }
- /* Address zero is a special one. The first page on a disk is
- * protected, as it often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
- div_u64_rem(pba, rrpc->nr_sects, &mod);
- addr[i].addr = pba;
- raddr[mod].addr = slba + i;
- }
- return 0;
- }
- static int rrpc_map_init(struct rrpc *rrpc)
- {
- struct nvm_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
- if (!dev->ops->get_l2p_tbl)
- return 0;
- /* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
- return 0;
- }
- /* Minimum pages needed within a lun */
- #define PAGE_POOL_SIZE 16
- #define ADDR_POOL_SIZE 64
- static int rrpc_core_init(struct rrpc *rrpc)
- {
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
- return 0;
- }
- static void rrpc_core_free(struct rrpc *rrpc)
- {
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
- }
- static void rrpc_luns_free(struct rrpc *rrpc)
- {
- struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun;
- struct rrpc_lun *rlun;
- int i;
- if (!rrpc->luns)
- return;
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- lun = rlun->parent;
- if (!lun)
- break;
- dev->mt->release_lun(dev, lun->id);
- vfree(rlun->blocks);
- }
- kfree(rrpc->luns);
- }
- static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
- {
- struct nvm_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
- if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- return -EINVAL;
- }
- spin_lock_init(&rrpc->rev_lock);
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- int lunid = lun_begin + i;
- struct nvm_lun *lun;
- if (dev->mt->reserve_lun(dev, lunid)) {
- pr_err("rrpc: lun %u is already allocated\n", lunid);
- goto err;
- }
- lun = dev->mt->get_lun(dev, lunid);
- if (!lun)
- goto err;
- rlun = &rrpc->luns[i];
- rlun->parent = lun;
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- rrpc->dev->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
- for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
- struct nvm_block *blk = &lun->blocks[j];
- rblk->parent = blk;
- rblk->rlun = rlun;
- INIT_LIST_HEAD(&rblk->prio);
- spin_lock_init(&rblk->lock);
- }
- rlun->rrpc = rrpc;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
- }
- return 0;
- err:
- return ret;
- }
- /* returns 0 on success and stores the beginning address in *begin */
- static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
- {
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t size = rrpc->nr_sects * dev->sec_size;
- int ret;
- size >>= 9;
- ret = mt->get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->sec_size) - 9);
- return ret;
- }
- static void rrpc_area_free(struct rrpc *rrpc)
- {
- struct nvm_dev *dev = rrpc->dev;
- struct nvmm_type *mt = dev->mt;
- sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
- mt->put_area(dev, begin);
- }
- static void rrpc_free(struct rrpc *rrpc)
- {
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
- kfree(rrpc);
- }
- static void rrpc_exit(void *private)
- {
- struct rrpc *rrpc = private;
- del_timer(&rrpc->gc_timer);
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
- rrpc_free(rrpc);
- }
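- /* Report the exposed capacity: four blocks per LUN are reserved and roughly
- * 10% of the remaining sectors are kept for overprovisioning.
- */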
- static sector_t rrpc_capacity(void *private)
- {
- struct rrpc *rrpc = private;
- struct nvm_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
- }
- /*
- * Looks up the logical address from the reverse translation map and checks
- * whether it is still valid by comparing the logical-to-physical mapping with
- * the sector's physical address. Sectors that no longer match are marked
- * invalid in the block's invalid-page bitmap.
- */
- static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
- {
- struct nvm_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->sec_per_blk; offset++) {
- paddr = bpaddr + offset;
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
- laddr = &rrpc->trans_map[pladdr];
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
- }
- static int rrpc_blocks_init(struct rrpc *rrpc)
- {
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
- for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
- return 0;
- }
- static int rrpc_luns_configure(struct rrpc *rrpc)
- {
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
- return 0;
- err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
- }
- static struct nvm_tgt_type tt_rrpc;
- static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
- int lun_begin, int lun_end)
- {
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
- rrpc->instance.tt = &tt_rrpc;
- rrpc->dev = dev;
- rrpc->disk = tdisk;
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
- rrpc->nr_luns = lun_end - lun_begin + 1;
- rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
- rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
- ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
- rrpc->poffset = dev->sec_per_lun * lun_begin;
- rrpc->lun_offset = lun_begin;
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
- return rrpc;
- err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
- }
- /* round robin, page-based FTL, and cost-based GC */
- static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
- .end_io = rrpc_end_io,
- .init = rrpc_init,
- .exit = rrpc_exit,
- };
- static int __init rrpc_module_init(void)
- {
- return nvm_register_tgt_type(&tt_rrpc);
- }
- static void rrpc_module_exit(void)
- {
- nvm_unregister_tgt_type(&tt_rrpc);
- }
- module_init(rrpc_module_init);
- module_exit(rrpc_module_exit);
- MODULE_LICENSE("GPL v2");
- MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");