- /*
- * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
- */
- #include <linux/mm.h>
- #include <linux/swap.h>
- #include <linux/bio.h>
- #include <linux/blkdev.h>
- #include <linux/uio.h>
- #include <linux/iocontext.h>
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/export.h>
- #include <linux/mempool.h>
- #include <linux/workqueue.h>
- #include <linux/cgroup.h>
- #include <trace/events/block.h>
- /*
- * Test patch to inline a certain number of bi_io_vec's inside the bio
- * itself, to shrink a bio data allocation from two mempool calls to one
- */
- #define BIO_INLINE_VECS 4
- /*
- * if you change this list, also change bvec_alloc or things will
- * break badly! cannot be bigger than what you can fit into an
- * unsigned short
- */
- #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
- static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
- BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
- };
- #undef BV
- /*
- * fs_bio_set is the bio_set containing bio and iovec memory pools used by
- * IO code that does not need private memory pools.
- */
- struct bio_set *fs_bio_set;
- EXPORT_SYMBOL(fs_bio_set);
- /*
- * Our slab pool management
- */
- struct bio_slab {
- struct kmem_cache *slab;
- unsigned int slab_ref;
- unsigned int slab_size;
- char name[8];
- };
- static DEFINE_MUTEX(bio_slab_lock);
- static struct bio_slab *bio_slabs;
- static unsigned int bio_slab_nr, bio_slab_max;
- static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
- {
- unsigned int sz = sizeof(struct bio) + extra_size;
- struct kmem_cache *slab = NULL;
- struct bio_slab *bslab, *new_bio_slabs;
- unsigned int new_bio_slab_max;
- unsigned int i, entry = -1;
- mutex_lock(&bio_slab_lock);
- i = 0;
- while (i < bio_slab_nr) {
- bslab = &bio_slabs[i];
- if (!bslab->slab && entry == -1)
- entry = i;
- else if (bslab->slab_size == sz) {
- slab = bslab->slab;
- bslab->slab_ref++;
- break;
- }
- i++;
- }
- if (slab)
- goto out_unlock;
- if (bio_slab_nr == bio_slab_max && entry == -1) {
- new_bio_slab_max = bio_slab_max << 1;
- new_bio_slabs = krealloc(bio_slabs,
- new_bio_slab_max * sizeof(struct bio_slab),
- GFP_KERNEL);
- if (!new_bio_slabs)
- goto out_unlock;
- bio_slab_max = new_bio_slab_max;
- bio_slabs = new_bio_slabs;
- }
- if (entry == -1)
- entry = bio_slab_nr++;
- bslab = &bio_slabs[entry];
- snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
- slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!slab)
- goto out_unlock;
- bslab->slab = slab;
- bslab->slab_ref = 1;
- bslab->slab_size = sz;
- out_unlock:
- mutex_unlock(&bio_slab_lock);
- return slab;
- }
- static void bio_put_slab(struct bio_set *bs)
- {
- struct bio_slab *bslab = NULL;
- unsigned int i;
- mutex_lock(&bio_slab_lock);
- for (i = 0; i < bio_slab_nr; i++) {
- if (bs->bio_slab == bio_slabs[i].slab) {
- bslab = &bio_slabs[i];
- break;
- }
- }
- if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
- goto out;
- WARN_ON(!bslab->slab_ref);
- if (--bslab->slab_ref)
- goto out;
- kmem_cache_destroy(bslab->slab);
- bslab->slab = NULL;
- out:
- mutex_unlock(&bio_slab_lock);
- }
- unsigned int bvec_nr_vecs(unsigned short idx)
- {
- return bvec_slabs[idx].nr_vecs;
- }
- void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
- {
- if (!idx)
- return;
- idx--;
- BIO_BUG_ON(idx >= BVEC_POOL_NR);
- if (idx == BVEC_POOL_MAX) {
- mempool_free(bv, pool);
- } else {
- struct biovec_slab *bvs = bvec_slabs + idx;
- kmem_cache_free(bvs->slab, bv);
- }
- }
- struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
- mempool_t *pool)
- {
- struct bio_vec *bvl;
- /*
- * see comment near bvec_array define!
- */
- switch (nr) {
- case 1:
- *idx = 0;
- break;
- case 2 ... 4:
- *idx = 1;
- break;
- case 5 ... 16:
- *idx = 2;
- break;
- case 17 ... 64:
- *idx = 3;
- break;
- case 65 ... 128:
- *idx = 4;
- break;
- case 129 ... BIO_MAX_PAGES:
- *idx = 5;
- break;
- default:
- return NULL;
- }
- /*
- * idx now points to the pool we want to allocate from. only the
- * 1-vec entry pool is mempool backed.
- */
- if (*idx == BVEC_POOL_MAX) {
- fallback:
- bvl = mempool_alloc(pool, gfp_mask);
- } else {
- struct biovec_slab *bvs = bvec_slabs + *idx;
- gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
- /*
- * Make this allocation restricted and don't dump info on
- * allocation failures, since we'll fallback to the mempool
- * in case of failure.
- */
- __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
- /*
- * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
- * is set, retry with the 1-entry mempool
- */
- bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
- if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
- *idx = BVEC_POOL_MAX;
- goto fallback;
- }
- }
- (*idx)++;
- return bvl;
- }
- static void __bio_free(struct bio *bio)
- {
- bio_disassociate_task(bio);
- if (bio_integrity(bio))
- bio_integrity_free(bio);
- }
- static void bio_free(struct bio *bio)
- {
- struct bio_set *bs = bio->bi_pool;
- void *p;
- __bio_free(bio);
- if (bs) {
- bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
- /*
- * If we have front padding, adjust the bio pointer before freeing
- */
- p = bio;
- p -= bs->front_pad;
- mempool_free(p, bs->bio_pool);
- } else {
- /* Bio was allocated by bio_kmalloc() */
- kfree(bio);
- }
- }
- void bio_init(struct bio *bio)
- {
- memset(bio, 0, sizeof(*bio));
- atomic_set(&bio->__bi_remaining, 1);
- atomic_set(&bio->__bi_cnt, 1);
- }
- EXPORT_SYMBOL(bio_init);
- /**
- * bio_reset - reinitialize a bio
- * @bio: bio to reset
- *
- * Description:
- * After calling bio_reset(), @bio will be in the same state as a freshly
- * allocated bio returned by bio_alloc_bioset() - the only fields that are
- * preserved are the ones that are initialized by bio_alloc_bioset(). See
- * comment in struct bio.
- */
- void bio_reset(struct bio *bio)
- {
- unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
- __bio_free(bio);
- memset(bio, 0, BIO_RESET_BYTES);
- bio->bi_flags = flags;
- atomic_set(&bio->__bi_remaining, 1);
- }
- EXPORT_SYMBOL(bio_reset);
- static struct bio *__bio_chain_endio(struct bio *bio)
- {
- struct bio *parent = bio->bi_private;
- if (!parent->bi_error)
- parent->bi_error = bio->bi_error;
- bio_put(bio);
- return parent;
- }
- static void bio_chain_endio(struct bio *bio)
- {
- bio_endio(__bio_chain_endio(bio));
- }
- /**
- * bio_chain - chain bio completions
- * @bio: the target bio
- * @parent: the @bio's parent bio
- *
- * The caller won't have a bi_end_io called when @bio completes - instead,
- * @parent's bi_end_io won't be called until both @parent and @bio have
- * completed; the chained bio will also be freed when it completes.
- *
- * The caller must not set bi_private or bi_end_io in @bio.
- */
- void bio_chain(struct bio *bio, struct bio *parent)
- {
- BUG_ON(bio->bi_private || bio->bi_end_io);
- bio->bi_private = parent;
- bio->bi_end_io = bio_chain_endio;
- bio_inc_remaining(parent);
- }
- EXPORT_SYMBOL(bio_chain);
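- /*
- * Illustrative usage sketch (not part of the original file): tying the
- * completion of a helper bio to its parent with bio_chain(). "extra" and
- * "parent" are hypothetical bios owned by the caller; after the chain,
- * parent->bi_end_io runs only once both bios have completed.
- *
- *	struct bio *extra = bio_alloc(GFP_NOIO, 1);
- *
- *	(set extra->bi_bdev, extra->bi_iter.bi_sector, add pages here)
- *	bio_chain(extra, parent);
- *	submit_bio(extra);
- *	submit_bio(parent);
- */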
- static void bio_alloc_rescue(struct work_struct *work)
- {
- struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
- struct bio *bio;
- while (1) {
- spin_lock(&bs->rescue_lock);
- bio = bio_list_pop(&bs->rescue_list);
- spin_unlock(&bs->rescue_lock);
- if (!bio)
- break;
- generic_make_request(bio);
- }
- }
- static void punt_bios_to_rescuer(struct bio_set *bs)
- {
- struct bio_list punt, nopunt;
- struct bio *bio;
- /*
- * In order to guarantee forward progress we must punt only bios that
- * were allocated from this bio_set; otherwise, if there was a bio on
- * there for a stacking driver higher up in the stack, processing it
- * could require allocating bios from this bio_set, and doing that from
- * our own rescuer would be bad.
- *
- * Since bio lists are singly linked, pop them all instead of trying to
- * remove from the middle of the list:
- */
- bio_list_init(&punt);
- bio_list_init(&nopunt);
- while ((bio = bio_list_pop(&current->bio_list[0])))
- bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
- current->bio_list[0] = nopunt;
- bio_list_init(&nopunt);
- while ((bio = bio_list_pop(&current->bio_list[1])))
- bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
- current->bio_list[1] = nopunt;
- spin_lock(&bs->rescue_lock);
- bio_list_merge(&bs->rescue_list, &punt);
- spin_unlock(&bs->rescue_lock);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- }
- /**
- * bio_alloc_bioset - allocate a bio for I/O
- * @gfp_mask: the GFP_ mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
- * @bs: the bio_set to allocate from.
- *
- * Description:
- * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
- * backed by the @bs's mempool.
- *
- * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
- * always be able to allocate a bio. This is due to the mempool guarantees.
- * To make this work, callers must never allocate more than 1 bio at a time
- * from this pool. Callers that need to allocate more than 1 bio must always
- * submit the previously allocated bio for IO before attempting to allocate
- * a new one. Failure to do so can cause deadlocks under memory pressure.
- *
- * Note that when running under generic_make_request() (i.e. any block
- * driver), bios are not submitted until after you return - see the code in
- * generic_make_request() that converts recursion into iteration, to prevent
- * stack overflows.
- *
- * This would normally mean allocating multiple bios under
- * generic_make_request() would be susceptible to deadlocks, but we have
- * deadlock avoidance code that resubmits any blocked bios from a rescuer
- * thread.
- *
- * However, we do not guarantee forward progress for allocations from other
- * mempools. Doing multiple allocations from the same mempool under
- * generic_make_request() should be avoided - instead, use bio_set's front_pad
- * for per bio allocations.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
- struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
- {
- gfp_t saved_gfp = gfp_mask;
- unsigned front_pad;
- unsigned inline_vecs;
- struct bio_vec *bvl = NULL;
- struct bio *bio;
- void *p;
- if (!bs) {
- if (nr_iovecs > UIO_MAXIOV)
- return NULL;
- p = kmalloc(sizeof(struct bio) +
- nr_iovecs * sizeof(struct bio_vec),
- gfp_mask);
- front_pad = 0;
- inline_vecs = nr_iovecs;
- } else {
- /* should not use nobvec bioset for nr_iovecs > 0 */
- if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
- return NULL;
- /*
- * generic_make_request() converts recursion to iteration; this
- * means if we're running beneath it, any bios we allocate and
- * submit will not be submitted (and thus freed) until after we
- * return.
- *
- * This exposes us to a potential deadlock if we allocate
- * multiple bios from the same bio_set() while running
- * underneath generic_make_request(). If we were to allocate
- * multiple bios (say a stacking block driver that was splitting
- * bios), we would deadlock if we exhausted the mempool's
- * reserve.
- *
- * We solve this, and guarantee forward progress, with a rescuer
- * workqueue per bio_set. If we go to allocate and there are
- * bios on current->bio_list, we first try the allocation
- * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
- * bios we would be blocking to the rescuer workqueue before
- * we retry with the original gfp_flags.
- */
- if (current->bio_list &&
- (!bio_list_empty(&current->bio_list[0]) ||
- !bio_list_empty(&current->bio_list[1])))
- gfp_mask &= ~__GFP_DIRECT_RECLAIM;
- p = mempool_alloc(bs->bio_pool, gfp_mask);
- if (!p && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- p = mempool_alloc(bs->bio_pool, gfp_mask);
- }
- front_pad = bs->front_pad;
- inline_vecs = BIO_INLINE_VECS;
- }
- if (unlikely(!p))
- return NULL;
- bio = p + front_pad;
- bio_init(bio);
- if (nr_iovecs > inline_vecs) {
- unsigned long idx = 0;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
- if (!bvl && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
- }
- if (unlikely(!bvl))
- goto err_free;
- bio->bi_flags |= idx << BVEC_POOL_OFFSET;
- } else if (nr_iovecs) {
- bvl = bio->bi_inline_vecs;
- }
- bio->bi_pool = bs;
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bvl;
- return bio;
- err_free:
- mempool_free(p, bs->bio_pool);
- return NULL;
- }
- EXPORT_SYMBOL(bio_alloc_bioset);
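- /*
- * Illustrative usage sketch (not part of the original file), assuming the
- * caller owns a private bio_set "my_bs" sized for its deepest chain of I/O.
- * It follows the rule documented above: each bio taken from the pool is
- * submitted before the next one is allocated.
- *
- *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, my_bs);
- *
- *	bio->bi_bdev = bdev;
- *	bio->bi_iter.bi_sector = sector;
- *	bio_add_page(bio, page, PAGE_SIZE, 0);
- *	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- *	submit_bio(bio);	(submit before allocating again from my_bs)
- */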
- void zero_fill_bio(struct bio *bio)
- {
- unsigned long flags;
- struct bio_vec bv;
- struct bvec_iter iter;
- bio_for_each_segment(bv, bio, iter) {
- char *data = bvec_kmap_irq(&bv, &flags);
- memset(data, 0, bv.bv_len);
- flush_dcache_page(bv.bv_page);
- bvec_kunmap_irq(data, &flags);
- }
- }
- EXPORT_SYMBOL(zero_fill_bio);
- /**
- * bio_put - release a reference to a bio
- * @bio: bio to release reference to
- *
- * Description:
- * Put a reference to a &struct bio, either one you have gotten with
- * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
- **/
- void bio_put(struct bio *bio)
- {
- if (!bio_flagged(bio, BIO_REFFED))
- bio_free(bio);
- else {
- BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
- /*
- * last put frees it
- */
- if (atomic_dec_and_test(&bio->__bi_cnt))
- bio_free(bio);
- }
- }
- EXPORT_SYMBOL(bio_put);
- inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
- {
- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
- blk_recount_segments(q, bio);
- return bio->bi_phys_segments;
- }
- EXPORT_SYMBOL(bio_phys_segments);
- /**
- * __bio_clone_fast - clone a bio that shares the original bio's biovec
- * @bio: destination bio
- * @bio_src: bio to clone
- *
- * Clone a &bio. Caller will own the returned bio, but not
- * the actual data it points to. Reference count of returned
- * bio will be one.
- *
- * Caller must ensure that @bio_src is not freed before @bio.
- */
- void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
- {
- BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
- /*
- * most users will be overriding ->bi_bdev with a new target,
- * so we don't set nor calculate new physical/hw segment counts here
- */
- bio->bi_bdev = bio_src->bi_bdev;
- bio_set_flag(bio, BIO_CLONED);
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_iter = bio_src->bi_iter;
- bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkcg_association(bio, bio_src);
- }
- EXPORT_SYMBOL(__bio_clone_fast);
- /**
- * bio_clone_fast - clone a bio that shares the original bio's biovec
- * @bio: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
- *
- * Like __bio_clone_fast(), but it also allocates the returned bio
- */
- struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
- {
- struct bio *b;
- b = bio_alloc_bioset(gfp_mask, 0, bs);
- if (!b)
- return NULL;
- __bio_clone_fast(b, bio);
- if (bio_integrity(bio)) {
- int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask);
- if (ret < 0) {
- bio_put(b);
- return NULL;
- }
- }
- return b;
- }
- EXPORT_SYMBOL(bio_clone_fast);
- /**
- * bio_clone_bioset - clone a bio
- * @bio_src: bio to clone
- * @gfp_mask: allocation priority
- * @bs: bio_set to allocate from
- *
- * Clone bio. Caller will own the returned bio, but not the actual data it
- * points to. Reference count of returned bio will be one.
- */
- struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
- struct bio_set *bs)
- {
- struct bvec_iter iter;
- struct bio_vec bv;
- struct bio *bio;
- /*
- * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
- * bio_src->bi_io_vec to bio->bi_io_vec.
- *
- * We can't do that anymore, because:
- *
- * - The point of cloning the biovec is to produce a bio with a biovec
- * the caller can modify: bi_idx and bi_bvec_done should be 0.
- *
- * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
- * we tried to clone the whole thing bio_alloc_bioset() would fail.
- * But the clone should succeed as long as the number of biovecs we
- * actually need to allocate is fewer than BIO_MAX_PAGES.
- *
- * - Lastly, bi_vcnt should not be looked at or relied upon by code
- * that does not own the bio - reason being drivers don't use it for
- * iterating over the biovec anymore, so expecting it to be kept up
- * to date (i.e. for clones that share the parent biovec) is just
- * asking for trouble and would force extra work on
- * __bio_clone_fast() anyways.
- */
- bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
- if (!bio)
- return NULL;
- bio->bi_bdev = bio_src->bi_bdev;
- bio->bi_opf = bio_src->bi_opf;
- bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
- switch (bio_op(bio)) {
- case REQ_OP_DISCARD:
- case REQ_OP_SECURE_ERASE:
- break;
- case REQ_OP_WRITE_SAME:
- bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
- break;
- default:
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
- break;
- }
- if (bio_integrity(bio_src)) {
- int ret;
- ret = bio_integrity_clone(bio, bio_src, gfp_mask);
- if (ret < 0) {
- bio_put(bio);
- return NULL;
- }
- }
- bio_clone_blkcg_association(bio, bio_src);
- return bio;
- }
- EXPORT_SYMBOL(bio_clone_bioset);
- /**
- * bio_add_pc_page - attempt to add page to bio
- * @q: the target queue
- * @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
- *
- * Attempt to add a page to the bio_vec maplist. This can fail for a
- * number of reasons, such as the bio being full or target block device
- * limitations. The target block device must allow bios up to PAGE_SIZE,
- * so it is always possible to add a single page to an empty bio.
- *
- * This should only be used by REQ_PC bios.
- */
- int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
- {
- int retried_segments = 0;
- struct bio_vec *bvec;
- /*
- * cloned bio must not modify vec list
- */
- if (unlikely(bio_flagged(bio, BIO_CLONED)))
- return 0;
- if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
- return 0;
- /*
- * For filesystems with a blocksize smaller than the pagesize
- * we will often be called with the same page as last time and
- * a consecutive offset. Optimize this special case.
- */
- if (bio->bi_vcnt > 0) {
- struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == prev->bv_page &&
- offset == prev->bv_offset + prev->bv_len) {
- prev->bv_len += len;
- bio->bi_iter.bi_size += len;
- goto done;
- }
- /*
- * If the queue doesn't support SG gaps and adding this
- * offset would create a gap, disallow it.
- */
- if (bvec_gap_to_prev(q, prev, offset))
- return 0;
- }
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return 0;
- /*
- * setup the new entry, we might clear it again later if we
- * cannot add the page
- */
- bvec = &bio->bi_io_vec[bio->bi_vcnt];
- bvec->bv_page = page;
- bvec->bv_len = len;
- bvec->bv_offset = offset;
- bio->bi_vcnt++;
- bio->bi_phys_segments++;
- bio->bi_iter.bi_size += len;
- /*
- * Perform a recount if the number of segments is greater
- * than queue_max_segments(q).
- */
- while (bio->bi_phys_segments > queue_max_segments(q)) {
- if (retried_segments)
- goto failed;
- retried_segments = 1;
- blk_recount_segments(q, bio);
- }
- /* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
- bio_clear_flag(bio, BIO_SEG_VALID);
- done:
- return len;
- failed:
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- bio->bi_vcnt--;
- bio->bi_iter.bi_size -= len;
- blk_recount_segments(q, bio);
- return 0;
- }
- EXPORT_SYMBOL(bio_add_pc_page);
- /**
- * bio_add_page - attempt to add page to bio
- * @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
- *
- * Attempt to add a page to the bio_vec maplist. This will only fail
- * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
- */
- int bio_add_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
- {
- struct bio_vec *bv;
- /*
- * cloned bio must not modify vec list
- */
- if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
- return 0;
- /*
- * For filesystems with a blocksize smaller than the pagesize
- * we will often be called with the same page as last time and
- * a consecutive offset. Optimize this special case.
- */
- if (bio->bi_vcnt > 0) {
- bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == bv->bv_page &&
- offset == bv->bv_offset + bv->bv_len) {
- bv->bv_len += len;
- goto done;
- }
- }
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return 0;
- bv = &bio->bi_io_vec[bio->bi_vcnt];
- bv->bv_page = page;
- bv->bv_len = len;
- bv->bv_offset = offset;
- bio->bi_vcnt++;
- done:
- bio->bi_iter.bi_size += len;
- return len;
- }
- EXPORT_SYMBOL(bio_add_page);
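- /*
- * Illustrative sketch (not part of the original file): filling a bio from a
- * hypothetical array of pages and stopping once the bio is full.
- *
- *	for (i = 0; i < nr_pages; i++) {
- *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
- *			break;	(bio full: submit it and start a new one)
- *	}
- */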
- struct submit_bio_ret {
- struct completion event;
- int error;
- };
- static void submit_bio_wait_endio(struct bio *bio)
- {
- struct submit_bio_ret *ret = bio->bi_private;
- ret->error = bio->bi_error;
- complete(&ret->event);
- }
- /**
- * submit_bio_wait - submit a bio, and wait until it completes
- * @bio: The &struct bio which describes the I/O
- *
- * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
- * bio_endio() on failure.
- */
- int submit_bio_wait(struct bio *bio)
- {
- struct submit_bio_ret ret;
- init_completion(&ret.event);
- bio->bi_private = &ret;
- bio->bi_end_io = submit_bio_wait_endio;
- bio->bi_opf |= REQ_SYNC;
- submit_bio(bio);
- wait_for_completion_io(&ret.event);
- return ret.error;
- }
- EXPORT_SYMBOL(submit_bio_wait);
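- /*
- * Illustrative sketch (not part of the original file): a synchronous
- * single-page read. "bdev", "sector" and "page" are assumed to be supplied
- * by the caller.
- *
- *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
- *	int err;
- *
- *	bio->bi_bdev = bdev;
- *	bio->bi_iter.bi_sector = sector;
- *	bio_add_page(bio, page, PAGE_SIZE, 0);
- *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
- *	err = submit_bio_wait(bio);
- *	bio_put(bio);
- */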
- /**
- * bio_advance - increment/complete a bio by some number of bytes
- * @bio: bio to advance
- * @bytes: number of bytes to complete
- *
- * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
- * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
- * be updated on the last bvec as well.
- *
- * @bio will then represent the remaining, uncompleted portion of the io.
- */
- void bio_advance(struct bio *bio, unsigned bytes)
- {
- if (bio_integrity(bio))
- bio_integrity_advance(bio, bytes);
- bio_advance_iter(bio, &bio->bi_iter, bytes);
- }
- EXPORT_SYMBOL(bio_advance);
- /**
- * bio_alloc_pages - allocates a single page for each bvec in a bio
- * @bio: bio to allocate pages for
- * @gfp_mask: flags for allocation
- *
- * Allocates pages up to @bio->bi_vcnt.
- *
- * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
- * freed.
- */
- int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
- {
- int i;
- struct bio_vec *bv;
- bio_for_each_segment_all(bv, bio, i) {
- bv->bv_page = alloc_page(gfp_mask);
- if (!bv->bv_page) {
- while (--bv >= bio->bi_io_vec)
- __free_page(bv->bv_page);
- return -ENOMEM;
- }
- }
- return 0;
- }
- EXPORT_SYMBOL(bio_alloc_pages);
- /**
- * bio_copy_data - copy contents of data buffers from one chain of bios to
- * another
- * @src: source bio list
- * @dst: destination bio list
- *
- * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
- * @src and @dst as linked lists of bios.
- *
- * Stops when it reaches the end of either @src or @dst - that is, copies
- * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
- */
- void bio_copy_data(struct bio *dst, struct bio *src)
- {
- struct bvec_iter src_iter, dst_iter;
- struct bio_vec src_bv, dst_bv;
- void *src_p, *dst_p;
- unsigned bytes;
- src_iter = src->bi_iter;
- dst_iter = dst->bi_iter;
- while (1) {
- if (!src_iter.bi_size) {
- src = src->bi_next;
- if (!src)
- break;
- src_iter = src->bi_iter;
- }
- if (!dst_iter.bi_size) {
- dst = dst->bi_next;
- if (!dst)
- break;
- dst_iter = dst->bi_iter;
- }
- src_bv = bio_iter_iovec(src, src_iter);
- dst_bv = bio_iter_iovec(dst, dst_iter);
- bytes = min(src_bv.bv_len, dst_bv.bv_len);
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- bytes);
- kunmap_atomic(dst_p);
- kunmap_atomic(src_p);
- bio_advance_iter(src, &src_iter, bytes);
- bio_advance_iter(dst, &dst_iter, bytes);
- }
- }
- EXPORT_SYMBOL(bio_copy_data);
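- /*
- * Illustrative sketch (not part of the original file): copying a completed
- * bounce bio back into the bio it was created on behalf of. "bounce" and
- * "orig" are hypothetical bios covering the same number of bytes.
- *
- *	bio_copy_data(orig, bounce);	(destination first, source second)
- *	bio_free_pages(bounce);
- *	bio_put(bounce);
- *	bio_endio(orig);
- */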
- struct bio_map_data {
- int is_our_pages;
- struct iov_iter iter;
- struct iovec iov[];
- };
- static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
- gfp_t gfp_mask)
- {
- if (iov_count > UIO_MAXIOV)
- return NULL;
- return kmalloc(sizeof(struct bio_map_data) +
- sizeof(struct iovec) * iov_count, gfp_mask);
- }
- /**
- * bio_copy_from_iter - copy all pages from iov_iter to bio
- * @bio: The &struct bio which describes the I/O as destination
- * @iter: iov_iter as source
- *
- * Copy all pages from iov_iter to bio.
- * Returns 0 on success, or error on failure.
- */
- static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
- {
- int i;
- struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, i) {
- ssize_t ret;
- ret = copy_page_from_iter(bvec->bv_page,
- bvec->bv_offset,
- bvec->bv_len,
- &iter);
- if (!iov_iter_count(&iter))
- break;
- if (ret < bvec->bv_len)
- return -EFAULT;
- }
- return 0;
- }
- /**
- * bio_copy_to_iter - copy all pages from bio to iov_iter
- * @bio: The &struct bio which describes the I/O as source
- * @iter: iov_iter as destination
- *
- * Copy all pages from bio to iov_iter.
- * Returns 0 on success, or error on failure.
- */
- static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
- {
- int i;
- struct bio_vec *bvec;
- bio_for_each_segment_all(bvec, bio, i) {
- ssize_t ret;
- ret = copy_page_to_iter(bvec->bv_page,
- bvec->bv_offset,
- bvec->bv_len,
- &iter);
- if (!iov_iter_count(&iter))
- break;
- if (ret < bvec->bv_len)
- return -EFAULT;
- }
- return 0;
- }
- void bio_free_pages(struct bio *bio)
- {
- struct bio_vec *bvec;
- int i;
- bio_for_each_segment_all(bvec, bio, i)
- __free_page(bvec->bv_page);
- }
- EXPORT_SYMBOL(bio_free_pages);
- /**
- * bio_uncopy_user - finish previously mapped bio
- * @bio: bio being terminated
- *
- * Free pages allocated from bio_copy_user_iov() and write back data
- * to user space in case of a read.
- */
- int bio_uncopy_user(struct bio *bio)
- {
- struct bio_map_data *bmd = bio->bi_private;
- int ret = 0;
- if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
- /*
- * if we're in a workqueue, the request is orphaned, so
- * don't copy into a random user address space, just free
- * and return -EINTR so user space doesn't expect any data.
- */
- if (!current->mm)
- ret = -EINTR;
- else if (bio_data_dir(bio) == READ)
- ret = bio_copy_to_iter(bio, bmd->iter);
- if (bmd->is_our_pages)
- bio_free_pages(bio);
- }
- kfree(bmd);
- bio_put(bio);
- return ret;
- }
- /**
- * bio_copy_user_iov - copy user data to bio
- * @q: destination block queue
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @iter: iovec iterator
- * @gfp_mask: memory allocation flags
- *
- * Prepares and returns a bio for indirect user io, bouncing data
- * to/from kernel pages as necessary. Must be paired with a call to
- * bio_uncopy_user() on io completion.
- */
- struct bio *bio_copy_user_iov(struct request_queue *q,
- struct rq_map_data *map_data,
- const struct iov_iter *iter,
- gfp_t gfp_mask)
- {
- struct bio_map_data *bmd;
- struct page *page;
- struct bio *bio;
- int i, ret;
- int nr_pages = 0;
- unsigned int len = iter->count;
- unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
- for (i = 0; i < iter->nr_segs; i++) {
- unsigned long uaddr;
- unsigned long end;
- unsigned long start;
- uaddr = (unsigned long) iter->iov[i].iov_base;
- end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- start = uaddr >> PAGE_SHIFT;
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
- nr_pages += end - start;
- }
- if (offset)
- nr_pages++;
- bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
- if (!bmd)
- return ERR_PTR(-ENOMEM);
- /*
- * We need to do a deep copy of the iov_iter including the iovecs.
- * The caller provided iov might point to an on-stack or otherwise
- * shortlived one.
- */
- bmd->is_our_pages = map_data ? 0 : 1;
- memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
- bmd->iter = *iter;
- bmd->iter.iov = bmd->iov;
- ret = -ENOMEM;
- bio = bio_kmalloc(gfp_mask, nr_pages);
- if (!bio)
- goto out_bmd;
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- ret = 0;
- if (map_data) {
- nr_pages = 1 << map_data->page_order;
- i = map_data->offset / PAGE_SIZE;
- }
- while (len) {
- unsigned int bytes = PAGE_SIZE;
- bytes -= offset;
- if (bytes > len)
- bytes = len;
- if (map_data) {
- if (i == map_data->nr_entries * nr_pages) {
- ret = -ENOMEM;
- break;
- }
- page = map_data->pages[i / nr_pages];
- page += (i % nr_pages);
- i++;
- } else {
- page = alloc_page(q->bounce_gfp | gfp_mask);
- if (!page) {
- ret = -ENOMEM;
- break;
- }
- }
- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
- break;
- len -= bytes;
- offset = 0;
- }
- if (ret)
- goto cleanup;
- /*
- * success
- */
- if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
- (map_data && map_data->from_user)) {
- ret = bio_copy_from_iter(bio, *iter);
- if (ret)
- goto cleanup;
- }
- bio->bi_private = bmd;
- return bio;
- cleanup:
- if (!map_data)
- bio_free_pages(bio);
- bio_put(bio);
- out_bmd:
- kfree(bmd);
- return ERR_PTR(ret);
- }
- /**
- * bio_map_user_iov - map user iovec into bio
- * @q: the struct request_queue for the bio
- * @iter: iovec iterator
- * @gfp_mask: memory allocation flags
- *
- * Map the user space address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
- struct bio *bio_map_user_iov(struct request_queue *q,
- const struct iov_iter *iter,
- gfp_t gfp_mask)
- {
- int j;
- int nr_pages = 0;
- struct page **pages;
- struct bio *bio;
- int cur_page = 0;
- int ret, offset;
- struct iov_iter i;
- struct iovec iov;
- struct bio_vec *bvec;
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
- unsigned long len = iov.iov_len;
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
- nr_pages += end - start;
- /*
- * buffer must be aligned to at least logical block size for now
- */
- if (uaddr & queue_dma_alignment(q))
- return ERR_PTR(-EINVAL);
- }
- if (!nr_pages)
- return ERR_PTR(-EINVAL);
- bio = bio_kmalloc(gfp_mask, nr_pages);
- if (!bio)
- return ERR_PTR(-ENOMEM);
- ret = -ENOMEM;
- pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
- if (!pages)
- goto out;
- iov_for_each(iov, i, *iter) {
- unsigned long uaddr = (unsigned long) iov.iov_base;
- unsigned long len = iov.iov_len;
- unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = uaddr >> PAGE_SHIFT;
- const int local_nr_pages = end - start;
- const int page_limit = cur_page + local_nr_pages;
- ret = get_user_pages_fast(uaddr, local_nr_pages,
- (iter->type & WRITE) != WRITE,
- &pages[cur_page]);
- if (unlikely(ret < local_nr_pages)) {
- for (j = cur_page; j < page_limit; j++) {
- if (!pages[j])
- break;
- put_page(pages[j]);
- }
- ret = -EFAULT;
- goto out_unmap;
- }
- offset = offset_in_page(uaddr);
- for (j = cur_page; j < page_limit; j++) {
- unsigned int bytes = PAGE_SIZE - offset;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
- if (len <= 0)
- break;
-
- if (bytes > len)
- bytes = len;
- /*
- * sorry...
- */
- if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
- bytes)
- break;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(pages[j]);
- len -= bytes;
- offset = 0;
- }
- cur_page = j;
- /*
- * release the pages we didn't map into the bio, if any
- */
- while (j < page_limit)
- put_page(pages[j++]);
- }
- kfree(pages);
- /*
- * set data direction, and check if mapped pages need bouncing
- */
- if (iter->type & WRITE)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio_set_flag(bio, BIO_USER_MAPPED);
- /*
- * subtle -- if __bio_map_user() ended up bouncing a bio,
- * it would normally disappear when its bi_end_io is run.
- * however, we need it for the unmap, so grab an extra
- * reference to it
- */
- bio_get(bio);
- return bio;
- out_unmap:
- bio_for_each_segment_all(bvec, bio, j) {
- put_page(bvec->bv_page);
- }
- out:
- kfree(pages);
- bio_put(bio);
- return ERR_PTR(ret);
- }
- static void __bio_unmap_user(struct bio *bio)
- {
- struct bio_vec *bvec;
- int i;
- /*
- * make sure we dirty pages we wrote to
- */
- bio_for_each_segment_all(bvec, bio, i) {
- if (bio_data_dir(bio) == READ)
- set_page_dirty_lock(bvec->bv_page);
- put_page(bvec->bv_page);
- }
- bio_put(bio);
- }
- /**
- * bio_unmap_user - unmap a bio
- * @bio: the bio being unmapped
- *
- * Unmap a bio previously mapped by bio_map_user(). Must be called from
- * process context.
- *
- * bio_unmap_user() may sleep.
- */
- void bio_unmap_user(struct bio *bio)
- {
- __bio_unmap_user(bio);
- bio_put(bio);
- }
- static void bio_map_kern_endio(struct bio *bio)
- {
- bio_put(bio);
- }
- /**
- * bio_map_kern - map kernel address into bio
- * @q: the struct request_queue for the bio
- * @data: pointer to buffer to map
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio allocation
- *
- * Map the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
- struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
- gfp_t gfp_mask)
- {
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- const int nr_pages = end - start;
- int offset, i;
- struct bio *bio;
- bio = bio_kmalloc(gfp_mask, nr_pages);
- if (!bio)
- return ERR_PTR(-ENOMEM);
- offset = offset_in_page(kaddr);
- for (i = 0; i < nr_pages; i++) {
- unsigned int bytes = PAGE_SIZE - offset;
- if (len <= 0)
- break;
- if (bytes > len)
- bytes = len;
- if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
- offset) < bytes) {
- /* we don't support partial mappings */
- bio_put(bio);
- return ERR_PTR(-EINVAL);
- }
- data += bytes;
- len -= bytes;
- offset = 0;
- }
- bio->bi_end_io = bio_map_kern_endio;
- return bio;
- }
- EXPORT_SYMBOL(bio_map_kern);
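- /*
- * Illustrative sketch (not part of the original file): wrapping a kernel
- * buffer for a driver-internal transfer. "q" is the target request_queue and
- * "buf"/"len" a kmalloc'ed buffer owned by the caller.
- *
- *	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
- *
- *	if (IS_ERR(bio))
- *		return PTR_ERR(bio);
- *	(attach the bio to a request, or set bi_bdev/bi_iter and submit it)
- */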
- static void bio_copy_kern_endio(struct bio *bio)
- {
- bio_free_pages(bio);
- bio_put(bio);
- }
- static void bio_copy_kern_endio_read(struct bio *bio)
- {
- char *p = bio->bi_private;
- struct bio_vec *bvec;
- int i;
- bio_for_each_segment_all(bvec, bio, i) {
- memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
- p += bvec->bv_len;
- }
- bio_copy_kern_endio(bio);
- }
- /**
- * bio_copy_kern - copy kernel address into bio
- * @q: the struct request_queue for the bio
- * @data: pointer to buffer to copy
- * @len: length in bytes
- * @gfp_mask: allocation flags for bio and page allocation
- * @reading: data direction is READ
- *
- * copy the kernel address into a bio suitable for io to a block
- * device. Returns an error pointer in case of error.
- */
- struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
- gfp_t gfp_mask, int reading)
- {
- unsigned long kaddr = (unsigned long)data;
- unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long start = kaddr >> PAGE_SHIFT;
- struct bio *bio;
- void *p = data;
- int nr_pages = 0;
- /*
- * Overflow, abort
- */
- if (end < start)
- return ERR_PTR(-EINVAL);
- nr_pages = end - start;
- bio = bio_kmalloc(gfp_mask, nr_pages);
- if (!bio)
- return ERR_PTR(-ENOMEM);
- while (len) {
- struct page *page;
- unsigned int bytes = PAGE_SIZE;
- if (bytes > len)
- bytes = len;
- page = alloc_page(q->bounce_gfp | gfp_mask);
- if (!page)
- goto cleanup;
- if (!reading)
- memcpy(page_address(page), p, bytes);
- if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
- break;
- len -= bytes;
- p += bytes;
- }
- if (reading) {
- bio->bi_end_io = bio_copy_kern_endio_read;
- bio->bi_private = data;
- } else {
- bio->bi_end_io = bio_copy_kern_endio;
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- }
- return bio;
- cleanup:
- bio_free_pages(bio);
- bio_put(bio);
- return ERR_PTR(-ENOMEM);
- }
- /*
- * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
- * for performing direct-IO in BIOs.
- *
- * The problem is that we cannot run set_page_dirty() from interrupt context
- * because the required locks are not interrupt-safe. So what we can do is to
- * mark the pages dirty _before_ performing IO. And in interrupt context,
- * check that the pages are still dirty. If so, fine. If not, redirty them
- * in process context.
- *
- * We special-case compound pages here: normally this means reads into hugetlb
- * pages. The logic in here doesn't really work right for compound pages
- * because the VM does not uniformly chase down the head page in all cases.
- * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
- * handle them at all. So we skip compound pages here at an early stage.
- *
- * Note that this code is very hard to test under normal circumstances because
- * direct-io pins the pages with get_user_pages(). This makes
- * is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, flusher threads) could clean the pages if they are mapped
- * pagecache.
- *
- * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
- * deferred bio dirtying paths.
- */
- /*
- * bio_set_pages_dirty() will mark all the bio's pages as dirty.
- */
- void bio_set_pages_dirty(struct bio *bio)
- {
- struct bio_vec *bvec;
- int i;
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
- if (page && !PageCompound(page))
- set_page_dirty_lock(page);
- }
- }
- static void bio_release_pages(struct bio *bio)
- {
- struct bio_vec *bvec;
- int i;
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
- if (page)
- put_page(page);
- }
- }
- /*
- * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
- * If they are, then fine. If, however, some pages are clean then they must
- * have been written out during the direct-IO read. So we take another ref on
- * the BIO and the offending pages and re-dirty the pages in process context.
- *
- * It is expected that bio_check_pages_dirty() will wholly own the BIO from
- * here on. It will run one put_page() against each page and will run one
- * bio_put() against the BIO.
- */
- static void bio_dirty_fn(struct work_struct *work);
- static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
- static DEFINE_SPINLOCK(bio_dirty_lock);
- static struct bio *bio_dirty_list;
- /*
- * This runs in process context
- */
- static void bio_dirty_fn(struct work_struct *work)
- {
- unsigned long flags;
- struct bio *bio;
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio = bio_dirty_list;
- bio_dirty_list = NULL;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
- while (bio) {
- struct bio *next = bio->bi_private;
- bio_set_pages_dirty(bio);
- bio_release_pages(bio);
- bio_put(bio);
- bio = next;
- }
- }
- void bio_check_pages_dirty(struct bio *bio)
- {
- struct bio_vec *bvec;
- int nr_clean_pages = 0;
- int i;
- bio_for_each_segment_all(bvec, bio, i) {
- struct page *page = bvec->bv_page;
- if (PageDirty(page) || PageCompound(page)) {
- put_page(page);
- bvec->bv_page = NULL;
- } else {
- nr_clean_pages++;
- }
- }
- if (nr_clean_pages) {
- unsigned long flags;
- spin_lock_irqsave(&bio_dirty_lock, flags);
- bio->bi_private = bio_dirty_list;
- bio_dirty_list = bio;
- spin_unlock_irqrestore(&bio_dirty_lock, flags);
- schedule_work(&bio_dirty_work);
- } else {
- bio_put(bio);
- }
- }
- void generic_start_io_acct(int rw, unsigned long sectors,
- struct hd_struct *part)
- {
- int cpu = part_stat_lock();
- part_round_stats(cpu, part);
- part_stat_inc(cpu, part, ios[rw]);
- part_stat_add(cpu, part, sectors[rw], sectors);
- part_inc_in_flight(part, rw);
- part_stat_unlock();
- }
- EXPORT_SYMBOL(generic_start_io_acct);
- void generic_end_io_acct(int rw, struct hd_struct *part,
- unsigned long start_time)
- {
- unsigned long duration = jiffies - start_time;
- int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[rw], duration);
- part_round_stats(cpu, part);
- part_dec_in_flight(part, rw);
- part_stat_unlock();
- }
- EXPORT_SYMBOL(generic_end_io_acct);
- #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
- void bio_flush_dcache_pages(struct bio *bi)
- {
- struct bio_vec bvec;
- struct bvec_iter iter;
- bio_for_each_segment(bvec, bi, iter)
- flush_dcache_page(bvec.bv_page);
- }
- EXPORT_SYMBOL(bio_flush_dcache_pages);
- #endif
- static inline bool bio_remaining_done(struct bio *bio)
- {
- /*
- * If we're not chaining, then ->__bi_remaining is always 1 and
- * we always end io on the first invocation.
- */
- if (!bio_flagged(bio, BIO_CHAIN))
- return true;
- BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
- if (atomic_dec_and_test(&bio->__bi_remaining)) {
- bio_clear_flag(bio, BIO_CHAIN);
- return true;
- }
- return false;
- }
- /**
- * bio_endio - end I/O on a bio
- * @bio: bio
- *
- * Description:
- * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
- * way to end I/O on a bio. No one should call bi_end_io() directly on a
- * bio unless they own it and thus know that it has an end_io function.
- **/
- void bio_endio(struct bio *bio)
- {
- again:
- if (!bio_remaining_done(bio))
- return;
- /*
- * Need to have a real endio function for chained bios, otherwise
- * various corner cases will break (like stacking block devices that
- * save/restore bi_end_io) - however, we want to avoid unbounded
- * recursion and blowing the stack. Tail call optimization would
- * handle this, but compiling with frame pointers also disables
- * gcc's sibling call optimization.
- */
- if (bio->bi_end_io == bio_chain_endio) {
- bio = __bio_chain_endio(bio);
- goto again;
- }
- if (bio->bi_end_io)
- bio->bi_end_io(bio);
- }
- EXPORT_SYMBOL(bio_endio);
- /**
- * bio_split - split a bio
- * @bio: bio to split
- * @sectors: number of sectors to split from the front of @bio
- * @gfp: gfp mask
- * @bs: bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * Unless this is a discard request the newly allocated bio will point
- * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
- * @bio is not freed before the split.
- */
- struct bio *bio_split(struct bio *bio, int sectors,
- gfp_t gfp, struct bio_set *bs)
- {
- struct bio *split = NULL;
- BUG_ON(sectors <= 0);
- BUG_ON(sectors >= bio_sectors(bio));
- /*
- * Discards need a mutable bio_vec to accommodate the payload
- * required by the DSM TRIM and UNMAP commands.
- */
- if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
- split = bio_clone_bioset(bio, gfp, bs);
- else
- split = bio_clone_fast(bio, gfp, bs);
- if (!split)
- return NULL;
- split->bi_iter.bi_size = sectors << 9;
- if (bio_integrity(split))
- bio_integrity_trim(split, 0, sectors);
- bio_advance(bio, split->bi_iter.bi_size);
- return split;
- }
- EXPORT_SYMBOL(bio_split);
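- /*
- * Illustrative sketch (not part of the original file): the usual
- * split-and-chain pattern for drivers that must bound bio size.
- * "max_sectors" is a hypothetical per-device limit and "bs" a bio_set
- * owned by the caller.
- *
- *	if (bio_sectors(bio) > max_sectors) {
- *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);
- *
- *		bio_chain(split, bio);
- *		generic_make_request(bio);	(resubmit the remainder)
- *		bio = split;			(handle the front part now)
- *	}
- */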
- /**
- * bio_trim - trim a bio
- * @bio: bio to trim
- * @offset: number of sectors to trim from the front of @bio
- * @size: size we want to trim @bio to, in sectors
- */
- void bio_trim(struct bio *bio, int offset, int size)
- {
- /* 'bio' is a cloned bio which we need to trim to match
- * the given offset and size.
- */
- size <<= 9;
- if (offset == 0 && size == bio->bi_iter.bi_size)
- return;
- bio_clear_flag(bio, BIO_SEG_VALID);
- bio_advance(bio, offset << 9);
- bio->bi_iter.bi_size = size;
- }
- EXPORT_SYMBOL_GPL(bio_trim);
- /*
- * create memory pools for biovec's in a bio_set.
- * use the global biovec slabs created for general use.
- */
- mempool_t *biovec_create_pool(int pool_entries)
- {
- struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
- return mempool_create_slab_pool(pool_entries, bp->slab);
- }
- void bioset_free(struct bio_set *bs)
- {
- if (bs->rescue_workqueue)
- destroy_workqueue(bs->rescue_workqueue);
- if (bs->bio_pool)
- mempool_destroy(bs->bio_pool);
- if (bs->bvec_pool)
- mempool_destroy(bs->bvec_pool);
- bioset_integrity_free(bs);
- bio_put_slab(bs);
- kfree(bs);
- }
- EXPORT_SYMBOL(bioset_free);
- static struct bio_set *__bioset_create(unsigned int pool_size,
- unsigned int front_pad,
- bool create_bvec_pool)
- {
- unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
- struct bio_set *bs;
- bs = kzalloc(sizeof(*bs), GFP_KERNEL);
- if (!bs)
- return NULL;
- bs->front_pad = front_pad;
- spin_lock_init(&bs->rescue_lock);
- bio_list_init(&bs->rescue_list);
- INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
- bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
- if (!bs->bio_slab) {
- kfree(bs);
- return NULL;
- }
- bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
- if (!bs->bio_pool)
- goto bad;
- if (create_bvec_pool) {
- bs->bvec_pool = biovec_create_pool(pool_size);
- if (!bs->bvec_pool)
- goto bad;
- }
- bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
- if (!bs->rescue_workqueue)
- goto bad;
- return bs;
- bad:
- bioset_free(bs);
- return NULL;
- }
- /**
- * bioset_create - Create a bio_set
- * @pool_size: Number of bio and bio_vecs to cache in the mempool
- * @front_pad: Number of bytes to allocate in front of the returned bio
- *
- * Description:
- * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
- * to ask for a number of bytes to be allocated in front of the bio.
- * Front pad allocation is useful for embedding the bio inside
- * another structure, to avoid allocating extra data to go with the bio.
- * Note that the bio must be embedded at the END of that structure always,
- * or things will break badly.
- */
- struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
- {
- return __bioset_create(pool_size, front_pad, true);
- }
- EXPORT_SYMBOL(bioset_create);
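- /*
- * Illustrative sketch (not part of the original file): using front_pad to
- * co-allocate a hypothetical per-I/O structure with its bio. The bio must
- * be the last member, as noted above.
- *
- *	struct my_io {
- *		void		*ctx;
- *		struct bio	bio;
- *	};
- *
- *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
- *	...
- *	bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
- *	io = container_of(bio, struct my_io, bio);
- */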
- /**
- * bioset_create_nobvec - Create a bio_set without bio_vec mempool
- * @pool_size: Number of bio to cache in the mempool
- * @front_pad: Number of bytes to allocate in front of the returned bio
- *
- * Description:
- * Same functionality as bioset_create() except that no mempool is
- * created for bio_vecs, saving some memory for bio_clone_fast() users.
- */
- struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
- {
- return __bioset_create(pool_size, front_pad, false);
- }
- EXPORT_SYMBOL(bioset_create_nobvec);
- #ifdef CONFIG_BLK_CGROUP
- /**
- * bio_associate_blkcg - associate a bio with the specified blkcg
- * @bio: target bio
- * @blkcg_css: css of the blkcg to associate
- *
- * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
- * treat @bio as if it were issued by a task which belongs to the blkcg.
- *
- * This function takes an extra reference of @blkcg_css which will be put
- * when @bio is released. The caller must own @bio and is responsible for
- * synchronizing calls to this function.
- */
- int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
- {
- if (unlikely(bio->bi_css))
- return -EBUSY;
- css_get(blkcg_css);
- bio->bi_css = blkcg_css;
- return 0;
- }
- EXPORT_SYMBOL_GPL(bio_associate_blkcg);
- /**
- * bio_associate_current - associate a bio with %current
- * @bio: target bio
- *
- * Associate @bio with %current if it hasn't been associated yet. Block
- * layer will treat @bio as if it were issued by %current no matter which
- * task actually issues it.
- *
- * This function takes an extra reference of @task's io_context and blkcg
- * which will be put when @bio is released. The caller must own @bio,
- * ensure %current->io_context exists, and is responsible for synchronizing
- * calls to this function.
- */
- int bio_associate_current(struct bio *bio)
- {
- struct io_context *ioc;
- if (bio->bi_css)
- return -EBUSY;
- ioc = current->io_context;
- if (!ioc)
- return -ENOENT;
- get_io_context_active(ioc);
- bio->bi_ioc = ioc;
- bio->bi_css = task_get_css(current, io_cgrp_id);
- return 0;
- }
- EXPORT_SYMBOL_GPL(bio_associate_current);
- /**
- * bio_disassociate_task - undo bio_associate_current()
- * @bio: target bio
- */
- void bio_disassociate_task(struct bio *bio)
- {
- if (bio->bi_ioc) {
- put_io_context(bio->bi_ioc);
- bio->bi_ioc = NULL;
- }
- if (bio->bi_css) {
- css_put(bio->bi_css);
- bio->bi_css = NULL;
- }
- }
- /**
- * bio_clone_blkcg_association - clone blkcg association from src to dst bio
- * @dst: destination bio
- * @src: source bio
- */
- void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
- {
- if (src->bi_css)
- WARN_ON(bio_associate_blkcg(dst, src->bi_css));
- }
- #endif /* CONFIG_BLK_CGROUP */
- static void __init biovec_init_slabs(void)
- {
- int i;
- for (i = 0; i < BVEC_POOL_NR; i++) {
- int size;
- struct biovec_slab *bvs = bvec_slabs + i;
- if (bvs->nr_vecs <= BIO_INLINE_VECS) {
- bvs->slab = NULL;
- continue;
- }
- size = bvs->nr_vecs * sizeof(struct bio_vec);
- bvs->slab = kmem_cache_create(bvs->name, size, 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
- }
- }
- static int __init init_bio(void)
- {
- bio_slab_max = 2;
- bio_slab_nr = 0;
- bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
- if (!bio_slabs)
- panic("bio: can't allocate bios\n");
- bio_integrity_init();
- biovec_init_slabs();
- fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
- if (!fs_bio_set)
- panic("bio: can't allocate bios\n");
- if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
- panic("bio: can't create integrity pool\n");
- return 0;
- }
- subsys_initcall(init_bio);