- // SPDX-License-Identifier: GPL-2.0
- /*
- * Deadline Scheduling Class (SCHED_DEADLINE)
- *
- * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
- *
- * Tasks that periodically execute their instances for less than their
- * runtime won't miss any of their deadlines.
- * Tasks that are not periodic or sporadic, or that try to execute more
- * than their reserved bandwidth, will be slowed down (and may potentially
- * miss some of their deadlines), and won't affect any other task.
- *
- * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
- * Juri Lelli <juri.lelli@gmail.com>,
- * Michael Trimarchi <michael@amarulasolutions.com>,
- * Fabio Checconi <fchecconi@gmail.com>
- */
- #include "sched.h"
- #include <linux/slab.h>
- #include <uapi/linux/sched/types.h>
- #include "walt.h"
- struct dl_bandwidth def_dl_bandwidth;
- static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
- {
- return container_of(dl_se, struct task_struct, dl);
- }
- static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
- {
- return container_of(dl_rq, struct rq, dl);
- }
- static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
- {
- struct task_struct *p = dl_task_of(dl_se);
- struct rq *rq = task_rq(p);
- return &rq->dl;
- }
- static inline int on_dl_rq(struct sched_dl_entity *dl_se)
- {
- return !RB_EMPTY_NODE(&dl_se->rb_node);
- }
- #ifdef CONFIG_SMP
- static inline struct dl_bw *dl_bw_of(int i)
- {
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
- "sched RCU must be held");
- return &cpu_rq(i)->rd->dl_bw;
- }
- static inline int dl_bw_cpus(int i)
- {
- struct root_domain *rd = cpu_rq(i)->rd;
- int cpus = 0;
- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
- "sched RCU must be held");
- for_each_cpu_and(i, rd->span, cpu_active_mask)
- cpus++;
- return cpus;
- }
- #else
- static inline struct dl_bw *dl_bw_of(int i)
- {
- return &cpu_rq(i)->dl.dl_bw;
- }
- static inline int dl_bw_cpus(int i)
- {
- return 1;
- }
- #endif
- static inline
- void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
- {
- u64 old = dl_rq->running_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
- dl_rq->running_bw += dl_bw;
- SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
- SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
- }
- static inline
- void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
- {
- u64 old = dl_rq->running_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
- dl_rq->running_bw -= dl_bw;
- SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
- if (dl_rq->running_bw > old)
- dl_rq->running_bw = 0;
- }
- static inline
- void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
- {
- u64 old = dl_rq->this_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
- dl_rq->this_bw += dl_bw;
- SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
- }
- static inline
- void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
- {
- u64 old = dl_rq->this_bw;
- lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
- dl_rq->this_bw -= dl_bw;
- SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
- if (dl_rq->this_bw > old)
- dl_rq->this_bw = 0;
- SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
- }
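- /*
- * Note on units (illustration with hypothetical numbers): the dl_bw
- * values accumulated above are utilizations in fixed point, i.e.
- * (runtime / period) << BW_SHIFT. With BW_SHIFT = 20, a task with
- * runtime = 10ms and period = 100ms contributes
- * to_ratio(period, runtime) = (10 << 20) / 100 ~= 104857,
- * i.e. a utilization of 0.1.
- */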
- void dl_change_utilization(struct task_struct *p, u64 new_bw)
- {
- struct rq *rq;
- if (task_on_rq_queued(p))
- return;
- rq = task_rq(p);
- if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
- p->dl.dl_non_contending = 0;
- /*
- * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
- * will see that dl_non_contending is not set, and
- * will not touch the rq's active utilization,
- * so we are still safe.
- */
- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
- }
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- add_rq_bw(new_bw, &rq->dl);
- }
- /*
- * The utilization of a task cannot be immediately removed from
- * the rq active utilization (running_bw) when the task blocks.
- * Instead, we have to wait for the so-called "0-lag time".
- *
- * If a task blocks before the "0-lag time", a timer (the inactive
- * timer) is armed, and running_bw is decreased when the timer
- * fires.
- *
- * If the task wakes up again before the inactive timer fires,
- * the timer is cancelled, whereas if the task wakes up after the
- * inactive timer fired (and running_bw has been decreased) the
- * task's utilization has to be added to running_bw again.
- * A flag in the deadline scheduling entity (dl_non_contending)
- * is used to avoid race conditions between the inactive timer handler
- * and task wakeups.
- *
- * The following diagram shows how running_bw is updated. A task is
- * "ACTIVE" when its utilization contributes to running_bw; an
- * "ACTIVE contending" task is in the TASK_RUNNING state, while an
- * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
- * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
- * time has already passed, and which therefore no longer contributes to
- * running_bw.
- * +------------------+
- * wakeup | ACTIVE |
- * +------------------>+ contending |
- * | add_running_bw | |
- * | +----+------+------+
- * | | ^
- * | dequeue | |
- * +--------+-------+ | |
- * | | t >= 0-lag | | wakeup
- * | INACTIVE |<---------------+ |
- * | | sub_running_bw | |
- * +--------+-------+ | |
- * ^ | |
- * | t < 0-lag | |
- * | | |
- * | V |
- * | +----+------+------+
- * | sub_running_bw | ACTIVE |
- * +-------------------+ |
- * inactive timer | non contending |
- * fired +------------------+
- *
- * The task_non_contending() function is invoked when a task
- * blocks, and checks if the 0-lag time already passed or
- * not (in the first case, it directly updates running_bw;
- * in the second case, it arms the inactive timer).
- *
- * The task_contending() function is invoked when a task wakes
- * up, and checks if the task is still in the "ACTIVE non contending"
- * state or not (in the second case, it updates running_bw).
- */
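- /*
- * Worked example (hypothetical numbers): a task with dl_runtime = 10ms
- * and dl_period = 100ms blocks with runtime = 4ms left and its absolute
- * deadline 40ms away. Its "0-lag time" is
- * deadline - runtime * dl_period / dl_runtime = deadline - 40ms, i.e.
- * right now: consuming the remaining 4ms at the reserved rate of 0.1
- * would take exactly the 40ms left to the deadline, so the utilization
- * can be removed from running_bw immediately.
- */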
- static void task_non_contending(struct task_struct *p)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- struct hrtimer *timer = &dl_se->inactive_timer;
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
- s64 zerolag_time;
- /*
- * If this is a non-deadline task that has been boosted,
- * do nothing
- */
- if (dl_se->dl_runtime == 0)
- return;
- WARN_ON(dl_se->dl_non_contending);
- zerolag_time = dl_se->deadline -
- div64_long((dl_se->runtime * dl_se->dl_period),
- dl_se->dl_runtime);
- /*
- * Using relative times instead of the absolute "0-lag time"
- * allows us to simplify the code.
- */
- zerolag_time -= rq_clock(rq);
- /*
- * If the "0-lag time" already passed, decrease the active
- * utilization now, instead of starting a timer
- */
- if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
- if (dl_task(p))
- sub_running_bw(dl_se->dl_bw, dl_rq);
- if (!dl_task(p) || p->state == TASK_DEAD) {
- struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- if (p->state == TASK_DEAD)
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- raw_spin_lock(&dl_b->lock);
- __dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
- __dl_clear_params(p);
- raw_spin_unlock(&dl_b->lock);
- }
- return;
- }
- dl_se->dl_non_contending = 1;
- get_task_struct(p);
- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
- }
- static void task_contending(struct sched_dl_entity *dl_se, int flags)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- /*
- * If this is a non-deadline task that has been boosted,
- * do nothing
- */
- if (dl_se->dl_runtime == 0)
- return;
- if (flags & ENQUEUE_MIGRATED)
- add_rq_bw(dl_se->dl_bw, dl_rq);
- if (dl_se->dl_non_contending) {
- dl_se->dl_non_contending = 0;
- /*
- * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
- * will see that dl_non_contending is not set, and
- * will not touch the rq's active utilization,
- * so we are still safe.
- */
- if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
- put_task_struct(dl_task_of(dl_se));
- } else {
- /*
- * Since "dl_non_contending" is not set, the
- * task's utilization has already been removed from
- * active utilization (either when the task blocked, or
- * when the "inactive timer" fired).
- * So, add it back.
- */
- add_running_bw(dl_se->dl_bw, dl_rq);
- }
- }
- static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- return dl_rq->root.rb_leftmost == &dl_se->rb_node;
- }
- void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
- {
- raw_spin_lock_init(&dl_b->dl_runtime_lock);
- dl_b->dl_period = period;
- dl_b->dl_runtime = runtime;
- }
- void init_dl_bw(struct dl_bw *dl_b)
- {
- raw_spin_lock_init(&dl_b->lock);
- raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
- if (global_rt_runtime() == RUNTIME_INF)
- dl_b->bw = -1;
- else
- dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
- raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
- dl_b->total_bw = 0;
- }
- void init_dl_rq(struct dl_rq *dl_rq)
- {
- dl_rq->root = RB_ROOT_CACHED;
- #ifdef CONFIG_SMP
- /* zero means no -deadline tasks */
- dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
- dl_rq->dl_nr_migratory = 0;
- dl_rq->overloaded = 0;
- dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
- #else
- init_dl_bw(&dl_rq->dl_bw);
- #endif
- dl_rq->running_bw = 0;
- dl_rq->this_bw = 0;
- init_dl_rq_bw_ratio(dl_rq);
- }
- #ifdef CONFIG_SMP
- static inline int dl_overloaded(struct rq *rq)
- {
- return atomic_read(&rq->rd->dlo_count);
- }
- static inline void dl_set_overload(struct rq *rq)
- {
- if (!rq->online)
- return;
- cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
- /*
- * Must be visible before the overload count is
- * set (as in sched_rt.c).
- *
- * Matched by the barrier in pull_dl_task().
- */
- smp_wmb();
- atomic_inc(&rq->rd->dlo_count);
- }
- static inline void dl_clear_overload(struct rq *rq)
- {
- if (!rq->online)
- return;
- atomic_dec(&rq->rd->dlo_count);
- cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
- }
- static void update_dl_migration(struct dl_rq *dl_rq)
- {
- if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
- if (!dl_rq->overloaded) {
- dl_set_overload(rq_of_dl_rq(dl_rq));
- dl_rq->overloaded = 1;
- }
- } else if (dl_rq->overloaded) {
- dl_clear_overload(rq_of_dl_rq(dl_rq));
- dl_rq->overloaded = 0;
- }
- }
- static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- struct task_struct *p = dl_task_of(dl_se);
- if (p->nr_cpus_allowed > 1)
- dl_rq->dl_nr_migratory++;
- update_dl_migration(dl_rq);
- }
- static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- struct task_struct *p = dl_task_of(dl_se);
- if (p->nr_cpus_allowed > 1)
- dl_rq->dl_nr_migratory--;
- update_dl_migration(dl_rq);
- }
- /*
- * Unlike in sched_rt.c, the list of pushable -deadline tasks is not
- * a plist; it is an rb-tree with tasks ordered by deadline.
- */
- static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
- {
- struct dl_rq *dl_rq = &rq->dl;
- struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct task_struct *entry;
- bool leftmost = true;
- BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct task_struct,
- pushable_dl_tasks);
- if (dl_entity_preempt(&p->dl, &entry->dl))
- link = &parent->rb_left;
- else {
- link = &parent->rb_right;
- leftmost = false;
- }
- }
- if (leftmost)
- dl_rq->earliest_dl.next = p->dl.deadline;
- rb_link_node(&p->pushable_dl_tasks, parent, link);
- rb_insert_color_cached(&p->pushable_dl_tasks,
- &dl_rq->pushable_dl_tasks_root, leftmost);
- }
- static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
- {
- struct dl_rq *dl_rq = &rq->dl;
- if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
- return;
- if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
- struct rb_node *next_node;
- next_node = rb_next(&p->pushable_dl_tasks);
- if (next_node) {
- dl_rq->earliest_dl.next = rb_entry(next_node,
- struct task_struct, pushable_dl_tasks)->dl.deadline;
- }
- }
- rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
- RB_CLEAR_NODE(&p->pushable_dl_tasks);
- }
- static inline int has_pushable_dl_tasks(struct rq *rq)
- {
- return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
- }
- static int push_dl_task(struct rq *rq);
- static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
- {
- return dl_task(prev);
- }
- static DEFINE_PER_CPU(struct callback_head, dl_push_head);
- static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
- static void push_dl_tasks(struct rq *);
- static void pull_dl_task(struct rq *);
- static inline void queue_push_tasks(struct rq *rq)
- {
- if (!has_pushable_dl_tasks(rq))
- return;
- queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
- }
- static inline void queue_pull_task(struct rq *rq)
- {
- queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
- }
- static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
- static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
- {
- struct rq *later_rq = NULL;
- later_rq = find_lock_later_rq(p, rq);
- if (!later_rq) {
- int cpu;
- /*
- * If we cannot preempt any rq, fall back to pick any
- * online cpu.
- */
- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
- if (cpu >= nr_cpu_ids) {
- /*
- * Failed to find any suitable CPU.
- * The task will never come back!
- */
- BUG_ON(dl_bandwidth_enabled());
- /*
- * If admission control is disabled we
- * try a little harder to let the task
- * run.
- */
- cpu = cpumask_any(cpu_active_mask);
- }
- later_rq = cpu_rq(cpu);
- double_lock_balance(rq, later_rq);
- }
- set_task_cpu(p, later_rq->cpu);
- double_unlock_balance(later_rq, rq);
- return later_rq;
- }
- #else
- static inline
- void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
- {
- }
- static inline
- void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
- {
- }
- static inline
- void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- }
- static inline
- void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- }
- static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
- {
- return false;
- }
- static inline void pull_dl_task(struct rq *rq)
- {
- }
- static inline void queue_push_tasks(struct rq *rq)
- {
- }
- static inline void queue_pull_task(struct rq *rq)
- {
- }
- #endif /* CONFIG_SMP */
- static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
- static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
- static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
- int flags);
- /*
- * We are being explicitly informed that a new instance is starting,
- * and this means that:
- * - the absolute deadline of the entity has to be placed at
- * current time + relative deadline;
- * - the runtime of the entity has to be set to the maximum value.
- *
- * The capability of specifying such an event is useful whenever a -deadline
- * entity wants to (try to!) synchronize its behaviour with the scheduler's,
- * and to (try to!) reconcile itself with its own scheduling
- * parameters.
- */
- static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
- WARN_ON(dl_se->dl_boosted);
- WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
- /*
- * We are racing with the deadline timer. So, do nothing because
- * the deadline timer handler will take care of properly recharging
- * the runtime and postponing the deadline
- */
- if (dl_se->dl_throttled)
- return;
- /*
- * We use the regular wall clock time to set deadlines in the
- * future; in fact, we must consider execution overheads (time
- * spent on hardirq context, etc.).
- */
- dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
- dl_se->runtime = dl_se->dl_runtime;
- }
- /*
- * Pure Earliest Deadline First (EDF) scheduling does not deal with the
- * possibility of an entity lasting more than what it declared, and thus
- * exhausting its runtime.
- *
- * Here we are interested in making runtime overrun possible, but we do
- * not want a misbehaving entity to affect the scheduling of all the
- * other entities.
- * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
- * is used, in order to confine each entity within its own bandwidth.
- *
- * This function deals exactly with that, and ensures that when the runtime
- * of an entity is replenished, its deadline is also postponed. That ensures
- * the overrunning entity can't interfere with other entities in the system
- * and can't make them miss their deadlines. Such overruns typically happen
- * because an entity voluntarily tried to exceed its runtime, or because it
- * underestimated its runtime during sched_setattr().
- */
- static void replenish_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
- BUG_ON(pi_se->dl_runtime <= 0);
- /*
- * This could be the case for a !-dl task that is boosted.
- * Just go with full inherited parameters.
- */
- if (dl_se->dl_deadline == 0) {
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
- }
- if (dl_se->dl_yielded && dl_se->runtime > 0)
- dl_se->runtime = 0;
- /*
- * We keep moving the deadline away until we get some
- * available runtime for the entity. This ensures correct
- * handling of situations where the runtime overrun is
- * arbitrarily large.
- */
- while (dl_se->runtime <= 0) {
- dl_se->deadline += pi_se->dl_period;
- dl_se->runtime += pi_se->dl_runtime;
- }
- /*
- * At this point, the deadline really should be "in
- * the future" with respect to rq->clock. If it's
- * not, we are, for some reason, lagging too much!
- * Anyway, after having warned userspace about that,
- * we still try to keep things running by
- * resetting the deadline and the budget of the
- * entity.
- */
- if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
- printk_deferred_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
- }
- if (dl_se->dl_yielded)
- dl_se->dl_yielded = 0;
- if (dl_se->dl_throttled)
- dl_se->dl_throttled = 0;
- }
- /*
- * Here we check if --at time t-- an entity (which is probably being
- * [re]activated or, in general, enqueued) can use its remaining runtime
- * and its current deadline _without_ exceeding the bandwidth it is
- * assigned (function returns true if it can't). We are in fact applying
- * one of the CBS rules: when a task wakes up, if the residual runtime
- * over residual deadline fits within the allocated bandwidth, then we
- * can keep the current (absolute) deadline and residual budget without
- * disrupting the schedulability of the system. Otherwise, we should
- * refill the runtime and set the deadline a period in the future,
- * because keeping the current (absolute) deadline of the task would
- * result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more information).
- *
- * This function returns true if:
- *
- * runtime / (deadline - t) > dl_runtime / dl_deadline ,
- *
- * IOW we can't recycle current parameters.
- *
- * Notice that the bandwidth check is done against the deadline. For
- * tasks with deadline equal to period, this is the same as using
- * dl_period instead of dl_deadline in the equation above.
- */
- static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, u64 t)
- {
- u64 left, right;
- /*
- * left and right are the two sides of the equation above,
- * after a bit of shuffling to use multiplications instead
- * of divisions.
- *
- * Note that none of the time values involved in the two
- * multiplications are absolute: dl_deadline and dl_runtime
- * are the relative deadline and the maximum runtime of each
- * instance, runtime is the runtime left for the last instance
- * and (deadline - t), since t is rq->clock, is the time left
- * to the (absolute) deadline. Even if overflowing the u64 type
- * is very unlikely to occur in both cases, here we scale down
- * as we want to avoid that risk at all. Scaling down by 10 bits
- * (DL_SCALE) means that we reduce granularity to 1us. We are fine with it,
- * since this is only a true/false check and, anyway, thinking
- * of anything below microseconds resolution is actually fiction
- * (but still we want to give the user that illusion >;).
- */
- left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
- right = ((dl_se->deadline - t) >> DL_SCALE) *
- (pi_se->dl_runtime >> DL_SCALE);
- return dl_time_before(right, left);
- }
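- /*
- * Worked example (hypothetical numbers): dl_runtime = 10ms and
- * dl_deadline = 100ms give a reserved bandwidth of 0.1. If the task
- * wakes up with runtime = 5ms left and 20ms to its absolute deadline,
- * then runtime / (deadline - t) = 5/20 = 0.25 > 0.1, so reusing the
- * current parameters would exceed the reservation and the function
- * reports an overflow.
- */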
- /*
- * Revised wakeup rule [1]: For self-suspending tasks, rather than
- * re-initializing the task's runtime and deadline, the revised wakeup
- * rule adjusts the task's runtime so that the task does not overrun its
- * density.
- *
- * Reasoning: a task may overrun the density if:
- * runtime / (deadline - t) > dl_runtime / dl_deadline
- *
- * Therefore, runtime can be adjusted to:
- * runtime = (dl_runtime / dl_deadline) * (deadline - t)
- *
- * This way, the runtime corresponds to the maximum density
- * the task can use without breaking any rule.
- *
- * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
- * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
- */
- static void
- update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
- {
- u64 laxity = dl_se->deadline - rq_clock(rq);
- /*
- * If the task has deadline < period, and the deadline is in the past,
- * it should already be throttled before this check.
- *
- * See update_dl_entity() comments for further details.
- */
- WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
- dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
- }
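- /*
- * Worked example (hypothetical numbers): with dl_runtime = 10ms and
- * dl_deadline = 100ms, dl_density is 0.1 (in BW_SHIFT fixed point).
- * A task waking up 30ms before its absolute deadline (laxity = 30ms)
- * gets its runtime trimmed to 0.1 * 30ms = 3ms, the most it can
- * consume without exceeding its density.
- */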
- /*
- * Regarding the deadline, a task with implicit deadline has a relative
- * deadline == relative period. A task with constrained deadline has a
- * relative deadline <= relative period.
- *
- * We support constrained deadline tasks. However, there are some restrictions
- * applied only for tasks which do not have an implicit deadline. See
- * update_dl_entity() to know more about such restrictions.
- *
- * dl_is_implicit() returns true if the task has an implicit deadline.
- */
- static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
- {
- return dl_se->dl_deadline == dl_se->dl_period;
- }
- /*
- * When a deadline entity is placed in the runqueue, its runtime and deadline
- * might need to be updated. This is done by a CBS wake up rule. There are two
- * different rules: 1) the original CBS; and 2) the Revisited CBS.
- *
- * When the task is starting a new period, the Original CBS is used. In this
- * case, the runtime is replenished and a new absolute deadline is set.
- *
- * When a task is queued before the beginning of the next period, using the
- * remaining runtime and deadline could make the entity overflow; see
- * dl_entity_overflow() to learn more about runtime overflow. When such a
- * case is detected, the runtime and deadline need to be updated.
- *
- * If the task has an implicit deadline, i.e., deadline == period, the Original
- * CBS is applied: the runtime is replenished and a new absolute deadline is
- * set, as in the previous cases.
- *
- * However, the Original CBS does not work properly for tasks with
- * deadline < period, which are said to have a constrained deadline. By
- * applying the Original CBS, a constrained deadline task would be able to run
- * runtime/deadline in a period. With deadline < period, the task would
- * overrun the runtime/period allowed bandwidth, breaking the admission test.
- *
- * In order to prevent this misbehaviour, the Revisited CBS is used for
- * constrained deadline tasks when a runtime overflow is detected. In the
- * Revisited CBS, rather than replenishing & setting a new absolute deadline,
- * the remaining runtime of the task is reduced to avoid runtime overflow.
- * Please refer to the comments in the update_dl_revised_wakeup() function
- * to learn more about the Revisited CBS rule.
- */
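- /*
- * Illustration (hypothetical numbers): a task with runtime = 25ms,
- * deadline = 50ms and period = 100ms is admitted for a bandwidth of
- * runtime/period = 0.25. If a wakeup after the deadline replenished a
- * full 25ms against a fresh 50ms deadline, the task could consume
- * 25ms in every 50ms window, i.e. a bandwidth of 0.5 -- twice what
- * admission control accounted for. Hence the Revisited CBS trims the
- * runtime instead.
- */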
- static void update_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq);
- if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
- dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
- if (unlikely(!dl_is_implicit(dl_se) &&
- !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
- !dl_se->dl_boosted)){
- update_dl_revised_wakeup(dl_se, rq);
- return;
- }
- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
- dl_se->runtime = pi_se->dl_runtime;
- }
- }
- static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
- {
- return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
- }
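- /*
- * Example (hypothetical numbers): for a constrained task with
- * dl_deadline = 50ms and dl_period = 100ms whose current absolute
- * deadline is D, the next period begins at D - 50ms + 100ms = D + 50ms.
- */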
- /*
- * If the entity depleted all its runtime, and if we want it to sleep
- * while waiting for some new execution time to become available, we
- * set the bandwidth replenishment timer to the replenishment instant
- * and try to activate it.
- *
- * Notice that it is important for the caller to know if the timer
- * actually started or not (i.e., the replenishment instant is in
- * the future or in the past).
- */
- static int start_dl_timer(struct task_struct *p)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- struct hrtimer *timer = &dl_se->dl_timer;
- struct rq *rq = task_rq(p);
- ktime_t now, act;
- s64 delta;
- lockdep_assert_held(&rq->lock);
- /*
- * We want the timer to fire at the deadline, but considering
- * that it is actually coming from rq->clock and not from
- * hrtimer's time base reading.
- */
- act = ns_to_ktime(dl_next_period(dl_se));
- now = hrtimer_cb_get_time(timer);
- delta = ktime_to_ns(now) - rq_clock(rq);
- act = ktime_add_ns(act, delta);
- /*
- * If the expiry time already passed, e.g., because the value
- * chosen as the deadline is too small, don't even try to
- * start the timer in the past!
- */
- if (ktime_us_delta(act, now) < 0)
- return 0;
- /*
- * !enqueued will guarantee another callback; even if one is already in
- * progress. This ensures a balanced {get,put}_task_struct().
- *
- * The race against __run_timer() clearing the enqueued state is
- * harmless because we're holding task_rq()->lock, therefore the timer
- * expiring after we've done the check will wait on its task_rq_lock()
- * and observe our state.
- */
- if (!hrtimer_is_queued(timer)) {
- get_task_struct(p);
- hrtimer_start(timer, act, HRTIMER_MODE_ABS);
- }
- return 1;
- }
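- /*
- * Worked example of the base conversion above (hypothetical numbers):
- * if the replenishment instant is rq_clock + 7ms and the hrtimer base
- * currently reads 1ms ahead of rq_clock (delta = 1ms), the timer is
- * armed at act + delta, so it fires 7ms of rq->clock time from now.
- */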
- /*
- * This is the bandwidth enforcement timer callback. If here, we know
- * a task is not on its dl_rq, since the fact that the timer was running
- * means the task is throttled and needs a runtime replenishment.
- *
- * However, what we actually do depends on whether the task is active
- * (it is on its rq) or has been removed from there by a call to
- * dequeue_task_dl(). In the former case we must issue the runtime
- * replenishment and add the task back to the dl_rq; in the latter, we just
- * do nothing but clear dl_throttled, so that runtime and deadline
- * updating (and the queueing back to dl_rq) will be done by the
- * next call to enqueue_task_dl().
- */
- static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
- {
- struct sched_dl_entity *dl_se = container_of(timer,
- struct sched_dl_entity,
- dl_timer);
- struct task_struct *p = dl_task_of(dl_se);
- struct rq_flags rf;
- struct rq *rq;
- rq = task_rq_lock(p, &rf);
- /*
- * The task might have changed its scheduling policy to something
- * different than SCHED_DEADLINE (through switched_from_dl()).
- */
- if (!dl_task(p))
- goto unlock;
- /*
- * The task might have been boosted by someone else and might be in the
- * boosting/deboosting path; it's not throttled.
- */
- if (dl_se->dl_boosted)
- goto unlock;
- /*
- * Spurious timer due to start_dl_timer() race; or we already received
- * a replenishment from rt_mutex_setprio().
- */
- if (!dl_se->dl_throttled)
- goto unlock;
- sched_clock_tick();
- update_rq_clock(rq);
- /*
- * If the throttle happened during sched-out; like:
- *
- * schedule()
- * deactivate_task()
- * dequeue_task_dl()
- * update_curr_dl()
- * start_dl_timer()
- * __dequeue_task_dl()
- * prev->on_rq = 0;
- *
- * We can be both throttled and !queued. Replenish the counter
- * but do not enqueue -- wait for our wakeup to do that.
- */
- if (!task_on_rq_queued(p)) {
- replenish_dl_entity(dl_se, dl_se);
- goto unlock;
- }
- #ifdef CONFIG_SMP
- if (unlikely(!rq->online)) {
- /*
- * If the runqueue is no longer available, migrate the
- * task elsewhere. This necessarily changes rq.
- */
- lockdep_unpin_lock(&rq->lock, rf.cookie);
- rq = dl_task_offline_migration(rq, p);
- rf.cookie = lockdep_pin_lock(&rq->lock);
- update_rq_clock(rq);
- /*
- * Now that the task has been migrated to the new RQ and we
- * have that locked, proceed as normal and enqueue the task
- * there.
- */
- }
- #endif
- enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
- if (dl_task(rq->curr))
- check_preempt_curr_dl(rq, p, 0);
- else
- resched_curr(rq);
- #ifdef CONFIG_SMP
- /*
- * Queueing this task back might have overloaded rq, check if we need
- * to kick someone away.
- */
- if (has_pushable_dl_tasks(rq)) {
- /*
- * Nothing relies on rq->lock after this, so it's safe to drop
- * rq->lock.
- */
- rq_unpin_lock(rq, &rf);
- push_dl_task(rq);
- rq_repin_lock(rq, &rf);
- }
- #endif
- unlock:
- task_rq_unlock(rq, p, &rf);
- /*
- * This can free the task_struct, including this hrtimer, do not touch
- * anything related to that after this.
- */
- put_task_struct(p);
- return HRTIMER_NORESTART;
- }
- void init_dl_task_timer(struct sched_dl_entity *dl_se)
- {
- struct hrtimer *timer = &dl_se->dl_timer;
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = dl_task_timer;
- }
- /*
- * During the activation, CBS checks if it can reuse the current task's
- * runtime and period. If the deadline of the task is in the past, CBS
- * cannot use the runtime, and so it replenishes the task. This rule
- * works fine for implicit deadline tasks (deadline == period), and the
- * CBS was designed for implicit deadline tasks. However, a task with
- * constrained deadline (deadline < period) might be awakened after the
- * deadline, but before the next period. In this case, replenishing the
- * task would allow it to run for runtime / deadline. As in this case
- * deadline < period, CBS enables a task to run for more than the
- * runtime / period. In a very loaded system, this can cause a domino
- * effect, making other tasks miss their deadlines.
- *
- * To avoid this problem, in the activation of a constrained deadline
- * task after the deadline but before the next period, throttle the
- * task and set the replenishing timer to the beginning of the next period,
- * unless it is boosted.
- */
- static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
- {
- struct task_struct *p = dl_task_of(dl_se);
- struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
- if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
- dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
- return;
- dl_se->dl_throttled = 1;
- if (dl_se->runtime > 0)
- dl_se->runtime = 0;
- }
- }
- static
- int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
- {
- return (dl_se->runtime <= 0);
- }
- extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
- /*
- * This function implements the GRUB accounting rule:
- * according to the GRUB reclaiming algorithm, the runtime is
- * not decreased as "dq = -dt", but as
- * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
- * where u is the utilization of the task, Umax is the maximum reclaimable
- * utilization, Uinact is the (per-runqueue) inactive utilization, computed
- * as the difference between the "total runqueue utilization" and the
- * runqueue active utilization, and Uextra is the (per runqueue) extra
- * reclaimable utilization.
- * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
- * multiplied by 2^BW_SHIFT, the result has to be shifted right by
- * BW_SHIFT.
- * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
- * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
- * Since delta is a 64 bit variable, to have an overflow its value
- * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
- * So, overflow is not an issue here.
- */
- static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
- {
- u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
- u64 u_act;
- u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
- /*
- * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
- * we compare u_inact + rq->dl.extra_bw with
- * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
- * u_inact + rq->dl.extra_bw can be larger than
- * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
- * leading to wrong results).
- */
- if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
- u_act = u_act_min;
- else
- u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
- return (delta * u_act) >> BW_SHIFT;
- }
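- /*
- * Worked example for grub_reclaim() (hypothetical numbers, expressed
- * as fractions of BW_UNIT): with this_bw = 0.6 and running_bw = 0.4,
- * u_inact = 0.2; with extra_bw = 0.3, u_act = 1 - 0.2 - 0.3 = 0.5,
- * so 10ms of elapsed time is charged as 10ms * 0.5 = 5ms of runtime.
- * If instead u_inact + extra_bw exceeded 1 - u_act_min, the task
- * would be charged at its floor rate u_act_min.
- */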
- /*
- * Update the current task's runtime statistics (provided it is still
- * a -deadline task and has not been removed from the dl_rq).
- */
- static void update_curr_dl(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- struct sched_dl_entity *dl_se = &curr->dl;
- u64 delta_exec;
- if (!dl_task(curr) || !on_dl_rq(dl_se))
- return;
- /*
- * Consumed budget is computed considering the time as
- * observed by schedulable tasks (excluding time spent
- * in hardirq context, etc.). Deadlines are instead
- * computed using hard walltime. This seems to be the more
- * natural solution, but the full ramifications of this
- * approach need further study.
- */
- delta_exec = rq_clock_task(rq) - curr->se.exec_start;
- if (unlikely((s64)delta_exec <= 0)) {
- if (unlikely(dl_se->dl_yielded))
- goto throttle;
- return;
- }
- /* kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
- schedstat_set(curr->se.statistics.exec_max,
- max(curr->se.statistics.exec_max, delta_exec));
- curr->se.sum_exec_runtime += delta_exec;
- account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq_clock_task(rq);
- cpuacct_charge(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
- if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
- delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
- dl_se->runtime -= delta_exec;
- throttle:
- if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
- dl_se->dl_throttled = 1;
- __dequeue_task_dl(rq, curr, 0);
- if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
- enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
- if (!is_leftmost(curr, &rq->dl))
- resched_curr(rq);
- }
- /*
- * Because -- for now -- we share the rt bandwidth, we need to
- * account our runtime there too, otherwise actual rt tasks
- * would be able to exceed the shared quota.
- *
- * Account to the root rt group for now.
- *
- * The solution we're working towards is having the RT groups scheduled
- * using deadline servers -- however there are a few nasties to figure
- * out before that can happen.
- */
- if (rt_bandwidth_enabled()) {
- struct rt_rq *rt_rq = &rq->rt;
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- /*
- * We'll let actual RT tasks worry about the overflow here, we
- * have our own CBS to keep us in line; only account when RT
- * bandwidth is relevant.
- */
- if (sched_rt_bandwidth_account(rt_rq))
- rt_rq->rt_time += delta_exec;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- }
- static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
- {
- struct sched_dl_entity *dl_se = container_of(timer,
- struct sched_dl_entity,
- inactive_timer);
- struct task_struct *p = dl_task_of(dl_se);
- struct rq_flags rf;
- struct rq *rq;
- rq = task_rq_lock(p, &rf);
- if (!dl_task(p) || p->state == TASK_DEAD) {
- struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
- sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
- dl_se->dl_non_contending = 0;
- }
- raw_spin_lock(&dl_b->lock);
- __dl_clear(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
- raw_spin_unlock(&dl_b->lock);
- __dl_clear_params(p);
- goto unlock;
- }
- if (dl_se->dl_non_contending == 0)
- goto unlock;
- sched_clock_tick();
- update_rq_clock(rq);
- sub_running_bw(dl_se->dl_bw, &rq->dl);
- dl_se->dl_non_contending = 0;
- unlock:
- task_rq_unlock(rq, p, &rf);
- put_task_struct(p);
- return HRTIMER_NORESTART;
- }
- void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
- {
- struct hrtimer *timer = &dl_se->inactive_timer;
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = inactive_task_timer;
- }
- #ifdef CONFIG_SMP
- static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
- {
- struct rq *rq = rq_of_dl_rq(dl_rq);
- if (dl_rq->earliest_dl.curr == 0 ||
- dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
- dl_rq->earliest_dl.curr = deadline;
- cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
- }
- }
- static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
- {
- struct rq *rq = rq_of_dl_rq(dl_rq);
- /*
- * Since we may have removed our earliest (and/or next earliest)
- * task we must recompute them.
- */
- if (!dl_rq->dl_nr_running) {
- dl_rq->earliest_dl.curr = 0;
- dl_rq->earliest_dl.next = 0;
- cpudl_clear(&rq->rd->cpudl, rq->cpu);
- } else {
- struct rb_node *leftmost = dl_rq->root.rb_leftmost;
- struct sched_dl_entity *entry;
- entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
- dl_rq->earliest_dl.curr = entry->deadline;
- cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
- }
- }
- #else
- static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
- static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
- #endif /* CONFIG_SMP */
- static inline
- void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- int prio = dl_task_of(dl_se)->prio;
- u64 deadline = dl_se->deadline;
- WARN_ON(!dl_prio(prio));
- dl_rq->dl_nr_running++;
- add_nr_running(rq_of_dl_rq(dl_rq), 1);
- walt_inc_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
- inc_dl_deadline(dl_rq, deadline);
- inc_dl_migration(dl_se, dl_rq);
- }
- static inline
- void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
- {
- int prio = dl_task_of(dl_se)->prio;
- WARN_ON(!dl_prio(prio));
- WARN_ON(!dl_rq->dl_nr_running);
- dl_rq->dl_nr_running--;
- sub_nr_running(rq_of_dl_rq(dl_rq), 1);
- walt_dec_cumulative_runnable_avg(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
- dec_dl_deadline(dl_rq, dl_se->deadline);
- dec_dl_migration(dl_se, dl_rq);
- }
- static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- struct rb_node **link = &dl_rq->root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct sched_dl_entity *entry;
- int leftmost = 1;
- BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct sched_dl_entity, rb_node);
- if (dl_time_before(dl_se->deadline, entry->deadline))
- link = &parent->rb_left;
- else {
- link = &parent->rb_right;
- leftmost = 0;
- }
- }
- rb_link_node(&dl_se->rb_node, parent, link);
- rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
- inc_dl_tasks(dl_se, dl_rq);
- }
- static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
- {
- struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
- if (RB_EMPTY_NODE(&dl_se->rb_node))
- return;
- rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
- RB_CLEAR_NODE(&dl_se->rb_node);
- dec_dl_tasks(dl_se, dl_rq);
- }
- static void
- enqueue_dl_entity(struct sched_dl_entity *dl_se,
- struct sched_dl_entity *pi_se, int flags)
- {
- BUG_ON(on_dl_rq(dl_se));
- /*
- * If this is a wakeup or a new instance, the scheduling
- * parameters of the task might need updating. Otherwise,
- * we want a replenishment of its runtime.
- */
- if (flags & ENQUEUE_WAKEUP) {
- task_contending(dl_se, flags);
- update_dl_entity(dl_se, pi_se);
- } else if (flags & ENQUEUE_REPLENISH) {
- replenish_dl_entity(dl_se, pi_se);
- } else if ((flags & ENQUEUE_RESTORE) &&
- dl_time_before(dl_se->deadline,
- rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
- setup_new_dl_entity(dl_se);
- }
- __enqueue_dl_entity(dl_se);
- }
- static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
- {
- __dequeue_dl_entity(dl_se);
- }
- static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
- {
- struct task_struct *pi_task = rt_mutex_get_top_task(p);
- struct sched_dl_entity *pi_se = &p->dl;
- /*
- * Use the scheduling parameters of the top pi-waiter task if:
- * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
- * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
- * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
- * boosted due to a SCHED_DEADLINE pi-waiter).
- * Otherwise we keep our runtime and deadline.
- */
- if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
- pi_se = &pi_task->dl;
- } else if (!dl_prio(p->normal_prio)) {
- /*
- * Special case in which we have a !SCHED_DEADLINE task
- * that is going to be deboosted, but exceeds its
- * runtime while doing so. No point in replenishing
- * it, as it's going to return to its original
- * scheduling class after this.
- */
- BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
- return;
- }
- /*
- * Check if a constrained deadline task was activated
- * after the deadline but before the next period.
- * If that is the case, the task will be throttled and
- * the replenishment timer will be set to the next period.
- */
- if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
- dl_check_constrained_dl(&p->dl);
- if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
- add_running_bw(p->dl.dl_bw, &rq->dl);
- }
- /*
- * If p is throttled, we do not enqueue it. In fact, if it exhausted
- * its budget it needs a replenishment and, since it now is on
- * its rq, the bandwidth timer callback (which clearly has not
- * run yet) will take care of this.
- * However, the active utilization does not depend on the fact
- * that the task is on the runqueue or not (but depends on the
- * task's state - in GRUB parlance, "inactive" vs "active contending").
- * In other words, even if a task is throttled its utilization must
- * be counted in the active utilization; hence, we need to call
- * add_running_bw().
- */
- if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
- if (flags & ENQUEUE_WAKEUP)
- task_contending(&p->dl, flags);
- return;
- }
- enqueue_dl_entity(&p->dl, pi_se, flags);
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
- enqueue_pushable_dl_task(rq, p);
- }
- static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
- {
- dequeue_dl_entity(&p->dl);
- dequeue_pushable_dl_task(rq, p);
- }
- static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
- {
- update_curr_dl(rq);
- __dequeue_task_dl(rq, p, flags);
- if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- }
- /*
- * This check allows us to start the inactive timer (or to immediately
- * decrease the active utilization, if needed) in two cases:
- * when the task blocks and when it is terminating
- * (p->state == TASK_DEAD). We can handle the two cases in the same
- * way, because from GRUB's point of view the same thing is happening
- * (the task moves from "active contending" to "active non contending"
- * or "inactive")
- */
- if (flags & DEQUEUE_SLEEP)
- task_non_contending(p);
- }
- /*
- * Yield task semantic for -deadline tasks is:
- *
- * get off the CPU until our next instance, with
- * a new runtime. This is of little use now, since we
- * don't have a bandwidth reclaiming mechanism. Anyway,
- * bandwidth reclaiming is planned for the future, and
- * yield_task_dl will indicate that some spare budget
- * is available for other task instances to use.
- */
- static void yield_task_dl(struct rq *rq)
- {
- /*
- * We make the task go to sleep until its current deadline by
- * forcing its runtime to zero. This way, update_curr_dl() stops
- * it and the bandwidth timer will wake it up and will give it
- * new scheduling parameters (thanks to dl_yielded=1).
- */
- rq->curr->dl.dl_yielded = 1;
- update_rq_clock(rq);
- update_curr_dl(rq);
- /*
- * Tell update_rq_clock() that we've just updated,
- * so we don't do a microscopic update in schedule()
- * and double the fastpath cost.
- */
- rq_clock_skip_update(rq, true);
- }
- #ifdef CONFIG_SMP
- static int find_later_rq(struct task_struct *task);
- static int
- select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
- int sibling_count_hint)
- {
- struct task_struct *curr;
- struct rq *rq;
- if (sd_flag != SD_BALANCE_WAKE)
- goto out;
- rq = cpu_rq(cpu);
- rcu_read_lock();
- curr = READ_ONCE(rq->curr); /* unlocked access */
- /*
- * If we are dealing with a -deadline task, we must
- * decide where to wake it up.
- * If it has a later deadline and the current task
- * on this rq can't move (provided the waking task
- * can!) we prefer to send it somewhere else. On the
- * other hand, if it has a shorter deadline, we
- * try to make it stay here; it might be important.
- */
- if (unlikely(dl_task(curr)) &&
- (curr->nr_cpus_allowed < 2 ||
- !dl_entity_preempt(&p->dl, &curr->dl)) &&
- (p->nr_cpus_allowed > 1)) {
- int target = find_later_rq(p);
- if (target != -1 &&
- (dl_time_before(p->dl.deadline,
- cpu_rq(target)->dl.earliest_dl.curr) ||
- (cpu_rq(target)->dl.dl_nr_running == 0)))
- cpu = target;
- }
- rcu_read_unlock();
- out:
- return cpu;
- }
- static void migrate_task_rq_dl(struct task_struct *p)
- {
- struct rq *rq;
- if (p->state != TASK_WAKING)
- return;
- rq = task_rq(p);
- /*
- * Since p->state == TASK_WAKING, set_task_cpu() has been called
- * from try_to_wake_up(). Hence, p->pi_lock is locked, but
- * rq->lock is not... So, lock it
- */
- raw_spin_lock(&rq->lock);
- if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
- p->dl.dl_non_contending = 0;
- /*
- * If the timer handler is currently running and the
- * timer cannot be cancelled, inactive_task_timer()
- * will see that dl_non_contending is not set, and
- * will not touch the rq's active utilization,
- * so we are still safe.
- */
- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
- }
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- raw_spin_unlock(&rq->lock);
- }
- static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
- {
- /*
- * Current can't be migrated, useless to reschedule,
- * let's hope p can move out.
- */
- if (rq->curr->nr_cpus_allowed == 1 ||
- !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
- return;
- /*
- * p is migratable, so let's not schedule it and
- * see if it is pushed or pulled somewhere else.
- */
- if (p->nr_cpus_allowed != 1 &&
- cpudl_find(&rq->rd->cpudl, p, NULL))
- return;
- resched_curr(rq);
- }
- #endif /* CONFIG_SMP */
- /*
- * Only called when both the current and waking task are -deadline
- * tasks.
- */
- static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
- int flags)
- {
- if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
- resched_curr(rq);
- return;
- }
- #ifdef CONFIG_SMP
- /*
- * In the unlikely case current and p have the same deadline
- * let us try to decide what's the best thing to do...
- */
- if ((p->dl.deadline == rq->curr->dl.deadline) &&
- !test_tsk_need_resched(rq->curr))
- check_preempt_equal_dl(rq, p);
- #endif /* CONFIG_SMP */
- }
- #ifdef CONFIG_SCHED_HRTICK
- static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
- {
- hrtick_start(rq, p->dl.runtime);
- }
- #else /* !CONFIG_SCHED_HRTICK */
- static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
- {
- }
- #endif
- static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
- struct dl_rq *dl_rq)
- {
- struct rb_node *left = rb_first_cached(&dl_rq->root);
- if (!left)
- return NULL;
- return rb_entry(left, struct sched_dl_entity, rb_node);
- }
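- /*
- * The dl_rq rbtree is ordered by absolute deadline, so the cached
- * leftmost node is always the EDF pick: with queued deadlines of,
- * say, 30ms, 10ms and 20ms, tasks are picked in the order
- * 10ms, 20ms, 30ms.
- */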
- static struct task_struct *
- pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
- {
- struct sched_dl_entity *dl_se;
- struct task_struct *p;
- struct dl_rq *dl_rq;
- dl_rq = &rq->dl;
- if (need_pull_dl_task(rq, prev)) {
- /*
- * This is OK, because current is on_cpu, which avoids it being
- * picked for load-balance; preemption/IRQs are still
- * disabled, avoiding further scheduler activity on it; and
- * we're being very careful to re-start the picking loop.
- */
- rq_unpin_lock(rq, rf);
- pull_dl_task(rq);
- rq_repin_lock(rq, rf);
- /*
- * pull_dl_task() can drop (and re-acquire) rq->lock; this
- * means a stop task can slip in, in which case we need to
- * re-start task selection.
- */
- if (rq->stop && task_on_rq_queued(rq->stop))
- return RETRY_TASK;
- }
- /*
- * When prev is DL, we may throttle it in put_prev_task().
- * So, we update time before we check for dl_nr_running.
- */
- if (prev->sched_class == &dl_sched_class)
- update_curr_dl(rq);
- if (unlikely(!dl_rq->dl_nr_running))
- return NULL;
- put_prev_task(rq, prev);
- dl_se = pick_next_dl_entity(rq, dl_rq);
- BUG_ON(!dl_se);
- p = dl_task_of(dl_se);
- p->se.exec_start = rq_clock_task(rq);
- /* Running task will never be pushed. */
- dequeue_pushable_dl_task(rq, p);
- if (hrtick_enabled(rq))
- start_hrtick_dl(rq, p);
- queue_push_tasks(rq);
- return p;
- }
- static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
- {
- update_curr_dl(rq);
- if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
- enqueue_pushable_dl_task(rq, p);
- }
- static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
- {
- update_curr_dl(rq);
- /*
- * Even when we have runtime, update_curr_dl() might have resulted in us
- * not being the leftmost task anymore. In that case NEED_RESCHED will
- * be set and schedule() will start a new hrtick for the next task.
- */
- if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
- is_leftmost(p, &rq->dl))
- start_hrtick_dl(rq, p);
- }
- static void task_fork_dl(struct task_struct *p)
- {
- /*
- * SCHED_DEADLINE tasks cannot fork and keep the policy: sched_fork()
- * either fails the fork with -EAGAIN or, if RESET_ON_FORK is set,
- * resets the child to SCHED_NORMAL. So there is nothing to do here.
- */
- }
- static void set_curr_task_dl(struct rq *rq)
- {
- struct task_struct *p = rq->curr;
- p->se.exec_start = rq_clock_task(rq);
- /* You can't push away the running task */
- dequeue_pushable_dl_task(rq, p);
- }
- #ifdef CONFIG_SMP
- /* Only try algorithms three times */
- #define DL_MAX_TRIES 3
- static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
- {
- if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
- return 1;
- return 0;
- }
- /*
- * Return the earliest deadline task on the rq's pushable list that
- * is suitable to be executed on the given CPU, or NULL if there is
- * none:
- */
- static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
- {
- struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
- struct task_struct *p = NULL;
- if (!has_pushable_dl_tasks(rq))
- return NULL;
- next_node:
- if (next_node) {
- p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
- if (pick_dl_task(rq, p, cpu))
- return p;
- next_node = rb_next(next_node);
- goto next_node;
- }
- return NULL;
- }
- static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
- static int find_later_rq(struct task_struct *task)
- {
- struct sched_domain *sd;
- struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
- int this_cpu = smp_processor_id();
- int cpu = task_cpu(task);
- /* Make sure the mask is initialized first */
- if (unlikely(!later_mask))
- return -1;
- if (task->nr_cpus_allowed == 1)
- return -1;
- /*
- * We have to consider system topology and task affinity
- * first, then we can look for a suitable cpu.
- */
- if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
- return -1;
- /*
- * If we are here, some targets have been found, including
- * the most suitable one: among the runqueues whose current
- * tasks have later deadlines than our task's, the rq with
- * the latest possible one.
- *
- * Now we check how well this matches with the task's
- * affinity and system topology.
- *
- * The last cpu where the task ran is our first
- * guess, since it is most likely cache-hot there.
- */
- if (cpumask_test_cpu(cpu, later_mask))
- return cpu;
- /*
- * Check if this_cpu is to be skipped (i.e., it is
- * not in the mask) or not.
- */
- if (!cpumask_test_cpu(this_cpu, later_mask))
- this_cpu = -1;
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
- int best_cpu;
- /*
- * If possible, preempting this_cpu is
- * cheaper than migrating.
- */
- if (this_cpu != -1 &&
- cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
- rcu_read_unlock();
- return this_cpu;
- }
- best_cpu = cpumask_first_and(later_mask,
- sched_domain_span(sd));
- /*
- * Last chance: if a cpu that is in both later_mask
- * and the current sd span is valid, that becomes our
- * choice. Of course, the latest possible cpu is
- * already under consideration through later_mask.
- */
- if (best_cpu < nr_cpu_ids) {
- rcu_read_unlock();
- return best_cpu;
- }
- }
- }
- rcu_read_unlock();
- /*
- * At this point, all our guesses failed, so we just return
- * 'something' and let the caller sort things out.
- */
- if (this_cpu != -1)
- return this_cpu;
- cpu = cpumask_any(later_mask);
- if (cpu < nr_cpu_ids)
- return cpu;
- return -1;
- }
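- /*
- * To summarize the fallback order used above (a recap, not new
- * logic): 1) the CPU the task last ran on, if it is in later_mask
- * (most likely cache-hot); 2) this_cpu, if it is in later_mask and
- * shares an SD_WAKE_AFFINE domain with that CPU (preempting locally
- * is cheaper than migrating); 3) the first CPU in both later_mask
- * and the span of each SD_WAKE_AFFINE domain, walking up the
- * topology; 4) this_cpu, else any CPU in later_mask; 5) -1 if no
- * candidate exists.
- */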
- /* Locks the rq it finds */
- static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
- {
- struct rq *later_rq = NULL;
- int tries;
- int cpu;
- for (tries = 0; tries < DL_MAX_TRIES; tries++) {
- cpu = find_later_rq(task);
- if ((cpu == -1) || (cpu == rq->cpu))
- break;
- later_rq = cpu_rq(cpu);
- if (later_rq->dl.dl_nr_running &&
- !dl_time_before(task->dl.deadline,
- later_rq->dl.earliest_dl.curr)) {
- /*
- * Target rq has tasks of equal or earlier deadline,
- * retrying does not release any lock and is unlikely
- * to yield a different result.
- */
- later_rq = NULL;
- break;
- }
- /* Retry if something changed. */
- if (double_lock_balance(rq, later_rq)) {
- if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
- task_running(rq, task) ||
- !dl_task(task) ||
- !task_on_rq_queued(task))) {
- double_unlock_balance(rq, later_rq);
- later_rq = NULL;
- break;
- }
- }
- /*
- * If the rq we found has no -deadline task, or
- * its earliest one has a later deadline than our
- * task, the rq is a good one.
- */
- if (!later_rq->dl.dl_nr_running ||
- dl_time_before(task->dl.deadline,
- later_rq->dl.earliest_dl.curr))
- break;
- /* Otherwise we try again. */
- double_unlock_balance(rq, later_rq);
- later_rq = NULL;
- }
- return later_rq;
- }
- static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
- {
- struct task_struct *p;
- if (!has_pushable_dl_tasks(rq))
- return NULL;
- p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
- struct task_struct, pushable_dl_tasks);
- BUG_ON(rq->cpu != task_cpu(p));
- BUG_ON(task_current(rq, p));
- BUG_ON(p->nr_cpus_allowed <= 1);
- BUG_ON(!task_on_rq_queued(p));
- BUG_ON(!dl_task(p));
- return p;
- }
- /*
- * See if the non running -deadline tasks on this rq
- * can be sent to some other CPU where they can preempt
- * and start executing.
- */
- static int push_dl_task(struct rq *rq)
- {
- struct task_struct *next_task;
- struct rq *later_rq;
- int ret = 0;
- if (!rq->dl.overloaded)
- return 0;
- next_task = pick_next_pushable_dl_task(rq);
- if (!next_task)
- return 0;
- retry:
- if (unlikely(next_task == rq->curr)) {
- WARN_ON(1);
- return 0;
- }
- /*
- * If next_task preempts rq->curr, and rq->curr
- * can move away, it makes sense to just reschedule
- * without going further in pushing next_task.
- */
- if (dl_task(rq->curr) &&
- dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
- rq->curr->nr_cpus_allowed > 1) {
- resched_curr(rq);
- return 0;
- }
- /* We might release rq lock */
- get_task_struct(next_task);
- /* Will lock the rq it'll find */
- later_rq = find_lock_later_rq(next_task, rq);
- if (!later_rq) {
- struct task_struct *task;
- /*
- * We must check all this again, since
- * find_lock_later_rq releases rq->lock and it is
- * then possible that next_task has migrated.
- */
- task = pick_next_pushable_dl_task(rq);
- if (task == next_task) {
- /*
- * The task is still there. We don't try
- * again; some other cpu will pull it when ready.
- */
- goto out;
- }
- if (!task)
- /* No more tasks */
- goto out;
- put_task_struct(next_task);
- next_task = task;
- goto retry;
- }
- deactivate_task(rq, next_task, 0);
- sub_running_bw(next_task->dl.dl_bw, &rq->dl);
- sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
- next_task->on_rq = TASK_ON_RQ_MIGRATING;
- set_task_cpu(next_task, later_rq->cpu);
- next_task->on_rq = TASK_ON_RQ_QUEUED;
- add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
- add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
- activate_task(later_rq, next_task, 0);
- ret = 1;
- resched_curr(later_rq);
- double_unlock_balance(rq, later_rq);
- out:
- put_task_struct(next_task);
- return ret;
- }
- static void push_dl_tasks(struct rq *rq)
- {
- /* push_dl_task() will return true if it moved a -deadline task */
- while (push_dl_task(rq))
- ;
- }
- static void pull_dl_task(struct rq *this_rq)
- {
- int this_cpu = this_rq->cpu, cpu;
- struct task_struct *p;
- bool resched = false;
- struct rq *src_rq;
- u64 dmin = LONG_MAX;
- if (likely(!dl_overloaded(this_rq)))
- return;
- /*
- * Match the barrier from dl_set_overloaded; this guarantees that if we
- * see overloaded we must also see the dlo_mask bit.
- */
- smp_rmb();
- for_each_cpu(cpu, this_rq->rd->dlo_mask) {
- if (this_cpu == cpu)
- continue;
- src_rq = cpu_rq(cpu);
- /*
- * It looks racy, and it is! However, as in sched_rt.c,
- * we are fine with this.
- */
- if (this_rq->dl.dl_nr_running &&
- dl_time_before(this_rq->dl.earliest_dl.curr,
- src_rq->dl.earliest_dl.next))
- continue;
- /* Might drop this_rq->lock */
- double_lock_balance(this_rq, src_rq);
- /*
- * If there are no more pullable tasks on the
- * rq, we're done with it.
- */
- if (src_rq->dl.dl_nr_running <= 1)
- goto skip;
- p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
- /*
- * We found a task to be pulled if:
- * - it preempts our current (if there's one),
- * - it will preempt the last one we pulled (if any).
- */
- if (p && dl_time_before(p->dl.deadline, dmin) &&
- (!this_rq->dl.dl_nr_running ||
- dl_time_before(p->dl.deadline,
- this_rq->dl.earliest_dl.curr))) {
- WARN_ON(p == src_rq->curr);
- WARN_ON(!task_on_rq_queued(p));
- /*
- * Then we skip p if it has an earlier deadline than
- * the current task of its runqueue: src_rq is about
- * to reschedule and run p itself in that case.
- */
- if (dl_time_before(p->dl.deadline,
- src_rq->curr->dl.deadline))
- goto skip;
- resched = true;
- deactivate_task(src_rq, p, 0);
- sub_running_bw(p->dl.dl_bw, &src_rq->dl);
- sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
- p->on_rq = TASK_ON_RQ_MIGRATING;
- set_task_cpu(p, this_cpu);
- p->on_rq = TASK_ON_RQ_QUEUED;
- add_rq_bw(p->dl.dl_bw, &this_rq->dl);
- add_running_bw(p->dl.dl_bw, &this_rq->dl);
- activate_task(this_rq, p, 0);
- dmin = p->dl.deadline;
- /* Is there any other task even earlier? */
- }
- skip:
- double_unlock_balance(this_rq, src_rq);
- }
- if (resched)
- resched_curr(this_rq);
- }
- /*
- * Since the task is not running and a reschedule is not going to happen
- * anytime soon on its runqueue, we try pushing it away now.
- */
- static void task_woken_dl(struct rq *rq, struct task_struct *p)
- {
- if (!task_running(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
- p->nr_cpus_allowed > 1 &&
- dl_task(rq->curr) &&
- (rq->curr->nr_cpus_allowed < 2 ||
- !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
- push_dl_tasks(rq);
- }
- }
- static void set_cpus_allowed_dl(struct task_struct *p,
- const struct cpumask *new_mask)
- {
- struct root_domain *src_rd;
- struct rq *rq;
- BUG_ON(!dl_task(p));
- rq = task_rq(p);
- src_rd = rq->rd;
- /*
- * Migrating a SCHED_DEADLINE task between exclusive
- * cpusets (different root_domains) entails a bandwidth
- * update. We already made space for us in the destination
- * domain (see cpuset_can_attach()).
- */
- if (!cpumask_intersects(src_rd->span, new_mask)) {
- struct dl_bw *src_dl_b;
- src_dl_b = dl_bw_of(cpu_of(rq));
- /*
- * We now free resources of the root_domain we are migrating
- * off. In the worst case, sched_setattr() may temporarily fail
- * until we complete the update.
- */
- raw_spin_lock(&src_dl_b->lock);
- __dl_clear(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
- raw_spin_unlock(&src_dl_b->lock);
- }
- set_cpus_allowed_common(p, new_mask);
- }
- /* Assumes rq->lock is held */
- static void rq_online_dl(struct rq *rq)
- {
- if (rq->dl.overloaded)
- dl_set_overload(rq);
- cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
- if (rq->dl.dl_nr_running > 0)
- cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
- }
- /* Assumes rq->lock is held */
- static void rq_offline_dl(struct rq *rq)
- {
- if (rq->dl.overloaded)
- dl_clear_overload(rq);
- cpudl_clear(&rq->rd->cpudl, rq->cpu);
- cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
- }
- void __init init_sched_dl_class(void)
- {
- unsigned int i;
- for_each_possible_cpu(i)
- zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
- GFP_KERNEL, cpu_to_node(i));
- }
- #endif /* CONFIG_SMP */
- static void switched_from_dl(struct rq *rq, struct task_struct *p)
- {
- /*
- * task_non_contending() can start the "inactive timer" (if the 0-lag
- * time is in the future). If the task switches back to dl before
- * the "inactive timer" fires, it can continue to consume its current
- * runtime using its current deadline. If it stays outside of
- * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
- * will reset the task parameters.
- */
- if (task_on_rq_queued(p) && p->dl.dl_runtime)
- task_non_contending(p);
- if (!task_on_rq_queued(p))
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- /*
- * We cannot use inactive_task_timer() to invoke sub_running_bw()
- * at the 0-lag time, because the task could have been migrated
- * while it was SCHED_OTHER in the meantime.
- */
- if (p->dl.dl_non_contending)
- p->dl.dl_non_contending = 0;
- /*
- * Since this might be the only -deadline task on the rq,
- * this is the right place to try to pull some other one
- * from an overloaded cpu, if any.
- */
- if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
- return;
- queue_pull_task(rq);
- }
- /*
- * When switching to -deadline, we may overload the rq, then
- * we try to push someone off, if possible.
- */
- static void switched_to_dl(struct rq *rq, struct task_struct *p)
- {
- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
- put_task_struct(p);
- /* If p is not queued we will update its parameters at next wakeup. */
- if (!task_on_rq_queued(p)) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
- return;
- }
- if (rq->curr != p) {
- #ifdef CONFIG_SMP
- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
- queue_push_tasks(rq);
- #endif
- if (dl_task(rq->curr))
- check_preempt_curr_dl(rq, p, 0);
- else
- resched_curr(rq);
- }
- }
- /*
- * If the scheduling parameters of a -deadline task changed,
- * a push or pull operation might be needed.
- */
- static void prio_changed_dl(struct rq *rq, struct task_struct *p,
- int oldprio)
- {
- if (task_on_rq_queued(p) || rq->curr == p) {
- #ifdef CONFIG_SMP
- /*
- * This might be too much, but unfortunately
- * we don't have the old deadline value, and
- * we can't tell whether the task is increasing
- * or lowering its prio, so...
- */
- if (!rq->dl.overloaded)
- queue_pull_task(rq);
- /*
- * If we now have an earlier deadline task than p,
- * then reschedule, provided p is still on this
- * runqueue.
- */
- if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
- resched_curr(rq);
- #else
- /*
- * Again, we don't know if p has an earlier
- * or later deadline, so let's blindly set a
- * (maybe not needed) rescheduling point.
- */
- resched_curr(rq);
- #endif /* CONFIG_SMP */
- }
- }
- const struct sched_class dl_sched_class = {
- .next = &rt_sched_class,
- .enqueue_task = enqueue_task_dl,
- .dequeue_task = dequeue_task_dl,
- .yield_task = yield_task_dl,
- .check_preempt_curr = check_preempt_curr_dl,
- .pick_next_task = pick_next_task_dl,
- .put_prev_task = put_prev_task_dl,
- #ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_dl,
- .migrate_task_rq = migrate_task_rq_dl,
- .set_cpus_allowed = set_cpus_allowed_dl,
- .rq_online = rq_online_dl,
- .rq_offline = rq_offline_dl,
- .task_woken = task_woken_dl,
- #endif
- .set_curr_task = set_curr_task_dl,
- .task_tick = task_tick_dl,
- .task_fork = task_fork_dl,
- .prio_changed = prio_changed_dl,
- .switched_from = switched_from_dl,
- .switched_to = switched_to_dl,
- .update_curr = update_curr_dl,
- #ifdef CONFIG_SCHED_WALT
- .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
- #endif
- };
- int sched_dl_global_validate(void)
- {
- u64 runtime = global_rt_runtime();
- u64 period = global_rt_period();
- u64 new_bw = to_ratio(period, runtime);
- struct dl_bw *dl_b;
- int cpu, cpus, ret = 0;
- unsigned long flags;
- /*
- * Here we want to check that the bandwidth is not being set
- * to a value smaller than the currently allocated bandwidth
- * in any of the root_domains.
- *
- * FIXME: Cycling on all the CPUs is overkill, but simpler than
- * cycling on root_domains... Discussion on different/better
- * solutions is welcome!
- */
- for_each_possible_cpu(cpu) {
- rcu_read_lock_sched();
- dl_b = dl_bw_of(cpu);
- cpus = dl_bw_cpus(cpu);
- raw_spin_lock_irqsave(&dl_b->lock, flags);
- if (new_bw * cpus < dl_b->total_bw)
- ret = -EBUSY;
- raw_spin_unlock_irqrestore(&dl_b->lock, flags);
- rcu_read_unlock_sched();
- if (ret)
- break;
- }
- return ret;
- }
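- /*
- * Worked example (illustrative, using the default sysctl values
- * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000):
- * new_bw = to_ratio(1000000, 950000) = (950000 << BW_SHIFT) / 1000000
- *        ~= 0.95 * 2^20 = 996147.
- * On a root_domain with 4 CPUs the check above fails with -EBUSY
- * whenever total_bw > 4 * 996147, i.e. whenever the already admitted
- * -deadline tasks use more than 3.8 CPUs worth of bandwidth.
- */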
- void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
- {
- if (global_rt_runtime() == RUNTIME_INF) {
- dl_rq->bw_ratio = 1 << RATIO_SHIFT;
- dl_rq->extra_bw = 1 << BW_SHIFT;
- } else {
- dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
- global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
- dl_rq->extra_bw = to_ratio(global_rt_period(),
- global_rt_runtime());
- }
- }
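- /*
- * Worked example (illustrative, default sysctls as above, with
- * BW_SHIFT = 20 and RATIO_SHIFT = 8):
- * bw_ratio = ((1000000 << 20) / 950000) >> 12 = 269, i.e. ~1.05 in
- * 8-bit fixed point (the inverse of the 0.95 global limit), and
- * extra_bw = (950000 << 20) / 1000000 = 996147, i.e. ~0.95 in 20-bit
- * fixed point. Both are consumed by the GRUB reclaiming code.
- */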
- void sched_dl_do_global(void)
- {
- u64 new_bw = -1;
- struct dl_bw *dl_b;
- int cpu;
- unsigned long flags;
- def_dl_bandwidth.dl_period = global_rt_period();
- def_dl_bandwidth.dl_runtime = global_rt_runtime();
- if (global_rt_runtime() != RUNTIME_INF)
- new_bw = to_ratio(global_rt_period(), global_rt_runtime());
- /*
- * FIXME: As above...
- */
- for_each_possible_cpu(cpu) {
- rcu_read_lock_sched();
- dl_b = dl_bw_of(cpu);
- raw_spin_lock_irqsave(&dl_b->lock, flags);
- dl_b->bw = new_bw;
- raw_spin_unlock_irqrestore(&dl_b->lock, flags);
- rcu_read_unlock_sched();
- init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
- }
- }
- /*
- * We must be sure that accepting a new task (or allowing changing the
- * parameters of an existing one) is consistent with the bandwidth
- * constraints. If yes, this function also accordingly updates the currently
- * allocated bandwidth to reflect the new situation.
- *
- * This function is called while holding p's rq->lock.
- */
- int sched_dl_overflow(struct task_struct *p, int policy,
- const struct sched_attr *attr)
- {
- struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- u64 period = attr->sched_period ?: attr->sched_deadline;
- u64 runtime = attr->sched_runtime;
- u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
- int cpus, err = -1;
- /* !deadline task may carry old deadline bandwidth */
- if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
- return 0;
- /*
- * Whether a task enters, leaves, or stays -deadline while changing
- * its parameters, we may need to update the total allocated
- * bandwidth of the container accordingly.
- */
- raw_spin_lock(&dl_b->lock);
- cpus = dl_bw_cpus(task_cpu(p));
- if (dl_policy(policy) && !task_has_dl_policy(p) &&
- !__dl_overflow(dl_b, cpus, 0, new_bw)) {
- if (hrtimer_active(&p->dl.inactive_timer))
- __dl_clear(dl_b, p->dl.dl_bw, cpus);
- __dl_add(dl_b, new_bw, cpus);
- err = 0;
- } else if (dl_policy(policy) && task_has_dl_policy(p) &&
- !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
- /*
- * XXX this is slightly incorrect: when the task
- * utilization decreases, we should delay the total
- * utilization change until the task's 0-lag point.
- * But this would require setting the task's "inactive
- * timer" while the task is not inactive.
- */
- __dl_clear(dl_b, p->dl.dl_bw, cpus);
- __dl_add(dl_b, new_bw, cpus);
- dl_change_utilization(p, new_bw);
- err = 0;
- } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
- /*
- * Do not decrease the total deadline utilization here,
- * switched_from_dl() will take care to do it at the correct
- * (0-lag) time.
- */
- err = 0;
- }
- raw_spin_unlock(&dl_b->lock);
- return err;
- }
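- /*
- * Worked example (illustrative): an admitted task with runtime 10ms
- * and period 100ms carries dl_bw ~= 0.1 * 2^20 = 104857. If
- * sched_setattr() raises its runtime to 30ms, new_bw ~= 0.3 * 2^20
- * = 314572, and the second branch above admits the change iff
- *	dl_b->bw * cpus >= dl_b->total_bw - 104857 + 314572,
- * e.g. with the default 0.95 limit (996147) and 4 CPUs, iff the new
- * total stays within ~3.8 CPUs worth of bandwidth.
- */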
- /*
- * This function initializes the sched_dl_entity of a task that is
- * becoming SCHED_DEADLINE.
- *
- * Only the static values are considered here, the actual runtime and the
- * absolute deadline will be properly calculated when the task is enqueued
- * for the first time with its new policy.
- */
- void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- dl_se->dl_runtime = attr->sched_runtime;
- dl_se->dl_deadline = attr->sched_deadline;
- dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
- dl_se->flags = attr->sched_flags;
- dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
- dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
- }
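- /*
- * Worked example (illustrative): attr->sched_runtime = 10ms,
- * attr->sched_deadline = 50ms and attr->sched_period = 100ms give
- *	dl_bw      = to_ratio(100ms, 10ms) ~= 0.1 * 2^20 = 104857,
- *	dl_density = to_ratio(50ms, 10ms)  ~= 0.2 * 2^20 = 209715.
- * dl_bw is the utilization used for admission control; dl_density
- * (runtime/deadline) is used on wakeup to decide whether the
- * remaining runtime can still be served by the current deadline.
- */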
- void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- attr->sched_priority = p->rt_priority;
- attr->sched_runtime = dl_se->dl_runtime;
- attr->sched_deadline = dl_se->dl_deadline;
- attr->sched_period = dl_se->dl_period;
- attr->sched_flags = dl_se->flags;
- }
- /*
- * This function validates the new parameters of a -deadline task.
- * We ask for the deadline to be non-zero and no smaller than the
- * runtime, and for the period to be either zero or no smaller
- * than the deadline. Furthermore, we have to be sure that
- * user parameters are above the internal resolution of 1us (we
- * check sched_runtime only since it is always the smaller one) and
- * below 2^63 ns (we have to check both sched_deadline and
- * sched_period, as the latter can be zero).
- */
- bool __checkparam_dl(const struct sched_attr *attr)
- {
- /* deadline != 0 */
- if (attr->sched_deadline == 0)
- return false;
- /*
- * Since we truncate DL_SCALE bits, make sure we're at least
- * that big.
- */
- if (attr->sched_runtime < (1ULL << DL_SCALE))
- return false;
- /*
- * Since we use the MSB for wrap-around and sign issues, make
- * sure it's not set (mind that period can be equal to zero).
- */
- if (attr->sched_deadline & (1ULL << 63) ||
- attr->sched_period & (1ULL << 63))
- return false;
- /* runtime <= deadline <= period (if period != 0) */
- if ((attr->sched_period != 0 &&
- attr->sched_period < attr->sched_deadline) ||
- attr->sched_deadline < attr->sched_runtime)
- return false;
- return true;
- }
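- /*
- * Illustrative userspace sketch (not part of the original file) of a
- * parameter set that passes the checks above, run with sufficient
- * privileges:
- *
- *	struct sched_attr attr = {
- *		.size		= sizeof(attr),
- *		.sched_policy	= SCHED_DEADLINE,
- *		.sched_runtime	= 10 * 1000 * 1000,
- *		.sched_deadline	= 50 * 1000 * 1000,
- *		.sched_period	= 100 * 1000 * 1000,
- *	};
- *	syscall(__NR_sched_setattr, 0, &attr, 0);
- *
- * All values are in nanoseconds and satisfy runtime <= deadline <=
- * period; a zero deadline or a runtime below 1 << DL_SCALE (~1us)
- * would be rejected.
- */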
- /*
- * This function clears the sched_dl_entity static params.
- */
- void __dl_clear_params(struct task_struct *p)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- dl_se->dl_runtime = 0;
- dl_se->dl_deadline = 0;
- dl_se->dl_period = 0;
- dl_se->flags = 0;
- dl_se->dl_bw = 0;
- dl_se->dl_density = 0;
- dl_se->dl_throttled = 0;
- dl_se->dl_yielded = 0;
- dl_se->dl_non_contending = 0;
- }
- bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
- {
- struct sched_dl_entity *dl_se = &p->dl;
- if (dl_se->dl_runtime != attr->sched_runtime ||
- dl_se->dl_deadline != attr->sched_deadline ||
- dl_se->dl_period != attr->sched_period ||
- dl_se->flags != attr->sched_flags)
- return true;
- return false;
- }
- #ifdef CONFIG_SMP
- int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
- {
- unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
- cs_cpus_allowed);
- struct dl_bw *dl_b;
- bool overflow;
- int cpus, ret;
- unsigned long flags;
- rcu_read_lock_sched();
- dl_b = dl_bw_of(dest_cpu);
- raw_spin_lock_irqsave(&dl_b->lock, flags);
- cpus = dl_bw_cpus(dest_cpu);
- overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
- if (overflow)
- ret = -EBUSY;
- else {
- /*
- * We reserve space for this task in the destination
- * root_domain, as we can't fail after this point.
- * We will free resources in the source root_domain
- * later on (see set_cpus_allowed_dl()).
- */
- __dl_add(dl_b, p->dl.dl_bw, cpus);
- ret = 0;
- }
- raw_spin_unlock_irqrestore(&dl_b->lock, flags);
- rcu_read_unlock_sched();
- return ret;
- }
- int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
- const struct cpumask *trial)
- {
- int ret = 1, trial_cpus;
- struct dl_bw *cur_dl_b;
- unsigned long flags;
- rcu_read_lock_sched();
- cur_dl_b = dl_bw_of(cpumask_any(cur));
- trial_cpus = cpumask_weight(trial);
- raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
- if (cur_dl_b->bw != -1 &&
- cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
- ret = 0;
- raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
- rcu_read_unlock_sched();
- return ret;
- }
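- /*
- * Worked example (illustrative): with the default per-CPU limit
- * bw = 996147 (~0.95), shrinking a cpuset from 4 CPUs to 2 is
- * allowed iff total_bw <= 2 * 996147, i.e. iff the -deadline tasks
- * in the set use at most ~1.9 CPUs worth of bandwidth.
- */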
- bool dl_cpu_busy(unsigned int cpu)
- {
- unsigned long flags;
- struct dl_bw *dl_b;
- bool overflow;
- int cpus;
- rcu_read_lock_sched();
- dl_b = dl_bw_of(cpu);
- raw_spin_lock_irqsave(&dl_b->lock, flags);
- cpus = dl_bw_cpus(cpu);
- overflow = __dl_overflow(dl_b, cpus, 0, 0);
- raw_spin_unlock_irqrestore(&dl_b->lock, flags);
- rcu_read_unlock_sched();
- return overflow;
- }
- #endif
- #ifdef CONFIG_SCHED_DEBUG
- void print_dl_stats(struct seq_file *m, int cpu)
- {
- print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
- }
- #endif /* CONFIG_SCHED_DEBUG */