- // SPDX-License-Identifier: GPL-2.0
- /*
- * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
- * policies)
- */
- #include "sched.h"
- #include <linux/slab.h>
- #include <linux/irq_work.h>
- #include "tune.h"
- #include <trace/events/sched.h>
- #include "walt.h"
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- #include "mtk_rt_mon.h"
- #endif
- #include "rt_ext.c"
- int sched_rr_timeslice = RR_TIMESLICE;
- int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
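- /*
- * For illustration: RR_TIMESLICE is 100 * HZ / 1000 jiffies
- * (i.e. 100ms), so with HZ == 250 it is 25 jiffies and
- * sysctl_sched_rr_timeslice reports (1000 / 250) * 25 == 100ms.
- */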
- static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
- struct rt_bandwidth def_rt_bandwidth;
- static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
- {
- struct rt_bandwidth *rt_b =
- container_of(timer, struct rt_bandwidth, rt_period_timer);
- int idle = 0;
- int overrun;
- raw_spin_lock(&rt_b->rt_runtime_lock);
- for (;;) {
- overrun = hrtimer_forward_now(timer, rt_b->rt_period);
- if (!overrun)
- break;
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- idle = do_sched_rt_period_timer(rt_b, overrun);
- raw_spin_lock(&rt_b->rt_runtime_lock);
- }
- if (idle)
- rt_b->rt_period_active = 0;
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
- }
- void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
- {
- rt_b->rt_period = ns_to_ktime(period);
- rt_b->rt_runtime = runtime;
- raw_spin_lock_init(&rt_b->rt_runtime_lock);
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rt_b->rt_period_timer.function = sched_rt_period_timer;
- }
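- /*
- * For illustration: sched_init() calls
- * init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(),
- * global_rt_runtime()); with the default sysctls
- * (sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000)
- * that gives a 1s period in which RT tasks may run at most 950ms.
- */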
- static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
- {
- if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
- return;
- raw_spin_lock(&rt_b->rt_runtime_lock);
- if (!rt_b->rt_period_active) {
- rt_b->rt_period_active = 1;
- /*
- * SCHED_DEADLINE updates the bandwidth, as a run away
- * RT task with a DL task could hog a CPU. But DL does
- * not reset the period. If a deadline task was running
- * without an RT task running, it can cause RT tasks to
- * throttle when they start up. Kick the timer right away
- * to update the period.
- */
- hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
- hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
- }
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- }
- void init_rt_rq(struct rt_rq *rt_rq)
- {
- struct rt_prio_array *array;
- int i;
- array = &rt_rq->active;
- for (i = 0; i < MAX_RT_PRIO; i++) {
- INIT_LIST_HEAD(array->queue + i);
- __clear_bit(i, array->bitmap);
- }
- /* delimiter for bitsearch: */
- __set_bit(MAX_RT_PRIO, array->bitmap);
- #if defined CONFIG_SMP
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- rt_rq->highest_prio.next = MAX_RT_PRIO;
- rt_rq->rt_nr_migratory = 0;
- rt_rq->overloaded = 0;
- plist_head_init(&rt_rq->pushable_tasks);
- #endif /* CONFIG_SMP */
- /* We start in dequeued state, because no RT tasks are queued */
- rt_rq->rt_queued = 0;
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- rt_rq->rt_runtime = 0;
- raw_spin_lock_init(&rt_rq->rt_runtime_lock);
- }
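- /*
- * The extra bit set at MAX_RT_PRIO acts as a sentinel:
- * sched_find_first_bit(array->bitmap) always terminates and
- * returns MAX_RT_PRIO when no priority level has queued tasks.
- */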
- #ifdef CONFIG_RT_GROUP_SCHED
- static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
- {
- hrtimer_cancel(&rt_b->rt_period_timer);
- }
- #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
- static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
- {
- #ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(!rt_entity_is_task(rt_se));
- #endif
- return container_of(rt_se, struct task_struct, rt);
- }
- static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
- {
- return rt_rq->rq;
- }
- static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
- {
- return rt_se->rt_rq;
- }
- static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
- {
- struct rt_rq *rt_rq = rt_se->rt_rq;
- return rt_rq->rq;
- }
- void free_rt_sched_group(struct task_group *tg)
- {
- int i;
- if (tg->rt_se)
- destroy_rt_bandwidth(&tg->rt_bandwidth);
- for_each_possible_cpu(i) {
- if (tg->rt_rq)
- kfree(tg->rt_rq[i]);
- if (tg->rt_se)
- kfree(tg->rt_se[i]);
- }
- kfree(tg->rt_rq);
- kfree(tg->rt_se);
- }
- void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
- struct sched_rt_entity *rt_se, int cpu,
- struct sched_rt_entity *parent)
- {
- struct rq *rq = cpu_rq(cpu);
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- rt_rq->rt_nr_boosted = 0;
- rt_rq->rq = rq;
- rt_rq->tg = tg;
- tg->rt_rq[cpu] = rt_rq;
- tg->rt_se[cpu] = rt_se;
- if (!rt_se)
- return;
- if (!parent)
- rt_se->rt_rq = &rq->rt;
- else
- rt_se->rt_rq = parent->my_q;
- rt_se->my_q = rt_rq;
- rt_se->parent = parent;
- INIT_LIST_HEAD(&rt_se->run_list);
- }
- int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
- {
- struct rt_rq *rt_rq;
- struct sched_rt_entity *rt_se;
- int i;
- tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->rt_rq)
- goto err;
- tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
- if (!tg->rt_se)
- goto err;
- init_rt_bandwidth(&tg->rt_bandwidth,
- ktime_to_ns(def_rt_bandwidth.rt_period), 0);
- for_each_possible_cpu(i) {
- rt_rq = kzalloc_node(sizeof(struct rt_rq),
- GFP_KERNEL, cpu_to_node(i));
- if (!rt_rq)
- goto err;
- rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
- GFP_KERNEL, cpu_to_node(i));
- if (!rt_se)
- goto err_free_rq;
- init_rt_rq(rt_rq);
- rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
- init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
- }
- return 1;
- err_free_rq:
- kfree(rt_rq);
- err:
- return 0;
- }
- #else /* CONFIG_RT_GROUP_SCHED */
- #define rt_entity_is_task(rt_se) (1)
- static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
- {
- return container_of(rt_se, struct task_struct, rt);
- }
- static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
- {
- return container_of(rt_rq, struct rq, rt);
- }
- static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
- {
- struct task_struct *p = rt_task_of(rt_se);
- return task_rq(p);
- }
- static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
- {
- struct rq *rq = rq_of_rt_se(rt_se);
- return &rq->rt;
- }
- void free_rt_sched_group(struct task_group *tg) { }
- int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
- {
- return 1;
- }
- #endif /* CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_SMP
- static void pull_rt_task(struct rq *this_rq);
- static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
- {
- /* Try to pull RT tasks here if we lower this rq's prio */
- return rq->rt.highest_prio.curr > prev->prio &&
- !cpu_isolated(cpu_of(rq));
- }
- static inline int rt_overloaded(struct rq *rq)
- {
- return atomic_read(&rq->rd->rto_count);
- }
- static inline void rt_set_overload(struct rq *rq)
- {
- if (!rq->online)
- return;
- cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
- /*
- * Make sure the mask is visible before we set
- * the overload count. That is checked to determine
- * if we should look at the mask. It would be a shame
- * if we looked at the mask, but the mask was not
- * updated yet.
- *
- * Matched by the barrier in pull_rt_task().
- */
- smp_wmb();
- atomic_inc(&rq->rd->rto_count);
- }
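- /*
- * A reader of the overload state is expected to pair with the
- * barrier above, along the lines of:
- *
- *	if (!rt_overloaded(this_rq))	// reads rto_count
- *		return;
- *	smp_rmb();			// rto_mask now stable
- *	for_each_cpu(cpu, this_rq->rd->rto_mask) ...
- *
- * which is the shape pull_rt_task() uses.
- */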
- static inline void rt_clear_overload(struct rq *rq)
- {
- if (!rq->online)
- return;
- /* the order here really doesn't matter */
- atomic_dec(&rq->rd->rto_count);
- cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
- }
- static void update_rt_migration(struct rt_rq *rt_rq)
- {
- if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
- if (!rt_rq->overloaded) {
- rt_set_overload(rq_of_rt_rq(rt_rq));
- rt_rq->overloaded = 1;
- }
- } else if (rt_rq->overloaded) {
- rt_clear_overload(rq_of_rt_rq(rt_rq));
- rt_rq->overloaded = 0;
- }
- }
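- /*
- * Example: with two RT tasks queued (rt_nr_total == 2) and at
- * least one of them migratable (rt_nr_migratory >= 1), the rq is
- * flagged overloaded so peers may pull the spare task; with a
- * single task, or only pinned tasks, the flag is cleared.
- */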
- static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- struct task_struct *p;
- if (!rt_entity_is_task(rt_se))
- return;
- p = rt_task_of(rt_se);
- rt_rq = &rq_of_rt_rq(rt_rq)->rt;
- rt_rq->rt_nr_total++;
- if (p->nr_cpus_allowed > 1)
- rt_rq->rt_nr_migratory++;
- update_rt_migration(rt_rq);
- }
- static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- struct task_struct *p;
- if (!rt_entity_is_task(rt_se))
- return;
- p = rt_task_of(rt_se);
- rt_rq = &rq_of_rt_rq(rt_rq)->rt;
- rt_rq->rt_nr_total--;
- if (p->nr_cpus_allowed > 1)
- rt_rq->rt_nr_migratory--;
- update_rt_migration(rt_rq);
- }
- static inline int has_pushable_tasks(struct rq *rq)
- {
- return !plist_head_empty(&rq->rt.pushable_tasks);
- }
- static DEFINE_PER_CPU(struct callback_head, rt_push_head);
- static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
- static void push_rt_tasks(struct rq *);
- static void pull_rt_task(struct rq *);
- static inline void queue_push_tasks(struct rq *rq)
- {
- if (!has_pushable_tasks(rq))
- return;
- queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
- }
- static inline void queue_pull_task(struct rq *rq)
- {
- queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
- }
- static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
- {
- plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
- plist_node_init(&p->pushable_tasks, p->prio);
- plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
- /* Update the highest prio pushable task */
- if (p->prio < rq->rt.highest_prio.next)
- rq->rt.highest_prio.next = p->prio;
- }
- static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
- {
- plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
- /* Update the new highest prio pushable task */
- if (has_pushable_tasks(rq)) {
- p = plist_first_entry(&rq->rt.pushable_tasks,
- struct task_struct, pushable_tasks);
- rq->rt.highest_prio.next = p->prio;
- } else
- rq->rt.highest_prio.next = MAX_RT_PRIO;
- }
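- /*
- * pushable_tasks is a plist sorted on p->prio, so
- * plist_first_entry() above is always the highest-priority
- * (lowest numeric prio) pushable task, which is what
- * highest_prio.next tracks.
- */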
- #else
- static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
- {
- }
- static inline
- void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- }
- static inline
- void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- }
- static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
- {
- return false;
- }
- static inline void pull_rt_task(struct rq *this_rq)
- {
- }
- static inline void queue_push_tasks(struct rq *rq)
- {
- }
- #endif /* CONFIG_SMP */
- static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
- static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
- static inline int on_rt_rq(struct sched_rt_entity *rt_se)
- {
- return rt_se->on_rq;
- }
- #ifdef CONFIG_RT_GROUP_SCHED
- static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
- {
- if (!rt_rq->tg)
- return RUNTIME_INF;
- return rt_rq->rt_runtime;
- }
- static inline u64 sched_rt_period(struct rt_rq *rt_rq)
- {
- return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
- }
- typedef struct task_group *rt_rq_iter_t;
- static inline struct task_group *next_task_group(struct task_group *tg)
- {
- do {
- tg = list_entry_rcu(tg->list.next,
- typeof(struct task_group), list);
- } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
- if (&tg->list == &task_groups)
- tg = NULL;
- return tg;
- }
- #define for_each_rt_rq(rt_rq, iter, rq) \
- for (iter = container_of(&task_groups, typeof(*iter), list); \
- (iter = next_task_group(iter)) && \
- (rt_rq = iter->rt_rq[cpu_of(rq)]);)
- #define for_each_sched_rt_entity(rt_se) \
- for (; rt_se; rt_se = rt_se->parent)
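- /*
- * With RT_GROUP_SCHED this walk follows ->parent from a task's
- * rt_se up to the root, e.g. task -> group A -> root rt_rq, so
- * an enqueue or dequeue is propagated through every level the
- * entity sits on.
- */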
- static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
- {
- return rt_se->my_q;
- }
- static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
- static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
- static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
- {
- struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
- struct rq *rq = rq_of_rt_rq(rt_rq);
- struct sched_rt_entity *rt_se;
- int cpu = cpu_of(rq);
- rt_se = rt_rq->tg->rt_se[cpu];
- if (rt_rq->rt_nr_running) {
- if (!rt_se)
- enqueue_top_rt_rq(rt_rq);
- else if (!on_rt_rq(rt_se))
- enqueue_rt_entity(rt_se, 0);
- if (rt_rq->highest_prio.curr < curr->prio)
- resched_curr(rq);
- }
- }
- static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
- {
- struct sched_rt_entity *rt_se;
- int cpu = cpu_of(rq_of_rt_rq(rt_rq));
- rt_se = rt_rq->tg->rt_se[cpu];
- if (!rt_se)
- dequeue_top_rt_rq(rt_rq);
- else if (on_rt_rq(rt_se))
- dequeue_rt_entity(rt_se, 0);
- }
- static inline int rt_rq_throttled(struct rt_rq *rt_rq)
- {
- return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
- }
- static int rt_se_boosted(struct sched_rt_entity *rt_se)
- {
- struct rt_rq *rt_rq = group_rt_rq(rt_se);
- struct task_struct *p;
- if (rt_rq)
- return !!rt_rq->rt_nr_boosted;
- p = rt_task_of(rt_se);
- return p->prio != p->normal_prio;
- }
- #ifdef CONFIG_SMP
- static inline const struct cpumask *sched_rt_period_mask(void)
- {
- return this_rq()->rd->span;
- }
- #else
- static inline const struct cpumask *sched_rt_period_mask(void)
- {
- return cpu_online_mask;
- }
- #endif
- static inline
- struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
- {
- return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
- }
- static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
- {
- return &rt_rq->tg->rt_bandwidth;
- }
- #else /* !CONFIG_RT_GROUP_SCHED */
- static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
- {
- return rt_rq->rt_runtime;
- }
- static inline u64 sched_rt_period(struct rt_rq *rt_rq)
- {
- return ktime_to_ns(def_rt_bandwidth.rt_period);
- }
- typedef struct rt_rq *rt_rq_iter_t;
- #define for_each_rt_rq(rt_rq, iter, rq) \
- for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
- #define for_each_sched_rt_entity(rt_se) \
- for (; rt_se; rt_se = NULL)
- static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
- {
- return NULL;
- }
- static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
- {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- if (!rt_rq->rt_nr_running)
- return;
- enqueue_top_rt_rq(rt_rq);
- resched_curr(rq);
- }
- static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
- {
- dequeue_top_rt_rq(rt_rq);
- }
- static inline int rt_rq_throttled(struct rt_rq *rt_rq)
- {
- return rt_rq->rt_throttled;
- }
- static inline const struct cpumask *sched_rt_period_mask(void)
- {
- return cpu_online_mask;
- }
- static inline
- struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
- {
- return &cpu_rq(cpu)->rt;
- }
- static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
- {
- return &def_rt_bandwidth;
- }
- #endif /* CONFIG_RT_GROUP_SCHED */
- bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
- {
- struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- return (hrtimer_active(&rt_b->rt_period_timer) ||
- rt_rq->rt_time < rt_b->rt_runtime);
- }
- #ifdef CONFIG_SMP
- /*
- * We ran out of runtime, see if we can borrow some from our neighbours.
- */
- static void do_balance_runtime(struct rt_rq *rt_rq)
- {
- struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
- int i, weight;
- u64 rt_period;
- weight = cpumask_weight(rd->span);
- raw_spin_lock(&rt_b->rt_runtime_lock);
- rt_period = ktime_to_ns(rt_b->rt_period);
- for_each_cpu(i, rd->span) {
- struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
- s64 diff;
- if (iter == rt_rq)
- continue;
- raw_spin_lock(&iter->rt_runtime_lock);
- /*
- * Either all rqs have inf runtime and there's nothing to steal
- * or __disable_runtime() below sets a specific rq to inf to
- * indicate it's been disabled and disallow stealing.
- */
- if (iter->rt_runtime == RUNTIME_INF)
- goto next;
- /*
- * From runqueues with spare time, take 1/n part of their
- * spare time, but no more than our period.
- */
- diff = iter->rt_runtime - iter->rt_time;
- if (diff > 0) {
- diff = div_u64((u64)diff, weight);
- if (rt_rq->rt_runtime + diff > rt_period)
- diff = rt_period - rt_rq->rt_runtime;
- iter->rt_runtime -= diff;
- rt_rq->rt_runtime += diff;
- if (rt_rq->rt_runtime == rt_period) {
- raw_spin_unlock(&iter->rt_runtime_lock);
- break;
- }
- }
- next:
- raw_spin_unlock(&iter->rt_runtime_lock);
- }
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- }
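- /*
- * For illustration: in a 4-CPU root domain (weight == 4) with a
- * 1000ms period, a neighbour holding 400ms of unused runtime
- * (diff == 400ms) donates diff / 4 == 100ms, clipped so our own
- * rt_runtime never exceeds the period.
- */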
- /*
- * Ensure this RQ takes back all the runtime it lent to its neighbours.
- */
- static void __disable_runtime(struct rq *rq)
- {
- struct root_domain *rd = rq->rd;
- rt_rq_iter_t iter;
- struct rt_rq *rt_rq;
- if (unlikely(!scheduler_running))
- return;
- for_each_rt_rq(rt_rq, iter, rq) {
- struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- s64 want;
- int i;
- raw_spin_lock(&rt_b->rt_runtime_lock);
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- /*
- * Either we're all inf and nobody needs to borrow, or we're
- * already disabled and thus have nothing to do, or we have
- * exactly the right amount of runtime to take out.
- */
- if (rt_rq->rt_runtime == RUNTIME_INF ||
- rt_rq->rt_runtime == rt_b->rt_runtime)
- goto balanced;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- /*
- * Calculate the difference between what we started out with
- * and what we currently have; that's the amount of runtime
- * we lent and now have to reclaim.
- */
- want = rt_b->rt_runtime - rt_rq->rt_runtime;
- /*
- * Greedy reclaim, take back as much as we can.
- */
- for_each_cpu(i, rd->span) {
- struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
- s64 diff;
- /*
- * Can't reclaim from ourselves or disabled runqueues.
- */
- if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
- continue;
- raw_spin_lock(&iter->rt_runtime_lock);
- if (want > 0) {
- diff = min_t(s64, iter->rt_runtime, want);
- iter->rt_runtime -= diff;
- want -= diff;
- } else {
- iter->rt_runtime -= want;
- want -= want;
- }
- raw_spin_unlock(&iter->rt_runtime_lock);
- if (!want)
- break;
- }
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- /*
- * We cannot be left wanting - that would mean some runtime
- * leaked out of the system.
- */
- BUG_ON(want);
- balanced:
- /*
- * Disable all the borrow logic by pretending we have inf
- * runtime - in which case borrowing doesn't make sense.
- */
- /*
- * sched: setting RUNTIME_INF here would let RT tasks starve
- * normal tasks entirely, so keep the configured runtime
- * instead (rt_disable_borrow).
- */
- /* rt_rq->rt_runtime = RUNTIME_INF; */
- rt_rq->rt_runtime = rt_b->rt_runtime;
- /* sched: print __disable_runtime unthrottled */
- if (rt_rq->rt_throttled == 1)
- print_disable_runtime_unthrottle(rt_rq);
- rt_rq->rt_throttled = 0;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- /* Make rt_rq available for pick_next_task() */
- sched_rt_rq_enqueue(rt_rq);
- }
- }
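- /*
- * Example of the reclaim above: if this rt_rq was configured
- * with 950ms but currently holds 700ms, want == 250ms; the loop
- * takes back up to 250ms total from the neighbours, and any
- * remainder would mean runtime leaked (hence the BUG_ON).
- */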
- static void __enable_runtime(struct rq *rq)
- {
- rt_rq_iter_t iter;
- struct rt_rq *rt_rq;
- if (unlikely(!scheduler_running))
- return;
- /*
- * Reset each runqueue's bandwidth settings
- */
- for_each_rt_rq(rt_rq, iter, rq) {
- struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- raw_spin_lock(&rt_b->rt_runtime_lock);
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = rt_b->rt_runtime;
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- raw_spin_unlock(&rt_b->rt_runtime_lock);
- }
- }
- static void balance_runtime(struct rt_rq *rt_rq)
- {
- if (!sched_feat(RT_RUNTIME_SHARE))
- return;
- if (rt_rq->rt_time > rt_rq->rt_runtime) {
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- do_balance_runtime(rt_rq);
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- }
- }
- #else /* !CONFIG_SMP */
- static inline void balance_runtime(struct rt_rq *rt_rq) {}
- #endif /* CONFIG_SMP */
- static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
- {
- int i, idle = 1, throttled = 0;
- const struct cpumask *span;
- span = sched_rt_period_mask();
- #ifdef CONFIG_RT_GROUP_SCHED
- /*
- * FIXME: isolated CPUs should really leave the root task group,
- * whether they are isolcpus or were isolated via cpusets, lest
- * the timer run on a CPU which does not service all runqueues,
- * potentially leaving other CPUs indefinitely throttled. If
- * isolation is really required, the user will turn the throttle
- * off to kill the perturbations it causes anyway. Meanwhile,
- * this maintains functionality for boot and/or troubleshooting.
- */
- if (rt_b == &root_task_group.rt_bandwidth)
- span = cpu_online_mask;
- #endif
- for_each_cpu(i, span) {
- int enqueue = 0;
- struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
- struct rq *rq = rq_of_rt_rq(rt_rq);
- int skip;
- u64 runtime_pre = 0, rt_time_pre = 0; /* sched: get runtime */
- /*
- * When span == cpu_online_mask, taking each rq->lock
- * can be time-consuming. Try to avoid it when possible.
- */
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
- rt_rq->rt_runtime = rt_b->rt_runtime;
- skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- if (skip)
- continue;
- raw_spin_lock(&rq->lock);
- update_rq_clock(rq);
- per_cpu(rt_period_time, i) = rq_clock_task(rq);
- if (rt_rq->rt_time) {
- u64 runtime;
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- per_cpu(old_rt_time, i) = rt_rq->rt_time;
- if (rt_rq->rt_throttled) {
- runtime_pre = rt_rq->rt_runtime;
- rt_time_pre = rt_rq->rt_time;
- }
- if (rt_rq->rt_throttled)
- balance_runtime(rt_rq);
- runtime = rt_rq->rt_runtime;
- rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
- per_cpu(init_rt_time, i) = rt_rq->rt_time;
- /* sched: print rt_time_info */
- if (rt_rq->rt_throttled) {
- printk_deferred(
- "[name:rt&]sched: cpu=%d, [%llu -> %llu] -= min(%llu, %d*[%llu -> %llu])\n",
- i, rt_time_pre, rt_rq->rt_time,
- rt_time_pre, overrun,
- runtime_pre, runtime);
- }
- if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
- rt_rq->rt_throttled = 0;
- enqueue = 1;
- /* sched: print unthrottle */
- printk_deferred("[name:rt&]sched: RT throttling inactivated cpu=%d\n",
- i);
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- if (rt_rq->rt_time != 0) {
- mt_rt_mon_switch(MON_RESET, i);
- mt_rt_mon_switch(MON_START, i);
- }
- #endif
- /*
- * When we're idle and a woken (rt) task is
- * throttled check_preempt_curr() will set
- * skip_update and the time between the wakeup
- * and this unthrottle will get accounted as
- * 'runtime'.
- */
- if (rt_rq->rt_nr_running && rq->curr == rq->idle)
- rq_clock_skip_update(rq, false);
- }
- if (rt_rq->rt_time || rt_rq->rt_nr_running)
- idle = 0;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- } else if (rt_rq->rt_nr_running) {
- idle = 0;
- if (!rt_rq_throttled(rt_rq))
- enqueue = 1;
- }
- if (rt_rq->rt_throttled)
- throttled = 1;
- if (enqueue)
- sched_rt_rq_enqueue(rt_rq);
- raw_spin_unlock(&rq->lock);
- }
- if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
- return 1;
- return idle;
- }
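- /*
- * Replenishment example: with runtime == 950ms, overrun == 1 and
- * rt_time == 1200ms of accrued debt, rt_time drops to 250ms;
- * since 250ms < 950ms a throttled rt_rq is unthrottled and
- * re-enqueued for the new period.
- */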
- static inline int rt_se_prio(struct sched_rt_entity *rt_se)
- {
- #ifdef CONFIG_RT_GROUP_SCHED
- struct rt_rq *rt_rq = group_rt_rq(rt_se);
- if (rt_rq)
- return rt_rq->highest_prio.curr;
- #endif
- return rt_task_of(rt_se)->prio;
- }
- static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
- {
- u64 runtime = sched_rt_runtime(rt_rq);
- #ifdef CONFIG_RT_GROUP_SCHED
- u64 runtime_pre = runtime; /* sched: get runtime */
- int cpu = rq_cpu(rt_rq->rq);
- #endif
- if (rt_rq->rt_throttled)
- return rt_rq_throttled(rt_rq);
- if (runtime >= sched_rt_period(rt_rq))
- return 0;
- balance_runtime(rt_rq);
- runtime = sched_rt_runtime(rt_rq);
- if (runtime == RUNTIME_INF)
- return 0;
- if (rt_rq->rt_time > runtime) {
- struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
- #ifdef CONFIG_RT_GROUP_SCHED
- print_rt_throttle_info(cpu, rt_rq, runtime_pre, runtime);
- #endif
- /*
- * Don't actually throttle groups that have no runtime assigned
- * but accrue some time due to boosting.
- */
- if (likely(rt_b->rt_runtime)) {
- rt_rq->rt_throttled = 1;
- /* sched: print throttle every time */
- printk_deferred("sched: RT throttling activated\n");
- #ifdef CONFIG_RT_GROUP_SCHED
- per_cpu(rt_throttling_start, cpu) =
- rq_clock_task(rt_rq->rq);
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- /* sched: rt throttle monitor */
- mt_rt_mon_switch(MON_STOP, cpu);
- mt_rt_mon_print_task(cpu);
- #endif
- #endif
- } else {
- /*
- * In case we did anyway, make it go away,
- * replenishment is a joke, since it will replenish us
- * with exactly 0 ns.
- */
- rt_rq->rt_time = 0;
- }
- if (rt_rq_throttled(rt_rq)) {
- sched_rt_rq_dequeue(rt_rq);
- return 1;
- }
- }
- return 0;
- }
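- /*
- * E.g. with runtime == 950ms: once this rt_rq has accrued
- * rt_time > 950ms within the current period (after a last
- * attempt to borrow via balance_runtime()), it is marked
- * throttled and dequeued until do_sched_rt_period_timer()
- * replenishes it.
- */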
- /*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
- */
- static void update_curr_rt(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- struct sched_rt_entity *rt_se = &curr->rt;
- u64 delta_exec;
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- struct rt_rq *cpu_rt_rq;
- u64 runtime;
- u64 old_exec_start;
- #endif
- if (curr->sched_class != &rt_sched_class)
- return;
- delta_exec = rq_clock_task(rq) - curr->se.exec_start;
- if (unlikely((s64)delta_exec <= 0))
- return;
- /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, SCHED_CPUFREQ_RT);
- schedstat_set(curr->se.statistics.exec_max,
- max(curr->se.statistics.exec_max, delta_exec));
- /* sched: update rt exec info */
- update_rt_exec_info(curr, delta_exec, rq);
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- old_exec_start = curr->se.exec_start;
- #endif
- curr->se.sum_exec_runtime += delta_exec;
- account_group_exec_runtime(curr, delta_exec);
- curr->se.exec_start = rq_clock_task(rq);
- per_cpu(sched_update_exec_start, rq->cpu) =
- per_cpu(update_curr_exec_start, rq->cpu);
- per_cpu(update_curr_exec_start, rq->cpu) = sched_clock_cpu(rq->cpu);
- cpuacct_charge(curr, delta_exec);
- sched_rt_avg_update(rq, delta_exec);
- if (!rt_bandwidth_enabled())
- return;
- #ifdef CONFIG_MTK_RT_THROTTLE_MON
- cpu_rt_rq = rt_rq_of_se(rt_se);
- runtime = sched_rt_runtime(cpu_rt_rq);
- if (cpu_rt_rq->rt_time == 0 && !(cpu_rt_rq->rt_throttled)) {
- if (old_exec_start < per_cpu(rt_period_time, rq->cpu) &&
- (per_cpu(old_rt_time, rq->cpu) + delta_exec) > runtime) {
- save_mt_rt_mon_info(rq->cpu, delta_exec, curr);
- mt_rt_mon_switch(MON_STOP, rq->cpu);
- mt_rt_mon_print_task(rq->cpu);
- }
- mt_rt_mon_switch(MON_RESET, rq->cpu);
- mt_rt_mon_switch(MON_START, rq->cpu);
- update_mt_rt_mon_start(rq->cpu, delta_exec);
- }
- save_mt_rt_mon_info(rq->cpu, delta_exec, curr);
- #endif
- for_each_sched_rt_entity(rt_se) {
- struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
- if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_time += delta_exec;
- if (sched_rt_runtime_exceeded(rt_rq))
- resched_curr(rq);
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- }
- }
- static void
- dequeue_top_rt_rq(struct rt_rq *rt_rq)
- {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- BUG_ON(&rq->rt != rt_rq);
- if (!rt_rq->rt_queued)
- return;
- BUG_ON(!rq->nr_running);
- sub_nr_running(rq, rt_rq->rt_nr_running);
- rt_rq->rt_queued = 0;
- }
- static void
- enqueue_top_rt_rq(struct rt_rq *rt_rq)
- {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- BUG_ON(&rq->rt != rt_rq);
- if (rt_rq->rt_queued)
- return;
- if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
- return;
- add_nr_running(rq, rt_rq->rt_nr_running);
- rt_rq->rt_queued = 1;
- }
- #if defined CONFIG_SMP
- static void
- inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
- {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- #ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Change rq's cpupri only if rt_rq is the top queue.
- */
- if (&rq->rt != rt_rq)
- return;
- #endif
- if (rq->online && prio < prev_prio)
- cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
- }
- static void
- dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
- {
- struct rq *rq = rq_of_rt_rq(rt_rq);
- #ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Change rq's cpupri only if rt_rq is the top queue.
- */
- if (&rq->rt != rt_rq)
- return;
- #endif
- if (rq->online && rt_rq->highest_prio.curr != prev_prio)
- cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
- }
- #else /* CONFIG_SMP */
- static inline
- void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
- static inline
- void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
- #endif /* CONFIG_SMP */
- #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
- static void
- inc_rt_prio(struct rt_rq *rt_rq, int prio)
- {
- int prev_prio = rt_rq->highest_prio.curr;
- if (prio < prev_prio)
- rt_rq->highest_prio.curr = prio;
- inc_rt_prio_smp(rt_rq, prio, prev_prio);
- }
- static void
- dec_rt_prio(struct rt_rq *rt_rq, int prio)
- {
- int prev_prio = rt_rq->highest_prio.curr;
- if (rt_rq->rt_nr_running) {
- WARN_ON(prio < prev_prio);
- /*
- * This may have been our highest task, and therefore
- * we may have some recomputation to do
- */
- if (prio == prev_prio) {
- struct rt_prio_array *array = &rt_rq->active;
- rt_rq->highest_prio.curr =
- sched_find_first_bit(array->bitmap);
- }
- } else
- rt_rq->highest_prio.curr = MAX_RT_PRIO;
- dec_rt_prio_smp(rt_rq, prio, prev_prio);
- }
- #else
- static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
- static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
- #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_RT_GROUP_SCHED
- static void
- inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- if (rt_se_boosted(rt_se))
- rt_rq->rt_nr_boosted++;
- if (rt_rq->tg)
- start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
- }
- static void
- dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- if (rt_se_boosted(rt_se))
- rt_rq->rt_nr_boosted--;
- WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
- }
- #else /* CONFIG_RT_GROUP_SCHED */
- static void
- inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- start_rt_bandwidth(&def_rt_bandwidth);
- }
- static inline
- void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
- #endif /* CONFIG_RT_GROUP_SCHED */
- static inline
- unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
- {
- struct rt_rq *group_rq = group_rt_rq(rt_se);
- if (group_rq)
- return group_rq->rt_nr_running;
- else
- return 1;
- }
- static inline
- unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
- {
- struct rt_rq *group_rq = group_rt_rq(rt_se);
- struct task_struct *tsk;
- if (group_rq)
- return group_rq->rr_nr_running;
- tsk = rt_task_of(rt_se);
- return (tsk->policy == SCHED_RR) ? 1 : 0;
- }
- static inline
- void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- int prio = rt_se_prio(rt_se);
- WARN_ON(!rt_prio(prio));
- rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
- rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
- inc_rt_prio(rt_rq, prio);
- inc_rt_migration(rt_se, rt_rq);
- inc_rt_group(rt_se, rt_rq);
- }
- static inline
- void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
- {
- WARN_ON(!rt_prio(rt_se_prio(rt_se)));
- WARN_ON(!rt_rq->rt_nr_running);
- rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
- rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
- dec_rt_prio(rt_rq, rt_se_prio(rt_se));
- dec_rt_migration(rt_se, rt_rq);
- dec_rt_group(rt_se, rt_rq);
- }
- /*
- * Change rt_se->run_list location unless SAVE && !MOVE
- *
- * assumes ENQUEUE/DEQUEUE flags match
- */
- static inline bool move_entity(unsigned int flags)
- {
- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
- return false;
- return true;
- }
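- /*
- * E.g. __sched_setscheduler() clears DEQUEUE_MOVE when the
- * effective priority does not change: the entity then keeps its
- * position in the run list and only the on_rq book-keeping in
- * __dequeue/__enqueue_rt_entity() is redone.
- */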
- static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
- {
- list_del_init(&rt_se->run_list);
- if (list_empty(array->queue + rt_se_prio(rt_se)))
- __clear_bit(rt_se_prio(rt_se), array->bitmap);
- rt_se->on_list = 0;
- }
- static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
- {
- struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
- struct rt_prio_array *array = &rt_rq->active;
- struct rt_rq *group_rq = group_rt_rq(rt_se);
- struct list_head *queue = array->queue + rt_se_prio(rt_se);
- /*
- * Don't enqueue the group if it's throttled, or when empty.
- * The latter is a consequence of the former when a child group
- * gets throttled and the current group doesn't have any other
- * active members.
- */
- if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
- if (rt_se->on_list)
- __delist_rt_entity(rt_se, array);
- return;
- }
- if (move_entity(flags)) {
- WARN_ON_ONCE(rt_se->on_list);
- if (flags & ENQUEUE_HEAD)
- list_add(&rt_se->run_list, queue);
- else
- list_add_tail(&rt_se->run_list, queue);
- __set_bit(rt_se_prio(rt_se), array->bitmap);
- rt_se->on_list = 1;
- }
- rt_se->on_rq = 1;
- inc_rt_tasks(rt_se, rt_rq);
- }
- static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
- {
- struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
- struct rt_prio_array *array = &rt_rq->active;
- if (move_entity(flags)) {
- WARN_ON_ONCE(!rt_se->on_list);
- __delist_rt_entity(rt_se, array);
- }
- rt_se->on_rq = 0;
- dec_rt_tasks(rt_se, rt_rq);
- }
- /*
- * Because the prio of an upper entry depends on the lower
- * entries, we must remove entries top-down.
- */
- static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
- {
- struct sched_rt_entity *back = NULL;
- for_each_sched_rt_entity(rt_se) {
- rt_se->back = back;
- back = rt_se;
- }
- dequeue_top_rt_rq(rt_rq_of_se(back));
- for (rt_se = back; rt_se; rt_se = rt_se->back) {
- if (on_rt_rq(rt_se))
- __dequeue_rt_entity(rt_se, flags);
- }
- }
- static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
- {
- struct rq *rq = rq_of_rt_se(rt_se);
- dequeue_rt_stack(rt_se, flags);
- for_each_sched_rt_entity(rt_se)
- __enqueue_rt_entity(rt_se, flags);
- enqueue_top_rt_rq(&rq->rt);
- }
- static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
- {
- struct rq *rq = rq_of_rt_se(rt_se);
- dequeue_rt_stack(rt_se, flags);
- for_each_sched_rt_entity(rt_se) {
- struct rt_rq *rt_rq = group_rt_rq(rt_se);
- if (rt_rq && rt_rq->rt_nr_running)
- __enqueue_rt_entity(rt_se, flags);
- }
- enqueue_top_rt_rq(&rq->rt);
- }
- /*
- * Adding/removing a task to/from a priority array:
- */
- static void
- enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
- {
- struct sched_rt_entity *rt_se = &p->rt;
- schedtune_enqueue_task(p, cpu_of(rq));
- if (flags & ENQUEUE_WAKEUP)
- rt_se->timeout = 0;
- enqueue_rt_entity(rt_se, flags);
- walt_inc_cumulative_runnable_avg(rq, p);
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
- enqueue_pushable_task(rq, p);
- }
- static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
- {
- struct sched_rt_entity *rt_se = &p->rt;
- schedtune_dequeue_task(p, cpu_of(rq));
- update_curr_rt(rq);
- dequeue_rt_entity(rt_se, flags);
- walt_dec_cumulative_runnable_avg(rq, p);
- dequeue_pushable_task(rq, p);
- }
- /*
- * Put a task at the head or the tail of the run list without the
- * overhead of a dequeue followed by an enqueue.
- */
- static void
- requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
- {
- if (on_rt_rq(rt_se)) {
- struct rt_prio_array *array = &rt_rq->active;
- struct list_head *queue = array->queue + rt_se_prio(rt_se);
- if (head)
- list_move(&rt_se->run_list, queue);
- else
- list_move_tail(&rt_se->run_list, queue);
- }
- }
- static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
- {
- struct sched_rt_entity *rt_se = &p->rt;
- struct rt_rq *rt_rq;
- for_each_sched_rt_entity(rt_se) {
- rt_rq = rt_rq_of_se(rt_se);
- requeue_rt_entity(rt_rq, rt_se, head);
- }
- }
- static void yield_task_rt(struct rq *rq)
- {
- requeue_task_rt(rq, rq->curr, 0);
- }
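- /*
- * head == 0 above: sched_yield() from a SCHED_RR task moves it
- * to the tail of its own priority queue, letting equal-priority
- * tasks run first; a lone task at its level keeps running.
- */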
- #ifdef CONFIG_SMP
- static int find_lowest_rq(struct task_struct *task);
- static int
- select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
- int sibling_count_hint)
- {
- struct task_struct *curr;
- struct rq *rq;
- /* For anything but wake ups, just return the task_cpu */
- if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK
- && !cpu_isolated(cpu))
- goto out;
- rq = cpu_rq(cpu);
- rcu_read_lock();
- curr = READ_ONCE(rq->curr); /* unlocked access */
- /*
- * If the current task on @p's runqueue is an RT task, then
- * try to see if we can wake this RT task up on another
- * runqueue. Otherwise simply start this RT task
- * on its current runqueue.
- *
- * We want to avoid overloading runqueues. If the woken
- * task is a higher priority, then it will stay on this CPU
- * and the lower prio task should be moved to another CPU.
- * Even though this will probably make the lower prio task
- * lose its cache, we do not want to bounce a higher task
- * around just because it gave up its CPU, perhaps for a
- * lock?
- *
- * For equal prio tasks, we just let the scheduler sort it out.
- *
- * Otherwise, just let it ride on the affined RQ and the
- * post-schedule router will push the preempted task away
- *
- * This test is optimistic, if we get it wrong the load-balancer
- * will have to sort it out.
- */
- #if defined(CONFIG_MTK_SCHED_INTEROP)
- /* if the task is allowed to run on more than one CPU. */
- if ((p->nr_cpus_allowed > 1)) {
- #else
- if ((curr && unlikely(rt_task(curr)) &&
- (curr->nr_cpus_allowed < 2 ||
- curr->prio <= p->prio)) || cpu_isolated(cpu)) {
- #endif
- int target = find_lowest_rq(p);
- /*
- * Don't bother moving it if the destination CPU is
- * not running a lower priority task.
- */
- if (target != -1 &&
- p->prio < cpu_rq(target)->rt.highest_prio.curr)
- cpu = target;
- }
- rcu_read_unlock();
- out:
- #ifdef CONFIG_MTK_SCHED_BOOST
- cpu = select_task_prefer_cpu(p, cpu);
- #endif
- return cpu;
- }
- static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
- {
- /*
- * Current can't be migrated, useless to reschedule,
- * let's hope p can move out.
- */
- if (rq->curr->nr_cpus_allowed == 1 ||
- !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
- return;
- /*
- * p is migratable, so let's not schedule it and
- * see if it is pushed or pulled somewhere else.
- */
- if (p->nr_cpus_allowed != 1
- && cpupri_find(&rq->rd->cpupri, p, NULL))
- return;
- /*
- * There appear to be other CPUs that can accept
- * current and none to run 'p', so let's reschedule
- * to try and push current away:
- */
- requeue_task_rt(rq, p, 1);
- resched_curr(rq);
- }
- #endif /* CONFIG_SMP */
- /*
- * Preempt the current task with a newly woken task if needed:
- */
- static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
- {
- if (p->prio < rq->curr->prio) {
- resched_curr(rq);
- return;
- }
- #ifdef CONFIG_SMP
- /*
- * If:
- *
- * - the newly woken task is of equal priority to the current task
- * - the newly woken task is non-migratable while current is migratable
- * - current will be preempted on the next reschedule
- *
- * we should check to see if current can readily move to a different
- * cpu. If so, we will reschedule to allow the push logic to try
- * to move current somewhere else, making room for our non-migratable
- * task.
- */
- if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
- check_preempt_equal_prio(rq, p);
- #endif
- }
- static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
- struct rt_rq *rt_rq)
- {
- struct rt_prio_array *array = &rt_rq->active;
- struct sched_rt_entity *next = NULL;
- struct list_head *queue;
- int idx;
- idx = sched_find_first_bit(array->bitmap);
- BUG_ON(idx >= MAX_RT_PRIO);
- queue = array->queue + idx;
- next = list_entry(queue->next, struct sched_rt_entity, run_list);
- return next;
- }
- static struct task_struct *_pick_next_task_rt(struct rq *rq)
- {
- struct sched_rt_entity *rt_se;
- struct task_struct *p;
- struct rt_rq *rt_rq = &rq->rt;
- do {
- rt_se = pick_next_rt_entity(rq, rt_rq);
- BUG_ON(!rt_se);
- rt_rq = group_rt_rq(rt_se);
- } while (rt_rq);
- p = rt_task_of(rt_se);
- p->se.exec_start = rq_clock_task(rq);
- per_cpu(pick_exec_start, rq->cpu) = p->se.exec_start;
- per_cpu(sched_pick_exec_start, rq->cpu) = sched_clock_cpu(rq->cpu);
- return p;
- }
- extern int update_rt_rq_load_avg(u64 now, int cpu, struct rt_rq *rt_rq, int running);
- static struct task_struct *
- pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
- {
- struct task_struct *p;
- struct rt_rq *rt_rq = &rq->rt;
- if (need_pull_rt_task(rq, prev)) {
- /*
- * This is OK, because current is on_cpu, which avoids it being
- * picked for load-balance and preemption/IRQs are still
- * disabled avoiding further scheduler activity on it and we're
- * being very careful to re-start the picking loop.
- */
- rq_unpin_lock(rq, rf);
- pull_rt_task(rq);
- rq_repin_lock(rq, rf);
- /*
- * pull_rt_task() can drop (and re-acquire) rq->lock; this
- * means a dl or stop task can slip in, in which case we need
- * to re-start task selection.
- */
- if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
- rq->dl.dl_nr_running))
- return RETRY_TASK;
- }
- /*
- * We may dequeue prev's rt_rq in put_prev_task().
- * So, we update time before rt_nr_running check.
- */
- if (prev->sched_class == &rt_sched_class)
- update_curr_rt(rq);
- if (!rt_rq->rt_queued)
- return NULL;
- put_prev_task(rq, prev);
- p = _pick_next_task_rt(rq);
- /* The running task is never eligible for pushing */
- dequeue_pushable_task(rq, p);
- queue_push_tasks(rq);
- if (p)
- update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), rt_rq,
- rq->curr->sched_class == &rt_sched_class);
- return p;
- }
- static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
- {
- update_curr_rt(rq);
- update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);
- /*
- * The previous task needs to be made eligible for pushing
- * if it is still active
- */
- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
- enqueue_pushable_task(rq, p);
- }
- #ifdef CONFIG_SMP
- /* Only try algorithms three times */
- #define RT_MAX_TRIES 3
- static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
- {
- if (!task_running(rq, p) &&
- cpumask_test_cpu(cpu, &p->cpus_allowed))
- return 1;
- return 0;
- }
- /*
- * Return the highest pushable rq's task, which is suitable to be executed
- * on the cpu, NULL otherwise
- */
- static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
- {
- struct plist_head *head = &rq->rt.pushable_tasks;
- struct task_struct *p;
- if (!has_pushable_tasks(rq))
- return NULL;
- plist_for_each_entry(p, head, pushable_tasks) {
- if (pick_rt_task(rq, p, cpu))
- return p;
- }
- return NULL;
- }
- static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
- #ifdef CONFIG_MTK_SCHED_INTEROP
- static int mt_sched_interop_rt(int cpu, struct cpumask *lowest_mask)
- {
- int lowest_cpu = -1, lowest_prio = 0;
- trace_sched_interop(cpu, lowest_mask->bits[0]);
- if (cpumask_test_cpu(cpu, lowest_mask) && idle_cpu(cpu)
- && hmp_cpu_is_slowest(cpu) && !cpu_isolated(cpu))
- return cpu;
- for_each_cpu(cpu, lowest_mask) {
- struct rq *rq;
- struct task_struct *curr;
- if (cpu_isolated(cpu))
- continue;
- if (idle_cpu(cpu))
- return cpu;
- rq = cpu_rq(cpu);
- curr = rq->curr;
- if ((curr->sched_class == &fair_sched_class)
- && (curr->prio > lowest_prio)) {
- lowest_prio = curr->prio;
- lowest_cpu = cpu;
- }
- }
- if (-1 != lowest_cpu)
- return lowest_cpu;
- return -1;
- }
- #endif
- static int find_lowest_rq(struct task_struct *task)
- {
- struct sched_domain *sd;
- struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
- int this_cpu = smp_processor_id();
- int cpu = task_cpu(task);
- #ifdef CONFIG_MTK_SCHED_INTEROP
- int interop_cpu;
- #endif
- /* Make sure the mask is initialized first */
- if (unlikely(!lowest_mask))
- return -1;
- if (task->nr_cpus_allowed == 1)
- return -1; /* No other targets possible */
- if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
- return -1; /* No targets found */
- #ifdef CONFIG_MTK_SCHED_INTEROP
- interop_cpu = mt_sched_interop_rt(cpu, lowest_mask);
- if (interop_cpu != -1) {
- return interop_cpu;
- }
- #endif
- /*
- * At this point we have built a mask of cpus representing the
- * lowest priority tasks in the system. Now we want to elect
- * the best one based on our affinity and topology.
- *
- * We prioritize the last cpu that the task executed on since
- * it is most likely cache-hot in that location.
- */
- if (cpumask_test_cpu(cpu, lowest_mask) && !cpu_isolated(cpu))
- return cpu;
- /*
- * Otherwise, we consult the sched_domains span maps to figure
- * out which cpu is logically closest to our hot cache data.
- */
- if (!cpumask_test_cpu(this_cpu, lowest_mask))
- this_cpu = -1; /* Skip this_cpu opt if not among lowest */
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- if (sd->flags & SD_WAKE_AFFINE) {
- int best_cpu;
- /*
- * "this_cpu" is cheaper to preempt than a
- * remote processor.
- */
- if (this_cpu != -1 &&
- cpumask_test_cpu(this_cpu, sched_domain_span(sd)) &&
- !cpu_isolated(this_cpu)) {
- rcu_read_unlock();
- return this_cpu;
- }
- best_cpu = cpumask_first_and(lowest_mask,
- sched_domain_span(sd));
- if (best_cpu < nr_cpu_ids && !cpu_isolated(best_cpu)) {
- rcu_read_unlock();
- return best_cpu;
- }
- }
- }
- rcu_read_unlock();
- /*
- * And finally, if there were no matches within the domains
- * just give the caller *something* to work with from the compatible
- * locations.
- */
- if (this_cpu != -1 && !cpu_isolated(this_cpu))
- return this_cpu;
- cpu = cpumask_any(lowest_mask);
- if (cpu < nr_cpu_ids && !cpu_isolated(cpu))
- return cpu;
- return -1;
- }
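- /*
- * Net preference order of the above (ignoring the MTK interop
- * shortcut): the task's previous CPU if it is in lowest_mask,
- * then this_cpu or the first lowest_mask CPU sharing a
- * SD_WAKE_AFFINE domain, then this_cpu itself, then any
- * non-isolated CPU in the mask, else -1.
- */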
- /* Will lock the rq it finds */
- static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
- {
- struct rq *lowest_rq = NULL;
- int tries;
- int cpu;
- for (tries = 0; tries < RT_MAX_TRIES; tries++) {
- cpu = find_lowest_rq(task);
- if ((cpu == -1) || (cpu == rq->cpu))
- break;
- lowest_rq = cpu_rq(cpu);
- if (lowest_rq->rt.highest_prio.curr <= task->prio) {
- /*
- * Target rq has tasks of equal or higher priority,
- * retrying does not release any lock and is unlikely
- * to yield a different result.
- */
- lowest_rq = NULL;
- break;
- }
- /* if the prio of this runqueue changed, try again */
- if (double_lock_balance(rq, lowest_rq)) {
- /*
- * We had to unlock the run queue. In
- * the meantime, the task could have
- * migrated already or had its affinity changed.
- * Also make sure that it wasn't scheduled on its rq.
- */
- if (unlikely(task_rq(task) != rq ||
- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
- task_running(rq, task) ||
- !rt_task(task) ||
- !task_on_rq_queued(task))) {
- double_unlock_balance(rq, lowest_rq);
- lowest_rq = NULL;
- break;
- }
- }
- /* If this rq is still suitable use it. */
- if (lowest_rq->rt.highest_prio.curr > task->prio)
- break;
- /* try again */
- double_unlock_balance(rq, lowest_rq);
- lowest_rq = NULL;
- }
- return lowest_rq;
- }
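- /*
- * Note on the revalidation above: double_lock_balance() may drop
- * rq->lock in order to take both locks in address order, so by the
- * time both locks are held the world may have changed. Each condition
- * checked afterwards (same rq, CPU still allowed, not running, still
- * RT, still queued) guards against a change in that window. The loop
- * is also bounded by RT_MAX_TRIES, so a persistently racing target
- * makes us give up rather than spin.
- */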
- static struct task_struct *pick_next_pushable_task(struct rq *rq)
- {
- struct task_struct *p;
- if (!has_pushable_tasks(rq))
- return NULL;
- p = plist_first_entry(&rq->rt.pushable_tasks,
- struct task_struct, pushable_tasks);
- BUG_ON(rq->cpu != task_cpu(p));
- BUG_ON(task_current(rq, p));
- BUG_ON(p->nr_cpus_allowed <= 1);
- BUG_ON(!task_on_rq_queued(p));
- BUG_ON(!rt_task(p));
- return p;
- }
- /*
- * If the current CPU has more than one RT task, see if the
- * non-running task can migrate over to a CPU that is running a
- * task of lesser priority.
- */
- static int push_rt_task(struct rq *rq)
- {
- struct task_struct *next_task;
- struct rq *lowest_rq;
- int ret = 0;
- #ifdef CONFIG_RT_GROUP_SCHED
- struct rt_rq *rt_rq;
- #endif
- if (!rq->rt.overloaded)
- return 0;
- next_task = pick_next_pushable_task(rq);
- if (!next_task)
- return 0;
- retry:
- #ifdef CONFIG_RT_GROUP_SCHED
- rt_rq = next_task->rt.rt_rq;
- #endif
- if (unlikely(next_task == rq->curr)) {
- WARN_ON(1);
- return 0;
- }
- /*
- * It's possible that next_task slipped in with a
- * higher priority than current. If that's the case
- * just reschedule current.
- */
- if (unlikely(next_task->prio < rq->curr->prio)) {
- #ifdef CONFIG_RT_GROUP_SCHED
- /* Only reschedule when next_task is not throttled */
- if (!rt_rq_throttled(rt_rq)) {
- resched_curr(rq);
- return 0;
- }
- #else
- resched_curr(rq);
- return 0;
- #endif
- }
- /* We might release rq lock */
- get_task_struct(next_task);
- /* find_lock_lowest_rq locks the rq if found */
- lowest_rq = find_lock_lowest_rq(next_task, rq);
- if (!lowest_rq) {
- struct task_struct *task;
- /*
- * find_lock_lowest_rq releases rq->lock
- * so it is possible that next_task has migrated.
- *
- * We need to make sure that the task is still on the same
- * run-queue and is also still the next task eligible for
- * pushing.
- */
- task = pick_next_pushable_task(rq);
- if (task == next_task) {
- /*
- * The task hasn't migrated, and is still the next
- * eligible task, but we failed to find a run-queue
- * to push it to. Do not retry in this case, since
- * other cpus will pull from us when ready.
- */
- goto out;
- }
- if (!task)
- /* No more tasks, just exit */
- goto out;
- /*
- * Something has shifted, try again.
- */
- put_task_struct(next_task);
- next_task = task;
- goto retry;
- }
- deactivate_task(rq, next_task, 0);
- next_task->on_rq = TASK_ON_RQ_MIGRATING;
- set_task_cpu(next_task, lowest_rq->cpu);
- next_task->on_rq = TASK_ON_RQ_QUEUED;
- activate_task(lowest_rq, next_task, 0);
- ret = 1;
- resched_curr(lowest_rq);
- double_unlock_balance(rq, lowest_rq);
- out:
- put_task_struct(next_task);
- return ret;
- }
- static void push_rt_tasks(struct rq *rq)
- {
- /* push_rt_task will return true if it moved an RT */
- while (push_rt_task(rq))
- ;
- }
- #ifdef HAVE_RT_PUSH_IPI
- /*
- * When a high priority task schedules out from a CPU and a lower priority
- * task is scheduled in, a check is made to see if there's any RT tasks
- * on other CPUs that are waiting to run because a higher priority RT task
- * is currently running on its CPU. In this case, the CPU with multiple RT
- * tasks queued on it (overloaded) needs to be notified that a CPU has opened
- * up that may be able to run one of its non-running queued RT tasks.
- *
- * All CPUs with overloaded RT tasks need to be notified, as there is
- * currently no way to know which of these CPUs has the highest priority
- * task waiting to run. Instead of trying to take a spinlock on each of
- * these CPUs, which has been shown to cause large latencies on machines
- * with many CPUs, an IPI is sent to those CPUs so that they themselves
- * push off the overloaded RT tasks waiting to run.
- *
- * Just sending an IPI to each of the CPUs is also an issue, as on large
- * count CPU machines, this can cause an IPI storm on a CPU, especially
- * if it's the only CPU with multiple RT tasks queued, and a large number
- * of CPUs are scheduling a lower priority task at the same time.
- *
- * Each root domain has its own irq work function that can iterate over
- * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
- * tasks must be checked whether one or many CPUs are lowering their
- * priority, there's a single irq work iterator that will try to push
- * off the RT tasks that are waiting to run.
- *
- * When a CPU schedules a lower priority task, it will kick off the
- * irq work iterator that will jump to each CPU with overloaded RT tasks.
- * As it only takes the first CPU that schedules a lower priority task
- * to start the process, the rto_loop_start variable is claimed with an
- * atomic cmpxchg, and only the CPU that moves it from zero will try to
- * take the rto_lock. This prevents high contention on the lock as the
- * process handles all CPUs scheduling lower priority tasks.
- *
- * All CPUs that are scheduling a lower priority task will increment the
- * rto_loop_next variable. This will make sure that the irq work iterator
- * checks all RT overloaded CPUs whenever a CPU schedules a new lower
- * priority task, even if the iterator is in the middle of a scan.
- * Incrementing rto_loop_next will cause the iterator to perform another
- * scan.
- *
- */
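- /*
- * Condensed sketch of the handshake implemented below (real field
- * names, control flow simplified):
- *
- *   CPU scheduling a lower-prio task:
- *     atomic_inc(&rd->rto_loop_next);
- *     if (rto_start_trylock(&rd->rto_loop_start)) {
- *       cpu = rto_next_cpu(rd);      // first overloaded CPU, or -1
- *       if (cpu >= 0) queue rto_push_work on cpu;
- *       rto_start_unlock(&rd->rto_loop_start);
- *     }
- *
- *   irq work running on an overloaded CPU:
- *     push_rt_tasks(this_rq());
- *     cpu = rto_next_cpu(rd);        // next overloaded CPU, or -1
- *     if (cpu >= 0) requeue the work on cpu; else sched_put_rd(rd);
- *
- * rto_next_cpu() only reports the scan finished once rd->rto_loop has
- * caught up with rd->rto_loop_next, so any concurrent increment forces
- * one more full sweep of rto_mask.
- */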
- static int rto_next_cpu(struct root_domain *rd)
- {
- int next;
- int cpu;
- /*
- * When starting the IPI RT pushing, the rto_cpu is set to -1, so
- * rto_next_cpu() will simply return the first CPU found in
- * the rto_mask.
- *
- * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
- * will return the next CPU found in the rto_mask.
- *
- * If there are no more CPUs left in the rto_mask, then a check is made
- * against rto_loop and rto_loop_next. rto_loop is only updated with
- * the rto_lock held, but any CPU may increment the rto_loop_next
- * without any locking.
- */
- for (;;) {
- /* When rto_cpu is -1 this acts like cpumask_first() */
- cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
- rd->rto_cpu = cpu;
- if (cpu < nr_cpu_ids)
- return cpu;
- rd->rto_cpu = -1;
- /*
- * ACQUIRE ensures we see the @rto_mask changes
- * made prior to the @next value observed.
- *
- * Matches WMB in rt_set_overload().
- */
- next = atomic_read_acquire(&rd->rto_loop_next);
- if (rd->rto_loop == next)
- break;
- rd->rto_loop = next;
- }
- return -1;
- }
- static inline bool rto_start_trylock(atomic_t *v)
- {
- return !atomic_cmpxchg_acquire(v, 0, 1);
- }
- static inline void rto_start_unlock(atomic_t *v)
- {
- atomic_set_release(v, 0);
- }
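- /*
- * atomic_cmpxchg_acquire() returns the value that was there before, so
- * rto_start_trylock() succeeds exactly when it moves the flag 0 -> 1;
- * tell_cpu_to_push() below pairs it with rto_start_unlock() once the
- * first IPI target (if any) has been picked.
- */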
- static void tell_cpu_to_push(struct rq *rq)
- {
- int cpu = -1;
- /* Keep the loop going if the IPI is currently active */
- atomic_inc(&rq->rd->rto_loop_next);
- /* Only one CPU can initiate a loop at a time */
- if (!rto_start_trylock(&rq->rd->rto_loop_start))
- return;
- raw_spin_lock(&rq->rd->rto_lock);
- /*
- * The rto_cpu is updated under the lock; if it has a valid CPU
- * then the IPI is still running and will continue due to the
- * update to loop_next, and nothing needs to be done here.
- * Otherwise it is finishing up and an IPI needs to be sent.
- */
- if (rq->rd->rto_cpu < 0)
- cpu = rto_next_cpu(rq->rd);
- raw_spin_unlock(&rq->rd->rto_lock);
- rto_start_unlock(&rq->rd->rto_loop_start);
- if (cpu >= 0) {
- /* Make sure the rd does not get freed while pushing */
- sched_get_rd(rq->rd);
- irq_work_queue_on(&rq->rd->rto_push_work, cpu);
- }
- }
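- /*
- * Example: CPUs 1 and 4 are RT-overloaded (set in rto_mask) and CPUs
- * 2 and 3 both schedule a lower-priority task at the same time. Both
- * increment rto_loop_next, but only one wins rto_loop_start and sends
- * the first IPI to CPU 1. The iterator then hops 1 -> 4, notices that
- * rto_loop != rto_loop_next (the loser's increment), and sweeps the
- * mask once more before dropping the root-domain reference.
- */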
- /* Called from hardirq context */
- void rto_push_irq_work_func(struct irq_work *work)
- {
- struct root_domain *rd =
- container_of(work, struct root_domain, rto_push_work);
- struct rq *rq;
- int cpu;
- rq = this_rq();
- /*
- * We do not need to grab the lock to check for has_pushable_tasks.
- * When it gets updated, a check is made if a push is possible.
- */
- if (has_pushable_tasks(rq)) {
- raw_spin_lock(&rq->lock);
- push_rt_tasks(rq);
- raw_spin_unlock(&rq->lock);
- }
- raw_spin_lock(&rd->rto_lock);
- /* Pass the IPI to the next rt overloaded queue */
- cpu = rto_next_cpu(rd);
- raw_spin_unlock(&rd->rto_lock);
- if (cpu < 0) {
- sched_put_rd(rd);
- return;
- }
- /* Try the next RT overloaded CPU */
- irq_work_queue_on(&rd->rto_push_work, cpu);
- }
- #endif /* HAVE_RT_PUSH_IPI */
- static void pull_rt_task(struct rq *this_rq)
- {
- int this_cpu = this_rq->cpu, cpu;
- bool resched = false;
- struct task_struct *p;
- struct rq *src_rq;
- int rt_overload_count = rt_overloaded(this_rq);
- if (likely(!rt_overload_count))
- return;
- /*
- * Match the barrier from rt_set_overload(); this guarantees that if we
- * see overloaded we must also see the rto_mask bit.
- */
- smp_rmb();
- /* If we are the only overloaded CPU do nothing */
- if (rt_overload_count == 1 &&
- cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
- return;
- #ifdef HAVE_RT_PUSH_IPI
- if (sched_feat(RT_PUSH_IPI)) {
- tell_cpu_to_push(this_rq);
- return;
- }
- #endif
- for_each_cpu(cpu, this_rq->rd->rto_mask) {
- if (this_cpu == cpu)
- continue;
- src_rq = cpu_rq(cpu);
- /*
- * Don't bother taking the src_rq->lock if the next highest
- * task is known to be lower-priority than our current task.
- * This may look racy, but if this value is about to go
- * logically higher, the src_rq will push this task away.
- * And if it's going logically lower, we do not care.
- */
- if (src_rq->rt.highest_prio.next >=
- this_rq->rt.highest_prio.curr)
- continue;
- /*
- * We can potentially drop this_rq's lock in
- * double_lock_balance, and another CPU could
- * alter this_rq
- */
- double_lock_balance(this_rq, src_rq);
- /*
- * We can only pull a task that is pushable
- * on its rq, and no others.
- */
- p = pick_highest_pushable_task(src_rq, this_cpu);
- /*
- * Do we have an RT task that preempts
- * the to-be-scheduled task?
- */
- if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
- WARN_ON(p == src_rq->curr);
- WARN_ON(!task_on_rq_queued(p));
- /*
- * There's a chance that p is higher in priority
- * than what's currently running on its cpu.
- * This just means that p is waking up and hasn't
- * had a chance to schedule yet. We only pull
- * p if it is lower in priority than the
- * current task on its run queue.
- */
- if (p->prio < src_rq->curr->prio)
- goto skip;
- resched = true;
- deactivate_task(src_rq, p, 0);
- p->on_rq = TASK_ON_RQ_MIGRATING;
- set_task_cpu(p, this_cpu);
- p->on_rq = TASK_ON_RQ_QUEUED;
- activate_task(this_rq, p, 0);
- /*
- * We continue with the search, just in
- * case there's an even higher prio task
- * in another runqueue. (low likelihood
- * but possible)
- */
- }
- skip:
- double_unlock_balance(this_rq, src_rq);
- }
- if (resched)
- resched_curr(this_rq);
- }
- /*
- * If we are not running and we are not going to reschedule soon, we should
- * try to push tasks away now
- */
- static void task_woken_rt(struct rq *rq, struct task_struct *p)
- {
- if (!task_running(rq, p) &&
- !test_tsk_need_resched(rq->curr) &&
- p->nr_cpus_allowed > 1 &&
- (dl_task(rq->curr) || rt_task(rq->curr)) &&
- (rq->curr->nr_cpus_allowed < 2 ||
- rq->curr->prio <= p->prio))
- push_rt_tasks(rq);
- }
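- /*
- * Decoded: push on wakeup only when p is not already running here, the
- * current task will not be preempted anyway (no resched pending), p can
- * actually move (more than one allowed CPU), and current is a DL or RT
- * task that either cannot migrate itself (nr_cpus_allowed < 2) or has
- * priority at least as high as p (prio <= p->prio), so p will not get
- * to run here soon.
- */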
- /* Assumes rq->lock is held */
- static void rq_online_rt(struct rq *rq)
- {
- if (rq->rt.overloaded)
- rt_set_overload(rq);
- __enable_runtime(rq);
- cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
- }
- /* Assumes rq->lock is held */
- static void rq_offline_rt(struct rq *rq)
- {
- if (rq->rt.overloaded)
- rt_clear_overload(rq);
- __disable_runtime(rq);
- cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
- }
- void unthrottle_offline_rt_rqs(struct rq *rq)
- {
- rt_rq_iter_t iter;
- struct rt_rq *rt_rq;
- for_each_rt_rq(rt_rq, iter, rq) {
- if (rt_rq_throttled(rt_rq)) {
- rt_rq->rt_throttled = 0;
- printk_deferred("[name:rt&]sched: migrate_tasks: RT throttling inactivated\n");
- }
- sched_rt_rq_enqueue(rt_rq);
- }
- }
- /*
- * When switching from the RT queue, we bring ourselves to a
- * position where we might want to pull RT tasks from other runqueues.
- */
- static void switched_from_rt(struct rq *rq, struct task_struct *p)
- {
- /*
- * If there are other RT tasks then we will reschedule
- * and the scheduling of the other RT tasks will handle
- * the balancing. But if we are the last RT task
- * we may need to handle the pulling of RT tasks
- * now.
- */
- if (!task_on_rq_queued(p) || rq->rt.rt_nr_running ||
- cpu_isolated(cpu_of(rq)))
- return;
- queue_pull_task(rq);
- }
- void __init init_sched_rt_class(void)
- {
- unsigned int i;
- for_each_possible_cpu(i) {
- zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
- GFP_KERNEL, cpu_to_node(i));
- }
- }
- #endif /* CONFIG_SMP */
- /*
- * When switching a task to RT, we may overload the runqueue
- * with RT tasks. In this case we try to push them off to
- * other runqueues.
- */
- static void switched_to_rt(struct rq *rq, struct task_struct *p)
- {
- /*
- * If we are already running, then there's nothing
- * that needs to be done. But if we are not running
- * we may need to preempt the current running task.
- * If that current running task is also an RT task
- * then see if we can move to another run queue.
- */
- if (task_on_rq_queued(p) && rq->curr != p) {
- #ifdef CONFIG_SMP
- if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
- queue_push_tasks(rq);
- #endif /* CONFIG_SMP */
- if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
- resched_curr(rq);
- }
- }
- /*
- * Priority of the task has changed. This may cause
- * us to initiate a push or pull.
- */
- static void
- prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
- {
- if (!task_on_rq_queued(p))
- return;
- if (rq->curr == p) {
- #ifdef CONFIG_SMP
- /*
- * If our priority decreases while running, we
- * may need to pull tasks to this runqueue.
- */
- if (oldprio < p->prio)
- queue_pull_task(rq);
- /*
- * If there's a higher priority task waiting to run
- * then reschedule.
- */
- if (p->prio > rq->rt.highest_prio.curr)
- resched_curr(rq);
- #else
- /* For UP simply resched on drop of prio */
- if (oldprio < p->prio)
- resched_curr(rq);
- #endif /* CONFIG_SMP */
- } else {
- /*
- * This task is not running, but if its priority is
- * higher than that of the current running task,
- * then reschedule.
- */
- if (p->prio < rq->curr->prio)
- resched_curr(rq);
- }
- }
- #ifdef CONFIG_POSIX_TIMERS
- static void watchdog(struct rq *rq, struct task_struct *p)
- {
- unsigned long soft, hard;
- /* max may change after cur was read; this will be fixed on the next tick */
- soft = task_rlimit(p, RLIMIT_RTTIME);
- hard = task_rlimit_max(p, RLIMIT_RTTIME);
- if (soft != RLIM_INFINITY) {
- unsigned long next;
- if (p->rt.watchdog_stamp != jiffies) {
- p->rt.timeout++;
- p->rt.watchdog_stamp = jiffies;
- }
- next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
- if (p->rt.timeout > next)
- p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
- }
- }
- #else
- static inline void watchdog(struct rq *rq, struct task_struct *p) { }
- #endif
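- /*
- * Worked example for the watchdog math above, assuming HZ=250 (one
- * tick is USEC_PER_SEC/HZ = 4000us) and a soft RLIMIT_RTTIME of
- * 950000us: next = DIV_ROUND_UP(950000, 4000) = 238. Once the task
- * has accumulated more than 238 ticks of RT runtime, sched_exp is set
- * to the already-reached sum_exec_runtime, so the posix CPU timer
- * check fires on the next tick and RLIMIT_RTTIME delivers SIGXCPU
- * (SIGKILL once the hard limit is exceeded).
- */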
- static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
- {
- struct sched_rt_entity *rt_se = &p->rt;
- update_curr_rt(rq);
- update_rt_rq_load_avg(rq_clock_task(rq), cpu_of(rq), &rq->rt, 1);
- watchdog(rq, p);
- /*
- * RR tasks need a special form of timeslice management.
- * FIFO tasks have no timeslices.
- */
- if (p->policy != SCHED_RR)
- return;
- if (--p->rt.time_slice)
- return;
- p->rt.time_slice = sched_rr_timeslice;
- /*
- * Requeue to the end of queue if we (and all of our ancestors) are not
- * the only element on the queue
- */
- for_each_sched_rt_entity(rt_se) {
- if (rt_se->run_list.prev != rt_se->run_list.next) {
- requeue_task_rt(rq, p, 0);
- resched_curr(rq);
- return;
- }
- }
- }
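- /*
- * run_list.prev != run_list.next above is a cheap "am I alone on this
- * list?" test: a sole entry has both pointers aimed at the list head.
- * The walk goes up the group hierarchy because rotating the task is
- * only visible if some level actually has a sibling to rotate past;
- * if every level is alone, requeueing would change nothing.
- */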
- static void set_curr_task_rt(struct rq *rq)
- {
- struct task_struct *p = rq->curr;
- p->se.exec_start = rq_clock_task(rq);
- per_cpu(set_curr_exec_start, rq->cpu) = p->se.exec_start;
- per_cpu(sched_set_curr_exec_start, rq->cpu) = sched_clock_cpu(rq->cpu);
- /* The running task is never eligible for pushing */
- dequeue_pushable_task(rq, p);
- }
- static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
- {
- /*
- * Time slice is 0 for SCHED_FIFO tasks
- */
- if (task->policy == SCHED_RR)
- return sched_rr_timeslice;
- else
- return 0;
- }
- const struct sched_class rt_sched_class = {
- .next = &fair_sched_class,
- .enqueue_task = enqueue_task_rt,
- .dequeue_task = dequeue_task_rt,
- .yield_task = yield_task_rt,
- .check_preempt_curr = check_preempt_curr_rt,
- .pick_next_task = pick_next_task_rt,
- .put_prev_task = put_prev_task_rt,
- #ifdef CONFIG_SMP
- .select_task_rq = select_task_rq_rt,
- .set_cpus_allowed = set_cpus_allowed_common,
- .rq_online = rq_online_rt,
- .rq_offline = rq_offline_rt,
- .task_woken = task_woken_rt,
- .switched_from = switched_from_rt,
- #endif
- .set_curr_task = set_curr_task_rt,
- .task_tick = task_tick_rt,
- .get_rr_interval = get_rr_interval_rt,
- .prio_changed = prio_changed_rt,
- .switched_to = switched_to_rt,
- .update_curr = update_curr_rt,
- #ifdef CONFIG_UCLAMP_TASK
- .uclamp_enabled = 1,
- #endif
- #ifdef CONFIG_SCHED_WALT
- .fixup_cumulative_runnable_avg = walt_fixup_cumulative_runnable_avg,
- #endif
- };
- /*
- * Ensure that the real time constraints are schedulable.
- */
- static DEFINE_MUTEX(rt_constraints_mutex);
- #ifdef CONFIG_MTK_SCHED_INTEROP
- bool is_rt_throttle(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- rt_rq_iter_t iter;
- struct rt_rq *rt_rq;
- bool rt_throttle = false;
- for_each_rt_rq(rt_rq, iter, rq) {
- if (rt_rq_throttled(rt_rq)) {
- rt_throttle = true;
- break;
- }
- }
- return rt_throttle;
- }
- #endif
- #ifdef CONFIG_RT_GROUP_SCHED
- /* Must be called with tasklist_lock held */
- static inline int tg_has_rt_tasks(struct task_group *tg)
- {
- struct task_struct *g, *p;
- /*
- * Autogroups do not have RT tasks; see autogroup_create().
- */
- if (task_group_is_autogroup(tg))
- return 0;
- for_each_process_thread(g, p) {
- if (rt_task(p) && task_group(p) == tg)
- return 1;
- }
- return 0;
- }
- struct rt_schedulable_data {
- struct task_group *tg;
- u64 rt_period;
- u64 rt_runtime;
- };
- static int tg_rt_schedulable(struct task_group *tg, void *data)
- {
- struct rt_schedulable_data *d = data;
- struct task_group *child;
- unsigned long total, sum = 0;
- u64 period, runtime;
- period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- runtime = tg->rt_bandwidth.rt_runtime;
- if (tg == d->tg) {
- period = d->rt_period;
- runtime = d->rt_runtime;
- }
- /*
- * Cannot have more runtime than the period.
- */
- if (runtime > period && runtime != RUNTIME_INF)
- return -EINVAL;
- /*
- * Ensure we don't starve existing RT tasks.
- */
- if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
- return -EBUSY;
- total = to_ratio(period, runtime);
- /*
- * Nobody can have more than the global setting allows.
- */
- if (total > to_ratio(global_rt_period(), global_rt_runtime()))
- return -EINVAL;
- /*
- * The sum of our children's runtime should not exceed our own.
- */
- list_for_each_entry_rcu(child, &tg->children, siblings) {
- period = ktime_to_ns(child->rt_bandwidth.rt_period);
- runtime = child->rt_bandwidth.rt_runtime;
- if (child == d->tg) {
- period = d->rt_period;
- runtime = d->rt_runtime;
- }
- sum += to_ratio(period, runtime);
- }
- if (sum > total)
- return -EINVAL;
- return 0;
- }
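- /*
- * Worked example: with the default global bounds (period 1s, runtime
- * 0.95s) the root ratio is 95%. A group asking for runtime 0.4s over
- * a 1s period has ratio 40% -- allowed. Two of its children asking
- * for 0.25s each would sum to 50% > 40% and fail the child check with
- * -EINVAL. (to_ratio() expresses these fractions in fixed point,
- * roughly runtime << 20 / period, not in percent.)
- */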
- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
- {
- int ret;
- struct rt_schedulable_data data = {
- .tg = tg,
- .rt_period = period,
- .rt_runtime = runtime,
- };
- rcu_read_lock();
- ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
- rcu_read_unlock();
- return ret;
- }
- static int tg_set_rt_bandwidth(struct task_group *tg,
- u64 rt_period, u64 rt_runtime)
- {
- int i, err = 0;
- /*
- * Disallowing the root group's RT runtime is BAD; it would prevent the
- * kernel from creating (and/or operating) RT threads.
- */
- if (tg == &root_task_group && rt_runtime == 0)
- return -EINVAL;
- /* No period doesn't make any sense. */
- if (rt_period == 0)
- return -EINVAL;
- mutex_lock(&rt_constraints_mutex);
- read_lock(&tasklist_lock);
- err = __rt_schedulable(tg, rt_period, rt_runtime);
- if (err)
- goto unlock;
- raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
- tg->rt_bandwidth.rt_runtime = rt_runtime;
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = tg->rt_rq[i];
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = rt_runtime;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- unlock:
- read_unlock(&tasklist_lock);
- mutex_unlock(&rt_constraints_mutex);
- return err;
- }
- int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
- {
- u64 rt_runtime, rt_period;
- rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
- if (rt_runtime_us < 0)
- rt_runtime = RUNTIME_INF;
- else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
- return -EINVAL;
- return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
- }
- long sched_group_rt_runtime(struct task_group *tg)
- {
- u64 rt_runtime_us;
- if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
- return -1;
- rt_runtime_us = tg->rt_bandwidth.rt_runtime;
- do_div(rt_runtime_us, NSEC_PER_USEC);
- return rt_runtime_us;
- }
- int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
- {
- u64 rt_runtime, rt_period;
- if (rt_period_us > U64_MAX / NSEC_PER_USEC)
- return -EINVAL;
- rt_period = rt_period_us * NSEC_PER_USEC;
- rt_runtime = tg->rt_bandwidth.rt_runtime;
- return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
- }
- long sched_group_rt_period(struct task_group *tg)
- {
- u64 rt_period_us;
- rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
- do_div(rt_period_us, NSEC_PER_USEC);
- return rt_period_us;
- }
- static int sched_rt_global_constraints(void)
- {
- int ret = 0;
- mutex_lock(&rt_constraints_mutex);
- read_lock(&tasklist_lock);
- ret = __rt_schedulable(NULL, 0, 0);
- read_unlock(&tasklist_lock);
- mutex_unlock(&rt_constraints_mutex);
- return ret;
- }
- int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
- {
- /* Don't accept realtime tasks when there is no way for them to run */
- if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
- return 0;
- return 1;
- }
- #else /* !CONFIG_RT_GROUP_SCHED */
- static int sched_rt_global_constraints(void)
- {
- unsigned long flags;
- int i;
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = global_rt_runtime();
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
- return 0;
- }
- #endif /* CONFIG_RT_GROUP_SCHED */
- static int sched_rt_global_validate(void)
- {
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
- if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
- (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
- return -EINVAL;
- return 0;
- }
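- /*
- * E.g. the defaults sysctl_sched_rt_period=1000000us and
- * sysctl_sched_rt_runtime=950000us pass; a runtime of -1 (which
- * compares equal to RUNTIME_INF) passes; a runtime greater than the
- * period is rejected with -EINVAL.
- */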
- static void sched_rt_do_global(void)
- {
- def_rt_bandwidth.rt_runtime = global_rt_runtime();
- def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
- }
- int sched_rt_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int old_period, old_runtime;
- static DEFINE_MUTEX(mutex);
- int ret;
- mutex_lock(&mutex);
- old_period = sysctl_sched_rt_period;
- old_runtime = sysctl_sched_rt_runtime;
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (!ret && write) {
- ret = sched_rt_global_validate();
- if (ret)
- goto undo;
- ret = sched_dl_global_validate();
- if (ret)
- goto undo;
- ret = sched_rt_global_constraints();
- if (ret)
- goto undo;
- sched_rt_do_global();
- sched_dl_do_global();
- }
- if (0) {
- undo:
- sysctl_sched_rt_period = old_period;
- sysctl_sched_rt_runtime = old_runtime;
- }
- mutex_unlock(&mutex);
- return ret;
- }
- int sched_rr_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- static DEFINE_MUTEX(mutex);
- mutex_lock(&mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- /*
- * Make sure that internally we keep jiffies.
- * Also, writing zero resets the timeslice to default:
- */
- if (!ret && write) {
- sched_rr_timeslice =
- sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
- msecs_to_jiffies(sysctl_sched_rr_timeslice);
- }
- mutex_unlock(&mutex);
- return ret;
- }
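- /*
- * E.g. assuming HZ=250, writing 100 to sched_rr_timeslice_ms stores
- * msecs_to_jiffies(100) = 25 jiffies internally; writing 0 (or any
- * negative value) restores the RR_TIMESLICE default, i.e. 100ms worth
- * of jiffies.
- */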
- #ifdef CONFIG_SCHED_DEBUG
- void print_rt_stats(struct seq_file *m, int cpu)
- {
- rt_rq_iter_t iter;
- struct rt_rq *rt_rq;
- rcu_read_lock();
- for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
- print_rt_rq(m, cpu, rt_rq);
- rcu_read_unlock();
- }
- #endif /* CONFIG_SCHED_DEBUG */