12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236323732383239324032413242324332443245324632473248324932503251325232533254325532563257325832593260326132623263326432653266326732683269327032713272327332743275327632773278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764277427842794280428142824283428442854286428742884289429042914292429342944295429642974298429943004301430243034304430543064307430843094310431143124313431443154316431743184319432043214322432343244325432643274328432943304331433243334334433543364337433843394340434143424343434443454346434743484349435043514352435343544355435643574358435943604361436243634364436543664367436843694370437143724373437443754376437743784379438043814382438343844385438643874388438943904391439243934394439543964397439843994400440144024403440444054406440744084409441044114412441344144415441644174418441944204421442244234424442544264427442844294430443144324433443444354436443744384439444044414442444344444445444644474448444944504451445244534454445544564457445844594460446144624463446444654466446744684469447044714472447344744475447644774478447944804481448244834484448544864487448844894490449144924493449444954496449744984499450045014502450345044505450645074508450945104511451245134514451545164517451845194520452145224523452445254526452745284529453045314532453345344535453645374538453945404541454245434544454545464547454845494550455145524553455445554556455745584559456045614562456345644565456645674568456945704571457245734574457545764577457845794580458145824583458445854586458745884589459045914592459345944595459645974598459946004601460246034604460546064607460846094610461146124613461446154616461746184619462046214622462346244625462646274628462946304631463246334634463546364637463846394640464146424643464446454646464746484649465046514652465346544655465646574658465946604661466246634664466546664667466846694670467146724673467446754676467746784679468046814682468346844685468646874688468946904691469246934694469546964697469846994700470147024703470447054706470747084709471047114712471347144715471647174718471947204721472247234724472547264727472847294730473147324733473447354736473747384739474047414742474347444745474647474748474947504751475247534754475547564757475847594760476147624763476447654766476747684769477047714772477347744775477647774778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765777577857795780578157825783578457855786578757885789579057915792579357945795579657975798579958005801580258035804580558065807580858095810581158125813581458155816581758185819582058215822582358245825582658275828582958305831583258335834583558365837583858395840584158425843584458455846584758485849585058515852585358545855585658575858585958605861586258635864586558665867586858695870587158725873587458755876587758785879588058815882588358845885588658875888588958905891589258935894589558965897589858995900590159025903590459055906590759085909591059115912591359145915591659175918591959205921592259235924592559265927592859295930593159325933593459355936593759385939594059415942594359445945594659475948594959505951595259535954595559565957595859595960596159625963596459655966596759685969597059715972597359745975597659775978597959805981598259835984598559865987598859895990599159925993599459955996599759985999600060016002600360046005600660076008600960106011601260136014601560166017601860196020602160226023602460256026602760286029603060316032603360346035603660376038603960406041604260436044604560466047604860496050605160526053605460556056605760586059606060616062606360646065606660676068606960706071607260736074607560766077607860796080608160826083608460856086608760886089609060916092609360946095609660976098609961006101610261036104610561066107610861096110611161126113611461156116611761186119612061216122612361246125612661276128612961306131613261336134613561366137613861396140614161426143614461456146614761486149615061516152615361546155615661576158615961606161616261636164616561666167616861696170617161726173617461756176617761786179618061816182618361846185618661876188618961906191619261936194619561966197619861996200620162026203620462056206620762086209621062116212621362146215621662176218621962206221622262236224622562266227622862296230623162326233623462356236623762386239624062416242624362446245624662476248624962506251625262536254625562566257625862596260626162626263626462656266626762686269627062716272627362746275627662776278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767277727872797280728172827283728472857286728772887289729072917292729372947295729672977298729973007301730273037304730573067307730873097310731173127313731473157316731773187319732073217322732373247325732673277328732973307331733273337334733573367337733873397340734173427343734473457346734773487349735073517352735373547355735673577358735973607361736273637364736573667367736873697370737173727373737473757376737773787379738073817382738373847385738673877388738973907391739273937394739573967397739873997400740174027403740474057406740774087409741074117412741374147415741674177418741974207421742274237424742574267427742874297430743174327433743474357436743774387439744074417442744374447445744674477448744974507451745274537454745574567457745874597460746174627463746474657466746774687469747074717472747374747475747674777478747974807481748274837484748574867487748874897490749174927493749474957496749774987499750075017502750375047505750675077508750975107511751275137514751575167517751875197520752175227523752475257526752775287529753075317532753375347535753675377538753975407541754275437544754575467547754875497550755175527553755475557556755775587559756075617562756375647565756675677568756975707571757275737574757575767577757875797580758175827583758475857586758775887589759075917592759375947595759675977598759976007601760276037604760576067607760876097610761176127613761476157616761776187619762076217622762376247625762676277628762976307631763276337634763576367637763876397640764176427643764476457646764776487649765076517652765376547655765676577658765976607661766276637664766576667667766876697670767176727673767476757676767776787679768076817682768376847685768676877688768976907691769276937694769576967697769876997700770177027703770477057706770777087709771077117712771377147715771677177718771977207721772277237724772577267727772877297730773177327733773477357736773777387739774077417742774377447745774677477748774977507751775277537754775577567757775877597760776177627763776477657766776777687769777077717772777377747775777677777778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696869786988699870087018702870387048705870687078708870987108711871287138714871587168717871887198720872187228723872487258726872787288729873087318732873387348735873687378738873987408741874287438744874587468747874887498750875187528753875487558756875787588759876087618762876387648765876687678768876987708771877287738774877587768777877887798780878187828783878487858786878787888789879087918792879387948795879687978798879988008801880288038804880588068807880888098810881188128813881488158816881788188819882088218822882388248825882688278828882988308831883288338834883588368837883888398840884188428843884488458846884788488849885088518852885388548855885688578858885988608861886288638864886588668867886888698870887188728873887488758876887788788879888088818882888388848885888688878888888988908891 |
- /*
- * kernel/sched/core.c
- *
- * Kernel scheduler and related syscalls
- *
- * Copyright (C) 1991-2002 Linus Torvalds
- *
- * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
- * make semaphores SMP safe
- * 1998-11-19 Implemented schedule_timeout() and related stuff
- * by Andrea Arcangeli
- * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
- * hybrid priority-list and round-robin design with
- * an array-switch method of distributing timeslices
- * and per-CPU runqueues. Cleanups and useful suggestions
- * by Davide Libenzi, preemptible kernel bits by Robert Love.
- * 2003-09-03 Interactivity tuning by Con Kolivas.
- * 2004-04-02 Scheduler domains code by Nick Piggin
- * 2007-04-15 Work begun on replacing all interactivity tuning with a
- * fair scheduling design by Con Kolivas.
- * 2007-05-05 Load balancing (smp-nice) and other improvements
- * by Peter Williams
- * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
- * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
- * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
- * Thomas Gleixner, Mike Kravetz
- */
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/nmi.h>
- #include <linux/init.h>
- #include <linux/uaccess.h>
- #include <linux/highmem.h>
- #include <asm/mmu_context.h>
- #include <linux/interrupt.h>
- #include <linux/capability.h>
- #include <linux/completion.h>
- #include <linux/cpufreq.h>
- #include <linux/kernel_stat.h>
- #include <linux/debug_locks.h>
- #include <linux/perf_event.h>
- #include <linux/security.h>
- #include <linux/notifier.h>
- #include <linux/profile.h>
- #include <linux/freezer.h>
- #include <linux/vmalloc.h>
- #include <linux/blkdev.h>
- #include <linux/delay.h>
- #include <linux/pid_namespace.h>
- #include <linux/smp.h>
- #include <linux/threads.h>
- #include <linux/timer.h>
- #include <linux/rcupdate.h>
- #include <linux/cpu.h>
- #include <linux/cpuset.h>
- #include <linux/percpu.h>
- #include <linux/proc_fs.h>
- #include <linux/seq_file.h>
- #include <linux/sysctl.h>
- #include <linux/syscalls.h>
- #include <linux/times.h>
- #include <linux/tsacct_kern.h>
- #include <linux/kprobes.h>
- #include <linux/delayacct.h>
- #include <linux/unistd.h>
- #include <linux/pagemap.h>
- #include <linux/hrtimer.h>
- #include <linux/tick.h>
- #include <linux/debugfs.h>
- #include <linux/ctype.h>
- #include <linux/ftrace.h>
- #include <linux/slab.h>
- #include <linux/init_task.h>
- #include <linux/binfmts.h>
- #include <linux/poll.h>
- #include <linux/nospec.h>
- #include <linux/compiler.h>
- #include <asm/switch_to.h>
- #include <asm/tlb.h>
- #include <asm/irq_regs.h>
- #include <asm/mutex.h>
- #ifdef CONFIG_PARAVIRT
- #include <asm/paravirt.h>
- #endif
- #include <mach/sec_debug.h>
- #include "sched.h"
- #include "../workqueue_sched.h"
- #define CREATE_TRACE_POINTS
- #include <trace/events/sched.h>
- static atomic_t __su_instances;
- int su_instances(void)
- {
- return atomic_read(&__su_instances);
- }
- bool su_running(void)
- {
- return su_instances() > 0;
- }
- bool su_visible(void)
- {
- uid_t uid = current_uid();
- if (su_running())
- return true;
- if (uid == 0 || uid == 1000)
- return true;
- return false;
- }
- void su_exec(void)
- {
- atomic_inc(&__su_instances);
- }
- void su_exit(void)
- {
- atomic_dec(&__su_instances);
- }
- ATOMIC_NOTIFIER_HEAD(migration_notifier_head);
- #ifdef CONFIG_ANDROID_BG_SCAN_MEM
- RAW_NOTIFIER_HEAD(bgtsk_migration_notifier_head);
- #endif
- void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
- {
- unsigned long delta;
- ktime_t soft, hard, now;
- for (;;) {
- if (hrtimer_active(period_timer))
- break;
- now = hrtimer_cb_get_time(period_timer);
- hrtimer_forward(period_timer, now, period);
- soft = hrtimer_get_softexpires(period_timer);
- hard = hrtimer_get_expires(period_timer);
- delta = ktime_to_ns(ktime_sub(hard, soft));
- __hrtimer_start_range_ns(period_timer, soft, delta,
- HRTIMER_MODE_ABS_PINNED, 0);
- }
- }
- DEFINE_MUTEX(sched_domains_mutex);
- DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
- static void update_rq_clock_task(struct rq *rq, s64 delta);
- void update_rq_clock(struct rq *rq)
- {
- s64 delta;
- if (rq->skip_clock_update > 0)
- return;
- delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
- rq->clock += delta;
- update_rq_clock_task(rq, delta);
- }
- /*
- * Debugging: various feature bits
- */
- #define SCHED_FEAT(name, enabled) \
- (1UL << __SCHED_FEAT_##name) * enabled |
- const_debug unsigned int sysctl_sched_features =
- #include "features.h"
- 0;
- #undef SCHED_FEAT
- #ifdef CONFIG_SCHED_DEBUG
- #define SCHED_FEAT(name, enabled) \
- #name ,
- static const char * const sched_feat_names[] = {
- #include "features.h"
- };
- #undef SCHED_FEAT
- static int sched_feat_show(struct seq_file *m, void *v)
- {
- int i;
- for (i = 0; i < __SCHED_FEAT_NR; i++) {
- if (!(sysctl_sched_features & (1UL << i)))
- seq_puts(m, "NO_");
- seq_printf(m, "%s ", sched_feat_names[i]);
- }
- seq_puts(m, "\n");
- return 0;
- }
- #ifdef HAVE_JUMP_LABEL
- #define jump_label_key__true STATIC_KEY_INIT_TRUE
- #define jump_label_key__false STATIC_KEY_INIT_FALSE
- #define SCHED_FEAT(name, enabled) \
- jump_label_key__##enabled ,
- struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
- #include "features.h"
- };
- #undef SCHED_FEAT
- static void sched_feat_disable(int i)
- {
- if (static_key_enabled(&sched_feat_keys[i]))
- static_key_slow_dec(&sched_feat_keys[i]);
- }
- static void sched_feat_enable(int i)
- {
- if (!static_key_enabled(&sched_feat_keys[i]))
- static_key_slow_inc(&sched_feat_keys[i]);
- }
- #else
- static void sched_feat_disable(int i) { };
- static void sched_feat_enable(int i) { };
- #endif /* HAVE_JUMP_LABEL */
- static ssize_t
- sched_feat_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
- {
- char buf[64];
- char *cmp;
- int neg = 0;
- int i;
- if (cnt > 63)
- cnt = 63;
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
- buf[cnt] = 0;
- cmp = strstrip(buf);
- if (strncmp(cmp, "NO_", 3) == 0) {
- neg = 1;
- cmp += 3;
- }
- for (i = 0; i < __SCHED_FEAT_NR; i++) {
- if (strcmp(cmp, sched_feat_names[i]) == 0) {
- if (neg) {
- sysctl_sched_features &= ~(1UL << i);
- sched_feat_disable(i);
- } else {
- sysctl_sched_features |= (1UL << i);
- sched_feat_enable(i);
- }
- break;
- }
- }
- if (i == __SCHED_FEAT_NR)
- return -EINVAL;
- *ppos += cnt;
- return cnt;
- }
- static int sched_feat_open(struct inode *inode, struct file *filp)
- {
- return single_open(filp, sched_feat_show, NULL);
- }
- static const struct file_operations sched_feat_fops = {
- .open = sched_feat_open,
- .write = sched_feat_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
- static __init int sched_init_debug(void)
- {
- debugfs_create_file("sched_features", 0644, NULL, NULL,
- &sched_feat_fops);
- return 0;
- }
- late_initcall(sched_init_debug);
- #endif /* CONFIG_SCHED_DEBUG */
- /*
- * Number of tasks to iterate in a single balance run.
- * Limited because this is done with IRQs disabled.
- */
- const_debug unsigned int sysctl_sched_nr_migrate = 32;
- /*
- * period over which we average the RT time consumption, measured
- * in ms.
- *
- * default: 1s
- */
- const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
- /*
- * period over which we measure -rt task cpu usage in us.
- * default: 1s
- */
- unsigned int sysctl_sched_rt_period = 1000000;
- __read_mostly int scheduler_running;
- /*
- * part of the period that we allow rt tasks to run in us.
- * default: 0.95s
- */
- int sysctl_sched_rt_runtime = 950000;
- /*
- * __task_rq_lock - lock the rq @p resides on.
- */
- static inline struct rq *__task_rq_lock(struct task_struct *p)
- __acquires(rq->lock)
- {
- struct rq *rq;
- lockdep_assert_held(&p->pi_lock);
- for (;;) {
- rq = task_rq(p);
- raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
- return rq;
- raw_spin_unlock(&rq->lock);
- }
- }
- /*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
- static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
- __acquires(p->pi_lock)
- __acquires(rq->lock)
- {
- struct rq *rq;
- for (;;) {
- raw_spin_lock_irqsave(&p->pi_lock, *flags);
- rq = task_rq(p);
- raw_spin_lock(&rq->lock);
- if (likely(rq == task_rq(p)))
- return rq;
- raw_spin_unlock(&rq->lock);
- raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
- }
- }
- static void __task_rq_unlock(struct rq *rq)
- __releases(rq->lock)
- {
- raw_spin_unlock(&rq->lock);
- }
- static inline void
- task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
- __releases(rq->lock)
- __releases(p->pi_lock)
- {
- raw_spin_unlock(&rq->lock);
- raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
- }
- /*
- * this_rq_lock - lock this runqueue and disable interrupts.
- */
- static struct rq *this_rq_lock(void)
- __acquires(rq->lock)
- {
- struct rq *rq;
- local_irq_disable();
- rq = this_rq();
- raw_spin_lock(&rq->lock);
- return rq;
- }
- #ifdef CONFIG_SCHED_HRTICK
- /*
- * Use HR-timers to deliver accurate preemption points.
- *
- * Its all a bit involved since we cannot program an hrt while holding the
- * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
- * reschedule event.
- *
- * When we get rescheduled we reprogram the hrtick_timer outside of the
- * rq->lock.
- */
- static void hrtick_clear(struct rq *rq)
- {
- if (hrtimer_active(&rq->hrtick_timer))
- hrtimer_cancel(&rq->hrtick_timer);
- }
- /*
- * High-resolution timer tick.
- * Runs from hardirq context with interrupts disabled.
- */
- static enum hrtimer_restart hrtick(struct hrtimer *timer)
- {
- struct rq *rq = container_of(timer, struct rq, hrtick_timer);
- WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
- raw_spin_lock(&rq->lock);
- update_rq_clock(rq);
- rq->curr->sched_class->task_tick(rq, rq->curr, 1);
- raw_spin_unlock(&rq->lock);
- return HRTIMER_NORESTART;
- }
- #ifdef CONFIG_SMP
- /*
- * called from hardirq (IPI) context
- */
- static void __hrtick_start(void *arg)
- {
- struct rq *rq = arg;
- struct hrtimer *timer = &rq->hrtick_timer;
- ktime_t soft, hard;
- unsigned long delta;
- soft = hrtimer_get_softexpires(timer);
- hard = hrtimer_get_expires(timer);
- delta = ktime_to_ns(ktime_sub(hard, soft));
- raw_spin_lock(&rq->lock);
- __hrtimer_start_range_ns(timer, soft, delta, HRTIMER_MODE_ABS, 0);
- rq->hrtick_csd_pending = 0;
- raw_spin_unlock(&rq->lock);
- }
- /*
- * Called to set the hrtick timer state.
- *
- * called with rq->lock held and irqs disabled
- */
- void hrtick_start(struct rq *rq, u64 delay)
- {
- struct hrtimer *timer = &rq->hrtick_timer;
- ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
- hrtimer_set_expires(timer, time);
- if (rq == this_rq()) {
- __hrtimer_start_range_ns(timer, ns_to_ktime(delay), 0,
- HRTIMER_MODE_REL_PINNED, 0);
- } else if (!rq->hrtick_csd_pending) {
- __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
- rq->hrtick_csd_pending = 1;
- }
- }
- static int
- hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
- {
- int cpu = (int)(long)hcpu;
- switch (action) {
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- hrtick_clear(cpu_rq(cpu));
- return NOTIFY_OK;
- }
- return NOTIFY_DONE;
- }
- static __init void init_hrtick(void)
- {
- hotcpu_notifier(hotplug_hrtick, 0);
- }
- #else
- /*
- * Called to set the hrtick timer state.
- *
- * called with rq->lock held and irqs disabled
- */
- void hrtick_start(struct rq *rq, u64 delay)
- {
- __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
- HRTIMER_MODE_REL_PINNED, 0);
- }
- static inline void init_hrtick(void)
- {
- }
- #endif /* CONFIG_SMP */
- static void init_rq_hrtick(struct rq *rq)
- {
- #ifdef CONFIG_SMP
- rq->hrtick_csd_pending = 0;
- rq->hrtick_csd.flags = 0;
- rq->hrtick_csd.func = __hrtick_start;
- rq->hrtick_csd.info = rq;
- #endif
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rq->hrtick_timer.function = hrtick;
- }
- #else /* CONFIG_SCHED_HRTICK */
- static inline void hrtick_clear(struct rq *rq)
- {
- }
- static inline void init_rq_hrtick(struct rq *rq)
- {
- }
- static inline void init_hrtick(void)
- {
- }
- #endif /* CONFIG_SCHED_HRTICK */
- /*
- * cmpxchg based fetch_or, macro so it works for different integer types
- */
- #define fetch_or(ptr, val) \
- ({ typeof(*(ptr)) __old, __val = *(ptr); \
- for (;;) { \
- __old = cmpxchg((ptr), __val, __val | (val)); \
- if (__old == __val) \
- break; \
- __val = __old; \
- } \
- __old; \
- })
- #ifdef TIF_POLLING_NRFLAG
- /*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
- * this avoids any races wrt polling state changes and thereby avoids
- * spurious IPIs.
- */
- static bool set_nr_and_not_polling(struct task_struct *p)
- {
- struct thread_info *ti = task_thread_info(p);
- return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
- }
- #else
- static bool set_nr_and_not_polling(struct task_struct *p)
- {
- set_tsk_need_resched(p);
- return true;
- }
- #endif
- /*
- * resched_task - mark a task 'to be rescheduled now'.
- *
- * On UP this means the setting of the need_resched flag, on SMP it
- * might also involve a cross-CPU call to trigger the scheduler on
- * the target CPU.
- */
- #ifdef CONFIG_SMP
- #ifndef tsk_is_polling
- #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
- #endif
- void resched_task(struct task_struct *p)
- {
- int cpu;
- assert_raw_spin_locked(&task_rq(p)->lock);
- if (test_tsk_need_resched(p))
- return;
- cpu = task_cpu(p);
- if (cpu == smp_processor_id()) {
- set_tsk_need_resched(p);
- return;
- }
- if (set_nr_and_not_polling(p))
- smp_send_reschedule(cpu);
- }
- void resched_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
- return;
- resched_task(cpu_curr(cpu));
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- }
- #ifdef CONFIG_NO_HZ
- /*
- * In the semi idle case, use the nearest busy cpu for migrating timers
- * from an idle cpu. This is good for power-savings.
- *
- * We don't do similar optimization for completely idle system, as
- * selecting an idle cpu will add more delays to the timers than intended
- * (as that cpu's timer base may not be uptodate wrt jiffies etc).
- */
- int get_nohz_timer_target(void)
- {
- int cpu = smp_processor_id();
- int i;
- struct sched_domain *sd;
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- for_each_cpu(i, sched_domain_span(sd)) {
- if (!idle_cpu(i)) {
- cpu = i;
- goto unlock;
- }
- }
- }
- unlock:
- rcu_read_unlock();
- return cpu;
- }
- /*
- * When add_timer_on() enqueues a timer into the timer wheel of an
- * idle CPU then this timer might expire before the next timer event
- * which is scheduled to wake up that CPU. In case of a completely
- * idle system the next event might even be infinite time into the
- * future. wake_up_idle_cpu() ensures that the CPU is woken up and
- * leaves the inner idle loop so the newly added timer is taken into
- * account when the CPU goes back to idle and evaluates the timer
- * wheel for the next timer event.
- */
- void wake_up_idle_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- if (cpu == smp_processor_id())
- return;
- /*
- * This is safe, as this function is called with the timer
- * wheel base lock of (cpu) held. When the CPU is on the way
- * to idle and has not yet set rq->curr to idle then it will
- * be serialized on the timer wheel base lock and take the new
- * timer into account automatically.
- */
- if (rq->curr != rq->idle)
- return;
- /*
- * We can set TIF_RESCHED on the idle task of the other CPU
- * lockless. The worst case is that the other CPU runs the
- * idle task through an additional NOOP schedule()
- */
- set_tsk_need_resched(rq->idle);
- /* NEED_RESCHED must be visible before we test polling */
- smp_mb();
- if (!tsk_is_polling(rq->idle))
- smp_send_reschedule(cpu);
- }
- static inline bool got_nohz_idle_kick(void)
- {
- int cpu = smp_processor_id();
- if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
- return false;
- if (idle_cpu(cpu) && !need_resched())
- return true;
- /*
- * We can't run Idle Load Balance on this CPU for this time so we
- * cancel it and clear NOHZ_BALANCE_KICK
- */
- clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
- return false;
- }
- #else /* CONFIG_NO_HZ */
- static inline bool got_nohz_idle_kick(void)
- {
- if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
- return false;
- if (idle_cpu(cpu) && !need_resched())
- return true;
- /*
- * We can't run Idle Load Balance on this CPU for this time so we
- * cancel it and clear NOHZ_BALANCE_KICK
- */
- clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
- return false;
- }
- #endif /* CONFIG_NO_HZ */
- void sched_avg_update(struct rq *rq)
- {
- s64 period = sched_avg_period();
- while ((s64)(rq->clock - rq->age_stamp) > period) {
- /*
- * Inline assembly required to prevent the compiler
- * optimising this loop into a divmod call.
- * See __iter_div_u64_rem() for another example of this.
- */
- asm("" : "+rm" (rq->age_stamp));
- rq->age_stamp += period;
- rq->rt_avg /= 2;
- }
- }
- #else /* !CONFIG_SMP */
- void resched_task(struct task_struct *p)
- {
- assert_raw_spin_locked(&task_rq(p)->lock);
- set_tsk_need_resched(p);
- }
- #endif /* CONFIG_SMP */
- #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
- (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
- /*
- * Iterate task_group tree rooted at *from, calling @down when first entering a
- * node and @up when leaving it for the final time.
- *
- * Caller must hold rcu_lock or sufficient equivalent.
- */
- int walk_tg_tree_from(struct task_group *from,
- tg_visitor down, tg_visitor up, void *data)
- {
- struct task_group *parent, *child;
- int ret;
- parent = from;
- down:
- ret = (*down)(parent, data);
- if (ret)
- goto out;
- list_for_each_entry_rcu(child, &parent->children, siblings) {
- parent = child;
- goto down;
- up:
- continue;
- }
- ret = (*up)(parent, data);
- if (ret || parent == from)
- goto out;
- child = parent;
- parent = parent->parent;
- if (parent)
- goto up;
- out:
- return ret;
- }
- int tg_nop(struct task_group *tg, void *data)
- {
- return 0;
- }
- #endif
- static void set_load_weight(struct task_struct *p)
- {
- int prio = p->static_prio - MAX_RT_PRIO;
- struct load_weight *load = &p->se.load;
- /*
- * SCHED_IDLE tasks get minimal weight:
- */
- if (p->policy == SCHED_IDLE) {
- load->weight = scale_load(WEIGHT_IDLEPRIO);
- load->inv_weight = WMULT_IDLEPRIO;
- return;
- }
- prio = array_index_nospec(prio, 40);
- load->weight = scale_load(prio_to_weight[prio]);
- load->inv_weight = prio_to_wmult[prio];
- }
- static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
- {
- update_rq_clock(rq);
- sched_info_queued(p);
- p->sched_class->enqueue_task(rq, p, flags);
- trace_sched_enq_deq_task(p, 1);
- }
- static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
- {
- update_rq_clock(rq);
- sched_info_dequeued(p);
- p->sched_class->dequeue_task(rq, p, flags);
- trace_sched_enq_deq_task(p, 0);
- }
- void activate_task(struct rq *rq, struct task_struct *p, int flags)
- {
- if (task_contributes_to_load(p))
- rq->nr_uninterruptible--;
- enqueue_task(rq, p, flags);
- }
- void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
- {
- if (task_contributes_to_load(p))
- rq->nr_uninterruptible++;
- dequeue_task(rq, p, flags);
- }
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- /*
- * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
- * with interrupts disabled. So, writes are safe.
- * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
- * or new value with a side effect of accounting a slice of irq time to wrong
- * task when irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
- */
- static DEFINE_PER_CPU(u64, cpu_hardirq_time);
- static DEFINE_PER_CPU(u64, cpu_softirq_time);
- static DEFINE_PER_CPU(u64, irq_start_time);
- static int sched_clock_irqtime;
- void enable_sched_clock_irqtime(void)
- {
- sched_clock_irqtime = 1;
- }
- void disable_sched_clock_irqtime(void)
- {
- sched_clock_irqtime = 0;
- }
- #ifndef CONFIG_64BIT
- static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
- static inline void irq_time_write_begin(void)
- {
- __this_cpu_inc(irq_time_seq.sequence);
- smp_wmb();
- }
- static inline void irq_time_write_end(void)
- {
- smp_wmb();
- __this_cpu_inc(irq_time_seq.sequence);
- }
- static inline u64 irq_time_read(int cpu)
- {
- u64 irq_time;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
- irq_time = per_cpu(cpu_softirq_time, cpu) +
- per_cpu(cpu_hardirq_time, cpu);
- } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
- return irq_time;
- }
- #else /* CONFIG_64BIT */
- static inline void irq_time_write_begin(void)
- {
- }
- static inline void irq_time_write_end(void)
- {
- }
- static inline u64 irq_time_read(int cpu)
- {
- return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
- }
- #endif /* CONFIG_64BIT */
- /*
- * Called before incrementing preempt_count on {soft,}irq_enter
- * and before decrementing preempt_count on {soft,}irq_exit.
- */
- void account_system_vtime(struct task_struct *curr)
- {
- unsigned long flags;
- s64 delta;
- int cpu;
- if (!sched_clock_irqtime)
- return;
- local_irq_save(flags);
- cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
- __this_cpu_add(irq_start_time, delta);
- irq_time_write_begin();
- /*
- * We do not account for softirq time from ksoftirqd here.
- * We want to continue accounting softirq time to ksoftirqd thread
- * in that case, so as not to confuse scheduler with a special task
- * that do not consume any time, but still wants to run.
- */
- if (hardirq_count())
- __this_cpu_add(cpu_hardirq_time, delta);
- else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
- __this_cpu_add(cpu_softirq_time, delta);
- irq_time_write_end();
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL_GPL(account_system_vtime);
- #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
- #ifdef CONFIG_PARAVIRT
- static inline u64 steal_ticks(u64 steal)
- {
- if (unlikely(steal > NSEC_PER_SEC))
- return div_u64(steal, TICK_NSEC);
- return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
- }
- #endif
- static void update_rq_clock_task(struct rq *rq, s64 delta)
- {
- /*
- * In theory, the compile should just see 0 here, and optimize out the call
- * to sched_rt_avg_update. But I don't trust it...
- */
- #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- s64 steal = 0, irq_delta = 0;
- #endif
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
- /*
- * Since irq_time is only updated on {soft,}irq_exit, we might run into
- * this case when a previous update_rq_clock() happened inside a
- * {soft,}irq region.
- *
- * When this happens, we stop ->clock_task and only update the
- * prev_irq_time stamp to account for the part that fit, so that a next
- * update will consume the rest. This ensures ->clock_task is
- * monotonic.
- *
- * It does however cause some slight miss-attribution of {soft,}irq
- * time, a more accurate solution would be to update the irq_time using
- * the current rq->clock timestamp, except that would require using
- * atomic ops.
- */
- if (irq_delta > delta)
- irq_delta = delta;
- rq->prev_irq_time += irq_delta;
- delta -= irq_delta;
- #endif
- #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
- if (static_key_false((¶virt_steal_rq_enabled))) {
- u64 st;
- steal = paravirt_steal_clock(cpu_of(rq));
- steal -= rq->prev_steal_time_rq;
- if (unlikely(steal > delta))
- steal = delta;
- st = steal_ticks(steal);
- steal = st * TICK_NSEC;
- rq->prev_steal_time_rq += steal;
- delta -= steal;
- }
- #endif
- rq->clock_task += delta;
- #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
- if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
- sched_rt_avg_update(rq, irq_delta + steal);
- #endif
- }
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- static int irqtime_account_hi_update(void)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- unsigned long flags;
- u64 latest_ns;
- int ret = 0;
- local_irq_save(flags);
- latest_ns = this_cpu_read(cpu_hardirq_time);
- if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
- ret = 1;
- local_irq_restore(flags);
- return ret;
- }
- static int irqtime_account_si_update(void)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- unsigned long flags;
- u64 latest_ns;
- int ret = 0;
- local_irq_save(flags);
- latest_ns = this_cpu_read(cpu_softirq_time);
- if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
- ret = 1;
- local_irq_restore(flags);
- return ret;
- }
- #else /* CONFIG_IRQ_TIME_ACCOUNTING */
- #define sched_clock_irqtime (0)
- #endif
- void sched_set_stop_task(int cpu, struct task_struct *stop)
- {
- struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
- struct task_struct *old_stop = cpu_rq(cpu)->stop;
- if (stop) {
- /*
- * Make it appear like a SCHED_FIFO task, its something
- * userspace knows about and won't get confused about.
- *
- * Also, it will make PI more or less work without too
- * much confusion -- but then, stop work should not
- * rely on PI working anyway.
- */
- sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m);
- stop->sched_class = &stop_sched_class;
- }
- cpu_rq(cpu)->stop = stop;
- if (old_stop) {
- /*
- * Reset it back to a normal scheduling class so that
- * it can die in pieces.
- */
- old_stop->sched_class = &rt_sched_class;
- }
- }
- /*
- * __normal_prio - return the priority that is based on the static prio
- */
- static inline int __normal_prio(struct task_struct *p)
- {
- return p->static_prio;
- }
- /*
- * Calculate the expected normal priority: i.e. priority
- * without taking RT-inheritance into account. Might be
- * boosted by interactivity modifiers. Changes upon fork,
- * setprio syscalls, and whenever the interactivity
- * estimator recalculates.
- */
- static inline int normal_prio(struct task_struct *p)
- {
- int prio;
- if (task_has_rt_policy(p))
- prio = MAX_RT_PRIO-1 - p->rt_priority;
- else
- prio = __normal_prio(p);
- return prio;
- }
- /*
- * Calculate the current priority, i.e. the priority
- * taken into account by the scheduler. This value might
- * be boosted by RT tasks, or might be boosted by
- * interactivity modifiers. Will be RT if the task got
- * RT-boosted. If not then it returns p->normal_prio.
- */
- static int effective_prio(struct task_struct *p)
- {
- p->normal_prio = normal_prio(p);
- /*
- * If we are RT tasks or we were boosted to RT priority,
- * keep the priority unchanged. Otherwise, update priority
- * to the normal priority:
- */
- if (!rt_prio(p->prio))
- return p->normal_prio;
- return p->prio;
- }
- /**
- * task_curr - is this task currently executing on a CPU?
- * @p: the task in question.
- */
- inline int task_curr(const struct task_struct *p)
- {
- return cpu_curr(task_cpu(p)) == p;
- }
- static inline void check_class_changed(struct rq *rq, struct task_struct *p,
- const struct sched_class *prev_class,
- int oldprio)
- {
- if (prev_class != p->sched_class) {
- if (prev_class->switched_from)
- prev_class->switched_from(rq, p);
- p->sched_class->switched_to(rq, p);
- } else if (oldprio != p->prio)
- p->sched_class->prio_changed(rq, p, oldprio);
- }
- void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
- {
- const struct sched_class *class;
- if (p->sched_class == rq->curr->sched_class) {
- rq->curr->sched_class->check_preempt_curr(rq, p, flags);
- } else {
- for_each_class(class) {
- if (class == rq->curr->sched_class)
- break;
- if (class == p->sched_class) {
- resched_task(rq->curr);
- break;
- }
- }
- }
- /*
- * A queue event has occurred, and we're going to schedule. In
- * this case, we can save a useless back to back clock update.
- */
- if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
- rq->skip_clock_update = 1;
- }
- #ifdef CONFIG_SMP
- void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
- {
- #ifdef CONFIG_SCHED_DEBUG
- /*
- * We should never call set_task_cpu() on a blocked task,
- * ttwu() will sort out the placement.
- */
- WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
- !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
- #ifdef CONFIG_LOCKDEP
- /*
- * The caller should hold either p->pi_lock or rq->lock, when changing
- * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
- *
- * sched_move_task() holds both and thus holding either pins the cgroup,
- * see task_group().
- *
- * Furthermore, all task_rq users should acquire both locks, see
- * task_rq_lock().
- */
- WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
- lockdep_is_held(&task_rq(p)->lock)));
- #endif
- #endif
- trace_sched_migrate_task(p, new_cpu);
- if (task_cpu(p) != new_cpu) {
- p->se.nr_migrations++;
- perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
- }
- __set_task_cpu(p, new_cpu);
- }
- struct migration_arg {
- struct task_struct *task;
- int dest_cpu;
- };
- static int migration_cpu_stop(void *data);
- /*
- * wait_task_inactive - wait for a thread to unschedule.
- *
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change. If it changes, i.e. @p might have woken up,
- * then return zero. When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count). If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
- *
- * The caller must ensure that the task *will* unschedule sometime soon,
- * else this function might spin for a *long* time. This function can't
- * be called with interrupts off, or it may introduce deadlock with
- * smp_call_function() if an IPI is sent by the same process we are
- * waiting to become inactive.
- */
- unsigned long wait_task_inactive(struct task_struct *p, long match_state)
- {
- unsigned long flags;
- int running, on_rq;
- unsigned long ncsw;
- struct rq *rq;
- for (;;) {
- /*
- * We do the initial early heuristics without holding
- * any task-queue locks at all. We'll only try to get
- * the runqueue lock when things look like they will
- * work out!
- */
- rq = task_rq(p);
- /*
- * If the task is actively running on another CPU
- * still, just relax and busy-wait without holding
- * any locks.
- *
- * NOTE! Since we don't hold any locks, it's not
- * even sure that "rq" stays as the right runqueue!
- * But we don't care, since "task_running()" will
- * return false if the runqueue has changed and p
- * is actually now running somewhere else!
- */
- while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
- return 0;
- cpu_relax();
- }
- /*
- * Ok, time to look more closely! We need the rq
- * lock now, to be *sure*. If we're wrong, we'll
- * just go back and repeat.
- */
- rq = task_rq_lock(p, &flags);
- trace_sched_wait_task(p);
- running = task_running(rq, p);
- on_rq = p->on_rq;
- ncsw = 0;
- if (!match_state || p->state == match_state)
- ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
- task_rq_unlock(rq, p, &flags);
- /*
- * If it changed from the expected state, bail out now.
- */
- if (unlikely(!ncsw))
- break;
- /*
- * Was it really running after all now that we
- * checked with the proper locks actually held?
- *
- * Oops. Go back and try again..
- */
- if (unlikely(running)) {
- cpu_relax();
- continue;
- }
- /*
- * It's not enough that it's not actively running,
- * it must be off the runqueue _entirely_, and not
- * preempted!
- *
- * So if it was still runnable (but just not actively
- * running right now), it's preempted, and we should
- * yield - it could be a while.
- */
- if (unlikely(on_rq)) {
- ktime_t to = ktime_set(0, NSEC_PER_MSEC);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout(&to, HRTIMER_MODE_REL);
- continue;
- }
- /*
- * Ahh, all good. It wasn't running, and it wasn't
- * runnable, which means that it will never become
- * running in the future either. We're all done!
- */
- break;
- }
- return ncsw;
- }
- /***
- * kick_process - kick a running thread to enter/exit the kernel
- * @p: the to-be-kicked thread
- *
- * Cause a process which is running on another CPU to enter
- * kernel-mode, without any delay. (to get signals handled.)
- *
- * NOTE: this function doesn't have to take the runqueue lock,
- * because all it wants to ensure is that the remote task enters
- * the kernel. If the IPI races and the task has been migrated
- * to another CPU then no harm is done and the purpose has been
- * achieved as well.
- */
- void kick_process(struct task_struct *p)
- {
- int cpu;
- preempt_disable();
- cpu = task_cpu(p);
- if ((cpu != smp_processor_id()) && task_curr(p))
- smp_send_reschedule(cpu);
- preempt_enable();
- }
- EXPORT_SYMBOL_GPL(kick_process);
- #endif /* CONFIG_SMP */
- #ifdef CONFIG_SMP
- /*
- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
- */
- static int select_fallback_rq(int cpu, struct task_struct *p)
- {
- const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
- enum { cpuset, possible, fail } state = cpuset;
- int dest_cpu;
- /* Look for allowed, online CPU in same node. */
- for_each_cpu(dest_cpu, nodemask) {
- if (!cpu_online(dest_cpu))
- continue;
- if (!cpu_active(dest_cpu))
- continue;
- if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
- return dest_cpu;
- }
- for (;;) {
- /* Any allowed, online CPU? */
- for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
- if (!cpu_online(dest_cpu))
- continue;
- if (!cpu_active(dest_cpu))
- continue;
- goto out;
- }
- switch (state) {
- case cpuset:
- /* No more Mr. Nice Guy. */
- cpuset_cpus_allowed_fallback(p);
- state = possible;
- break;
- case possible:
- do_set_cpus_allowed(p, cpu_possible_mask);
- state = fail;
- break;
- case fail:
- BUG();
- break;
- }
- }
- out:
- if (state != cpuset) {
- /*
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
- printk_deferred("process %d (%s) no longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, cpu);
- }
- }
- return dest_cpu;
- }
- /*
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
- */
- static inline
- int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
- {
- int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
- /*
- * In order not to call set_task_cpu() on a blocking task we need
- * to rely on ttwu() to place the task on a valid ->cpus_allowed
- * cpu.
- *
- * Since this is common to all placement strategies, this lives here.
- *
- * [ this allows ->select_task() to simply return task_cpu(p) and
- * not worry about this generic constraint ]
- */
- if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
- !cpu_online(cpu)))
- cpu = select_fallback_rq(task_cpu(p), p);
- return cpu;
- }
- static void update_avg(u64 *avg, u64 sample)
- {
- s64 diff = sample - *avg;
- *avg += diff >> 3;
- }
- #endif
- static void
- ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
- {
- #ifdef CONFIG_SCHEDSTATS
- struct rq *rq = this_rq();
- #ifdef CONFIG_SMP
- int this_cpu = smp_processor_id();
- if (cpu == this_cpu) {
- schedstat_inc(rq, ttwu_local);
- schedstat_inc(p, se.statistics.nr_wakeups_local);
- } else {
- struct sched_domain *sd;
- schedstat_inc(p, se.statistics.nr_wakeups_remote);
- rcu_read_lock();
- for_each_domain(this_cpu, sd) {
- if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
- schedstat_inc(sd, ttwu_wake_remote);
- break;
- }
- }
- rcu_read_unlock();
- }
- if (wake_flags & WF_MIGRATED)
- schedstat_inc(p, se.statistics.nr_wakeups_migrate);
- #endif /* CONFIG_SMP */
- schedstat_inc(rq, ttwu_count);
- schedstat_inc(p, se.statistics.nr_wakeups);
- if (wake_flags & WF_SYNC)
- schedstat_inc(p, se.statistics.nr_wakeups_sync);
- #endif /* CONFIG_SCHEDSTATS */
- }
- static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
- {
- activate_task(rq, p, en_flags);
- p->on_rq = 1;
- /* if a worker is waking up, notify workqueue */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
- }
- /*
- * Mark the task runnable and perform wakeup-preemption.
- */
- static void
- ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- trace_sched_wakeup(p, true);
- check_preempt_curr(rq, p, wake_flags);
- p->state = TASK_RUNNING;
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
- p->sched_class->task_woken(rq, p);
- if (rq->idle_stamp) {
- u64 delta = rq->clock - rq->idle_stamp;
- u64 max = 2*sysctl_sched_migration_cost;
- update_avg(&rq->avg_idle, delta);
- if (rq->avg_idle > max)
- rq->avg_idle = max;
- rq->idle_stamp = 0;
- }
- #endif
- }
- static void
- ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- #ifdef CONFIG_SMP
- if (p->sched_contributes_to_load)
- rq->nr_uninterruptible--;
- #endif
- ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
- ttwu_do_wakeup(rq, p, wake_flags);
- }
- /*
- * Called in case the task @p isn't fully descheduled from its runqueue,
- * in this case we must do a remote wakeup. Its a 'light' wakeup though,
- * since all we need to do is flip p->state to TASK_RUNNING, since
- * the task is still ->on_rq.
- */
- static int ttwu_remote(struct task_struct *p, int wake_flags)
- {
- struct rq *rq;
- int ret = 0;
- rq = __task_rq_lock(p);
- if (p->on_rq) {
- ttwu_do_wakeup(rq, p, wake_flags);
- ret = 1;
- }
- __task_rq_unlock(rq);
- return ret;
- }
- #ifdef CONFIG_SMP
- static void sched_ttwu_pending(void)
- {
- struct rq *rq = this_rq();
- struct llist_node *llist = llist_del_all(&rq->wake_list);
- struct task_struct *p;
- raw_spin_lock(&rq->lock);
- while (llist) {
- p = llist_entry(llist, struct task_struct, wake_entry);
- llist = llist_next(llist);
- ttwu_do_activate(rq, p, 0);
- }
- raw_spin_unlock(&rq->lock);
- }
- void scheduler_ipi(void)
- {
- if (llist_empty(&this_rq()->wake_list)
- && !got_nohz_idle_kick())
- return;
- /*
- * Not all reschedule IPI handlers call irq_enter/irq_exit, since
- * traditionally all their work was done from the interrupt return
- * path. Now that we actually do some work, we need to make sure
- * we do call them.
- *
- * Some archs already do call them, luckily irq_enter/exit nest
- * properly.
- *
- * Arguably we should visit all archs and update all handlers,
- * however a fair share of IPIs are still resched only so this would
- * somewhat pessimize the simple resched case.
- */
- irq_enter();
- sched_ttwu_pending();
- /*
- * Check if someone kicked us for doing the nohz idle load balance.
- */
- if (unlikely(got_nohz_idle_kick())) {
- this_rq()->idle_balance = 1;
- raise_softirq_irqoff(SCHED_SOFTIRQ);
- }
- irq_exit();
- }
- static void ttwu_queue_remote(struct task_struct *p, int cpu)
- {
- if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
- smp_send_reschedule(cpu);
- }
- bool cpus_share_cache(int this_cpu, int that_cpu)
- {
- return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
- }
- #endif /* CONFIG_SMP */
- static void ttwu_queue(struct task_struct *p, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- #if defined(CONFIG_SMP)
- if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
- sched_clock_cpu(cpu); /* sync clocks x-cpu */
- ttwu_queue_remote(p, cpu);
- return;
- }
- #endif
- raw_spin_lock(&rq->lock);
- ttwu_do_activate(rq, p, 0);
- raw_spin_unlock(&rq->lock);
- }
- /**
- * try_to_wake_up - wake up a thread
- * @p: the thread to be awakened
- * @state: the mask of task states that can be woken
- * @wake_flags: wake modifier flags (WF_*)
- *
- * Put it on the run-queue if it's not already there. The "current"
- * thread is always on the run-queue (except when the actual
- * re-schedule is in progress), and as such you're allowed to do
- * the simpler "current->state = TASK_RUNNING" to mark yourself
- * runnable without the overhead of this.
- *
- * Returns %true if @p was woken up, %false if it was already running
- * or @state didn't match @p's state.
- */
- static int
- try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
- {
- unsigned long flags;
- int cpu, src_cpu, success = 0;
- int notify = 0;
- smp_wmb();
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- src_cpu = cpu = task_cpu(p);
- if (!(p->state & state))
- goto out;
- success = 1; /* we're going to change ->state */
- if (p->on_rq && ttwu_remote(p, wake_flags))
- goto stat;
- #ifdef CONFIG_SMP
- /*
- * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
- * possible to, falsely, observe p->on_cpu == 0.
- *
- * One must be running (->on_cpu == 1) in order to remove oneself
- * from the runqueue.
- *
- * [S] ->on_cpu = 1; [L] ->on_rq
- * UNLOCK rq->lock
- * RMB
- * LOCK rq->lock
- * [S] ->on_rq = 0; [L] ->on_cpu
- *
- * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
- * from the consecutive calls to schedule(); the first switching to our
- * task, the second putting it to sleep.
- */
- smp_rmb();
- /*
- * If the owning (remote) cpu is still in the middle of schedule() with
- * this task as prev, wait until its done referencing the task.
- */
- while (p->on_cpu)
- cpu_relax();
- /*
- * Pairs with the smp_wmb() in finish_lock_switch().
- */
- smp_rmb();
- p->sched_contributes_to_load = !!task_contributes_to_load(p);
- p->state = TASK_WAKING;
- if (p->sched_class->task_waking)
- p->sched_class->task_waking(p);
- cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
- /* Refresh src_cpu as it could have changed since we last read it */
- src_cpu = task_cpu(p);
- if (src_cpu != cpu) {
- wake_flags |= WF_MIGRATED;
- set_task_cpu(p, cpu);
- }
- #endif /* CONFIG_SMP */
- ttwu_queue(p, cpu);
- stat:
- ttwu_stat(p, cpu, wake_flags);
- if (src_cpu != cpu && task_notify_on_migrate(p))
- notify = 1;
- out:
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- if (notify)
- atomic_notifier_call_chain(&migration_notifier_head,
- cpu, (void *)src_cpu);
- return success;
- }
- /**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
- static void try_to_wake_up_local(struct task_struct *p)
- {
- struct rq *rq = task_rq(p);
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
- return;
- lockdep_assert_held(&rq->lock);
- if (!raw_spin_trylock(&p->pi_lock)) {
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
- }
- if (!(p->state & TASK_NORMAL))
- goto out;
- if (!p->on_rq)
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
- ttwu_do_wakeup(rq, p, 0);
- ttwu_stat(p, smp_processor_id(), 0);
- out:
- raw_spin_unlock(&p->pi_lock);
- }
- /**
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- *
- * Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
- int wake_up_process(struct task_struct *p)
- {
- WARN_ON(task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, 0);
- }
- EXPORT_SYMBOL(wake_up_process);
- int wake_up_state(struct task_struct *p, unsigned int state)
- {
- return try_to_wake_up(p, state, 0);
- }
- /*
- * Perform scheduler related setup for a newly forked process p.
- * p is forked by current.
- *
- * __sched_fork() is basic setup used by init_idle() too:
- */
- static void __sched_fork(struct task_struct *p)
- {
- p->on_rq = 0;
- p->se.on_rq = 0;
- p->se.exec_start = 0;
- p->se.sum_exec_runtime = 0;
- p->se.prev_sum_exec_runtime = 0;
- p->se.nr_migrations = 0;
- p->se.vruntime = 0;
- INIT_LIST_HEAD(&p->se.group_node);
- #ifdef CONFIG_SCHEDSTATS
- memset(&p->se.statistics, 0, sizeof(p->se.statistics));
- #endif
- INIT_LIST_HEAD(&p->rt.run_list);
- #ifdef CONFIG_PREEMPT_NOTIFIERS
- INIT_HLIST_HEAD(&p->preempt_notifiers);
- #endif
- }
- /*
- * fork()/clone()-time setup:
- */
- void sched_fork(struct task_struct *p)
- {
- unsigned long flags;
- int cpu = get_cpu();
- __sched_fork(p);
- /*
- * We mark the process as running here. This guarantees that
- * nobody will actually run it, and a signal or other external
- * event cannot wake it up and insert it on the runqueue either.
- */
- p->state = TASK_RUNNING;
- /*
- * Make sure we do not leak PI boosting priority to the child.
- */
- p->prio = current->normal_prio;
- /*
- * Revert to default priority/policy on fork if requested.
- */
- if (unlikely(p->sched_reset_on_fork)) {
- if (task_has_rt_policy(p)) {
- p->policy = SCHED_NORMAL;
- p->static_prio = NICE_TO_PRIO(0);
- p->rt_priority = 0;
- } else if (PRIO_TO_NICE(p->static_prio) < 0)
- p->static_prio = NICE_TO_PRIO(0);
- p->prio = p->normal_prio = __normal_prio(p);
- set_load_weight(p);
- /*
- * We don't need the reset flag anymore after the fork. It has
- * fulfilled its duty:
- */
- p->sched_reset_on_fork = 0;
- }
- if (!rt_prio(p->prio))
- p->sched_class = &fair_sched_class;
- if (p->sched_class->task_fork)
- p->sched_class->task_fork(p);
- /*
- * The child is not yet in the pid-hash so no cgroup attach races,
- * and the cgroup is pinned to this child due to cgroup_fork()
- * is ran before sched_fork().
- *
- * Silence PROVE_RCU.
- */
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
- if (likely(sched_info_on()))
- memset(&p->sched_info, 0, sizeof(p->sched_info));
- #endif
- #if defined(CONFIG_SMP)
- p->on_cpu = 0;
- #endif
- #ifdef CONFIG_PREEMPT_COUNT
- /* Want to start with kernel preemption disabled. */
- task_thread_info(p)->preempt_count = 1;
- #endif
- #ifdef CONFIG_SMP
- plist_node_init(&p->pushable_tasks, MAX_PRIO);
- #endif
- put_cpu();
- }
- /*
- * wake_up_new_task - wake up a newly created task for the first time.
- *
- * This function will do some initial scheduler statistics housekeeping
- * that must be done for every newly created context, then puts the task
- * on the runqueue and wakes it.
- */
- void wake_up_new_task(struct task_struct *p)
- {
- unsigned long flags;
- struct rq *rq;
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- #ifdef CONFIG_SMP
- /*
- * Fork balancing, do it here and not earlier because:
- * - cpus_allowed can change in the fork path
- * - any previously selected cpu might disappear through hotplug
- */
- set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
- #endif
- rq = __task_rq_lock(p);
- activate_task(rq, p, 0);
- p->on_rq = 1;
- trace_sched_wakeup_new(p, true);
- check_preempt_curr(rq, p, WF_FORK);
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
- p->sched_class->task_woken(rq, p);
- #endif
- task_rq_unlock(rq, p, &flags);
- }
- #ifdef CONFIG_PREEMPT_NOTIFIERS
- /**
- * preempt_notifier_register - tell me when current is being preempted & rescheduled
- * @notifier: notifier struct to register
- */
- void preempt_notifier_register(struct preempt_notifier *notifier)
- {
- hlist_add_head(¬ifier->link, ¤t->preempt_notifiers);
- }
- EXPORT_SYMBOL_GPL(preempt_notifier_register);
- /**
- * preempt_notifier_unregister - no longer interested in preemption notifications
- * @notifier: notifier struct to unregister
- *
- * This is safe to call from within a preemption notifier.
- */
- void preempt_notifier_unregister(struct preempt_notifier *notifier)
- {
- hlist_del(¬ifier->link);
- }
- EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
- static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
- {
- struct preempt_notifier *notifier;
- struct hlist_node *node;
- hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
- notifier->ops->sched_in(notifier, raw_smp_processor_id());
- }
- static void
- fire_sched_out_preempt_notifiers(struct task_struct *curr,
- struct task_struct *next)
- {
- struct preempt_notifier *notifier;
- struct hlist_node *node;
- hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
- notifier->ops->sched_out(notifier, next);
- }
- #else /* !CONFIG_PREEMPT_NOTIFIERS */
- static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
- {
- }
- static void
- fire_sched_out_preempt_notifiers(struct task_struct *curr,
- struct task_struct *next)
- {
- }
- #endif /* CONFIG_PREEMPT_NOTIFIERS */
- /**
- * prepare_task_switch - prepare to switch tasks
- * @rq: the runqueue preparing to switch
- * @prev: the current task that is being switched out
- * @next: the task we are going to switch to.
- *
- * This is called with the rq lock held and interrupts off. It must
- * be paired with a subsequent finish_task_switch after the context
- * switch.
- *
- * prepare_task_switch sets up locking and calls architecture specific
- * hooks.
- */
- static inline void
- prepare_task_switch(struct rq *rq, struct task_struct *prev,
- struct task_struct *next)
- {
- trace_sched_switch(prev, next);
- sched_info_switch(prev, next);
- perf_event_task_sched_out(prev, next);
- fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
- prepare_arch_switch(next);
- }
- /**
- * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
- * @prev: the thread we just switched away from.
- *
- * finish_task_switch must be called after the context switch, paired
- * with a prepare_task_switch call before the context switch.
- * finish_task_switch will reconcile locking set up by prepare_task_switch,
- * and do any other architecture-specific cleanup actions.
- *
- * Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock. (Doing it
- * with the lock held can cause deadlocks; see schedule() for
- * details.)
- */
- static void finish_task_switch(struct rq *rq, struct task_struct *prev)
- __releases(rq->lock)
- {
- struct mm_struct *mm = rq->prev_mm;
- long prev_state;
- rq->prev_mm = NULL;
- /*
- * A task struct has one reference for the use as "current".
- * If a task dies, then it sets TASK_DEAD in tsk->state and calls
- * schedule one last time. The schedule call will never return, and
- * the scheduled task must drop that reference.
- *
- * We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
- * running on another CPU and we could rave with its RUNNING -> DEAD
- * transition, resulting in a double drop.
- */
- prev_state = prev->state;
- finish_arch_switch(prev);
- perf_event_task_sched_in(prev, current);
- finish_lock_switch(rq, prev);
- finish_arch_post_lock_switch();
- fire_sched_in_preempt_notifiers(current);
- if (mm)
- mmdrop(mm);
- if (unlikely(prev_state == TASK_DEAD)) {
- /*
- * Remove function-return probe instances associated with this
- * task and put them back on the free list.
- */
- kprobe_flush_task(prev);
- put_task_struct(prev);
- }
- }
- #ifdef CONFIG_SMP
- /* assumes rq->lock is held */
- static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
- {
- if (prev->sched_class->pre_schedule)
- prev->sched_class->pre_schedule(rq, prev);
- }
- /* rq->lock is NOT held, but preemption is disabled */
- static inline void post_schedule(struct rq *rq)
- {
- if (rq->post_schedule) {
- unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rq->curr->sched_class->post_schedule)
- rq->curr->sched_class->post_schedule(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- rq->post_schedule = 0;
- }
- }
- #else
- static inline void pre_schedule(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void post_schedule(struct rq *rq)
- {
- }
- #endif
- /**
- * schedule_tail - first thing a freshly forked thread must call.
- * @prev: the thread we just switched away from.
- */
- asmlinkage void schedule_tail(struct task_struct *prev)
- __releases(rq->lock)
- {
- struct rq *rq = this_rq();
- finish_task_switch(rq, prev);
- /*
- * FIXME: do we need to worry about rq being invalidated by the
- * task_switch?
- */
- post_schedule(rq);
- #ifdef __ARCH_WANT_UNLOCKED_CTXSW
- /* In this case, finish_task_switch does not reenable preemption */
- preempt_enable();
- #endif
- if (current->set_child_tid)
- put_user(task_pid_vnr(current), current->set_child_tid);
- }
- /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
- */
- static inline void
- context_switch(struct rq *rq, struct task_struct *prev,
- struct task_struct *next)
- {
- struct mm_struct *mm, *oldmm;
- prepare_task_switch(rq, prev, next);
- mm = next->mm;
- oldmm = prev->active_mm;
- /*
- * For paravirt, this is coupled with an exit in switch_to to
- * combine the page table reload and the switch backend into
- * one hypercall.
- */
- arch_start_context_switch(prev);
- if (!mm) {
- next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
- enter_lazy_tlb(oldmm, next);
- } else
- switch_mm(oldmm, mm, next);
- if (!prev->mm) {
- prev->active_mm = NULL;
- rq->prev_mm = oldmm;
- }
- /*
- * Since the runqueue lock will be released by the next
- * task (which is an invalid locking op but in the case
- * of the scheduler it's an obvious special-case), so we
- * do an early lockdep release here:
- */
- #ifndef __ARCH_WANT_UNLOCKED_CTXSW
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- #endif
- /* Here we just switch the register state and the stack. */
- switch_to(prev, next, prev);
- barrier();
- /*
- * this_rq must be evaluated again because prev may have moved
- * CPUs since it called schedule(), thus the 'rq' on its stack
- * frame will be invalid.
- */
- finish_task_switch(this_rq(), prev);
- }
- /*
- * nr_running and nr_context_switches:
- *
- * externally visible scheduler statistics: current number of runnable
- * threads, total number of context switches performed since bootup.
- */
- unsigned long nr_running(void)
- {
- unsigned long i, sum = 0;
- for_each_online_cpu(i)
- sum += cpu_rq(i)->nr_running;
- return sum;
- }
- unsigned long long nr_context_switches(void)
- {
- int i;
- unsigned long long sum = 0;
- for_each_possible_cpu(i)
- sum += cpu_rq(i)->nr_switches;
- return sum;
- }
- unsigned long nr_iowait(void)
- {
- unsigned long i, sum = 0;
- for_each_possible_cpu(i)
- sum += atomic_read(&cpu_rq(i)->nr_iowait);
- return sum;
- }
- unsigned long nr_iowait_cpu(int cpu)
- {
- struct rq *this = cpu_rq(cpu);
- return atomic_read(&this->nr_iowait);
- }
- unsigned long this_cpu_load(void)
- {
- struct rq *this = this_rq();
- return this->cpu_load[0];
- }
- #ifdef CONFIG_RUNTIME_COMPCACHE
- unsigned long this_cpu_loadx(int i)
- {
- struct rq *this = this_rq();
- return this->cpu_load[i];
- }
- #endif /* CONFIG_RUNTIME_COMPCACHE */
- /*
- * Global load-average calculations
- *
- * We take a distributed and async approach to calculating the global load-avg
- * in order to minimize overhead.
- *
- * The global load average is an exponentially decaying average of nr_running +
- * nr_uninterruptible.
- *
- * Once every LOAD_FREQ:
- *
- * nr_active = 0;
- * for_each_possible_cpu(cpu)
- * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
- *
- * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
- *
- * Due to a number of reasons the above turns in the mess below:
- *
- * - for_each_possible_cpu() is prohibitively expensive on machines with
- * serious number of cpus, therefore we need to take a distributed approach
- * to calculating nr_active.
- *
- * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
- * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
- *
- * So assuming nr_active := 0 when we start out -- true per definition, we
- * can simply take per-cpu deltas and fold those into a global accumulate
- * to obtain the same result. See calc_load_fold_active().
- *
- * Furthermore, in order to avoid synchronizing all per-cpu delta folding
- * across the machine, we assume 10 ticks is sufficient time for every
- * cpu to have completed this task.
- *
- * This places an upper-bound on the IRQ-off latency of the machine. Then
- * again, being late doesn't loose the delta, just wrecks the sample.
- *
- * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- * this would add another cross-cpu cacheline miss and atomic operation
- * to the wakeup path. Instead we increment on whatever cpu the task ran
- * when it went into uninterruptible state and decrement on whatever cpu
- * did the wakeup. This means that only the sum of nr_uninterruptible over
- * all cpus yields the correct result.
- *
- * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
- */
- /* Variables and functions for calc_load */
- static atomic_long_t calc_load_tasks;
- static unsigned long calc_load_update;
- unsigned long avenrun[3];
- EXPORT_SYMBOL(avenrun); /* should be removed */
- /**
- * get_avenrun - get the load average array
- * @loads: pointer to dest load array
- * @offset: offset to add
- * @shift: shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
- void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
- {
- loads[0] = (avenrun[0] + offset) << shift;
- loads[1] = (avenrun[1] + offset) << shift;
- loads[2] = (avenrun[2] + offset) << shift;
- }
- static long calc_load_fold_active(struct rq *this_rq)
- {
- long nr_active, delta = 0;
- nr_active = this_rq->nr_running;
- nr_active += (long) this_rq->nr_uninterruptible;
- if (nr_active != this_rq->calc_load_active) {
- delta = nr_active - this_rq->calc_load_active;
- this_rq->calc_load_active = nr_active;
- }
- return delta;
- }
- /*
- * a1 = a0 * e + a * (1 - e)
- */
- static unsigned long
- calc_load(unsigned long load, unsigned long exp, unsigned long active)
- {
- load *= exp;
- load += active * (FIXED_1 - exp);
- load += 1UL << (FSHIFT - 1);
- return load >> FSHIFT;
- }
- #ifdef CONFIG_NO_HZ
- /*
- * Handle NO_HZ for the global load-average.
- *
- * Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
- * NO_HZ.
- *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
- * when we read the global state.
- *
- * Obviously reality has to ruin such a delightfully simple scheme:
- *
- * - When we go NO_HZ idle during the window, we can negate our sample
- * contribution, causing under-accounting.
- *
- * We avoid this by keeping two idle-delta counters and flipping them
- * when the window starts, thus separating old and new NO_HZ load.
- *
- * The only trick is the slight shift in index flip for read vs write.
- *
- * 0s 5s 10s 15s
- * +10 +10 +10 +10
- * |-|-----------|-|-----------|-|-----------|-|
- * r:0 0 1 1 0 0 1 1 0
- * w:0 1 1 0 0 1 1 0 0
- *
- * This ensures we'll fold the old idle contribution in this window while
- * accumlating the new one.
- *
- * - When we wake up from NO_HZ idle during the window, we push up our
- * contribution, since we effectively move our sample point to a known
- * busy state.
- *
- * This is solved by pushing the window forward, and thus skipping the
- * sample, for this cpu (effectively using the idle-delta for this cpu which
- * was in effect at the time the window opened). This also solves the issue
- * of having to deal with a cpu having been in NOHZ idle for multiple
- * LOAD_FREQ intervals.
- *
- * When making the ILB scale, we should try to pull this in as well.
- */
- static atomic_long_t calc_load_idle[2];
- static int calc_load_idx;
- static inline int calc_load_write_idx(void)
- {
- int idx = calc_load_idx;
- /*
- * See calc_global_nohz(), if we observe the new index, we also
- * need to observe the new update time.
- */
- smp_rmb();
- /*
- * If the folding window started, make sure we start writing in the
- * next idle-delta.
- */
- if (!time_before(jiffies, calc_load_update))
- idx++;
- return idx & 1;
- }
- static inline int calc_load_read_idx(void)
- {
- return calc_load_idx & 1;
- }
- void calc_load_enter_idle(void)
- {
- struct rq *this_rq = this_rq();
- long delta;
- /*
- * We're going into NOHZ mode, if there's any pending delta, fold it
- * into the pending idle delta.
- */
- delta = calc_load_fold_active(this_rq);
- if (delta) {
- int idx = calc_load_write_idx();
- atomic_long_add(delta, &calc_load_idle[idx]);
- }
- }
- void calc_load_exit_idle(void)
- {
- struct rq *this_rq = this_rq();
- /*
- * If we're still before the sample window, we're done.
- */
- if (time_before(jiffies, this_rq->calc_load_update))
- return;
- /*
- * We woke inside or after the sample window, this means we're already
- * accounted through the nohz accounting, so skip the entire deal and
- * sync up for the next window.
- */
- this_rq->calc_load_update = calc_load_update;
- if (time_before(jiffies, this_rq->calc_load_update + 10))
- this_rq->calc_load_update += LOAD_FREQ;
- }
- static long calc_load_fold_idle(void)
- {
- int idx = calc_load_read_idx();
- long delta = 0;
- if (atomic_long_read(&calc_load_idle[idx]))
- delta = atomic_long_xchg(&calc_load_idle[idx], 0);
- return delta;
- }
- /**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x: base of the power
- * @frac_bits: fractional bits of @x
- * @n: power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
- static unsigned long
- fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
- {
- unsigned long result = 1UL << frac_bits;
- if (n) for (;;) {
- if (n & 1) {
- result *= x;
- result += 1UL << (frac_bits - 1);
- result >>= frac_bits;
- }
- n >>= 1;
- if (!n)
- break;
- x *= x;
- x += 1UL << (frac_bits - 1);
- x >>= frac_bits;
- }
- return result;
- }
- /*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- * = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- * ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- * = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- * n 1 - x^(n+1)
- * S_n := \Sum x^i = -------------
- * i=0 1 - x
- */
- static unsigned long
- calc_load_n(unsigned long load, unsigned long exp,
- unsigned long active, unsigned int n)
- {
- return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
- }
- /*
- * NO_HZ can leave us missing all per-cpu ticks calling
- * calc_load_account_active(), but since an idle CPU folds its delta into
- * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
- * in the pending idle delta if our idle period crossed a load cycle boundary.
- *
- * Once we've updated the global active value, we need to apply the exponential
- * weights adjusted to the number of cycles missed.
- */
- static void calc_global_nohz(void)
- {
- long delta, active, n;
- if (!time_before(jiffies, calc_load_update + 10)) {
- /*
- * Catch-up, fold however many we are behind still
- */
- delta = jiffies - calc_load_update - 10;
- n = 1 + (delta / LOAD_FREQ);
- active = atomic_long_read(&calc_load_tasks);
- active = active > 0 ? active * FIXED_1 : 0;
- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
- calc_load_update += n * LOAD_FREQ;
- }
- /*
- * Flip the idle index...
- *
- * Make sure we first write the new time then flip the index, so that
- * calc_load_write_idx() will see the new time when it reads the new
- * index, this avoids a double flip messing things up.
- */
- smp_wmb();
- calc_load_idx++;
- }
- #else /* !CONFIG_NO_HZ */
- static inline long calc_load_fold_idle(void) { return 0; }
- static inline void calc_global_nohz(void) { }
- #endif /* CONFIG_NO_HZ */
- /*
- * calc_load - update the avenrun load estimates 10 ticks after the
- * CPUs have updated calc_load_tasks.
- */
- void calc_global_load(unsigned long ticks)
- {
- long active, delta;
- if (time_before(jiffies, calc_load_update + 10))
- return;
- /*
- * Fold the 'old' idle-delta to include all NO_HZ cpus.
- */
- delta = calc_load_fold_idle();
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
- active = atomic_long_read(&calc_load_tasks);
- active = active > 0 ? active * FIXED_1 : 0;
- avenrun[0] = calc_load(avenrun[0], EXP_1, active);
- avenrun[1] = calc_load(avenrun[1], EXP_5, active);
- avenrun[2] = calc_load(avenrun[2], EXP_15, active);
- calc_load_update += LOAD_FREQ;
- /*
- * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
- */
- calc_global_nohz();
- }
- /*
- * Called from update_cpu_load() to periodically update this CPU's
- * active count.
- */
- static void calc_load_account_active(struct rq *this_rq)
- {
- long delta;
- if (time_before(jiffies, this_rq->calc_load_update))
- return;
- delta = calc_load_fold_active(this_rq);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
- this_rq->calc_load_update += LOAD_FREQ;
- }
- /*
- * End of global load-average stuff
- */
- /*
- * The exact cpuload at various idx values, calculated at every tick would be
- * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
- *
- * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
- * on nth tick when cpu may be busy, then we have:
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
- *
- * The calculation is approximated on a 128 point scale.
- * degrade_zero_ticks is the number of ticks after which load at any
- * particular idx is approximated to be zero.
- * degrade_factor is a precomputed table, a row for each load idx.
- * Each column corresponds to degradation factor for a power of two ticks,
- * based on 128 point scale.
- * Example:
- * row 2, col 3 (=12) says that the degradation at load idx 2 after
- * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
- *
- * With this power of 2 load factors, we can degrade the load n times
- * by looking at 1 bits in n and doing as many mult/shift instead of
- * n mult/shifts needed by the exact degradation.
- */
- #define DEGRADE_SHIFT 7
- static const unsigned char
- degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
- static const unsigned char
- degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
- {0, 0, 0, 0, 0, 0, 0, 0},
- {64, 32, 8, 0, 0, 0, 0, 0},
- {96, 72, 40, 12, 1, 0, 0},
- {112, 98, 75, 43, 15, 1, 0},
- {120, 112, 98, 76, 45, 16, 2} };
- /*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
- static unsigned long
- decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
- {
- int j = 0;
- if (!missed_updates)
- return load;
- if (missed_updates >= degrade_zero_ticks[idx])
- return 0;
- if (idx == 1)
- return load >> missed_updates;
- while (missed_updates) {
- if (missed_updates % 2)
- load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
- missed_updates >>= 1;
- j++;
- }
- return load;
- }
- /*
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC). With tickless idle this will not be called
- * every tick. We fix it up based on jiffies.
- */
- static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
- unsigned long pending_updates)
- {
- int i, scale;
- this_rq->nr_load_updates++;
- /* Update our load: */
- this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
- for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
- unsigned long old_load, new_load;
- /* scale is effectively 1 << i now, and >> i divides by scale */
- old_load = this_rq->cpu_load[i];
- old_load = decay_load_missed(old_load, pending_updates - 1, i);
- new_load = this_load;
- /*
- * Round up the averaging division if load is increasing. This
- * prevents us from getting stuck on 9 if the load is 10, for
- * example.
- */
- if (new_load > old_load)
- new_load += scale - 1;
- this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
- }
- sched_avg_update(this_rq);
- }
- #ifdef CONFIG_NO_HZ
- /*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
- /*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
- void update_idle_cpu_load(struct rq *this_rq)
- {
- unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
- unsigned long load = this_rq->load.weight;
- unsigned long pending_updates;
- /*
- * bail if there's load or we're actually up-to-date.
- */
- if (load || curr_jiffies == this_rq->last_load_update_tick)
- return;
- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
- this_rq->last_load_update_tick = curr_jiffies;
- __update_cpu_load(this_rq, load, pending_updates);
- }
- /*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
- void update_cpu_load_nohz(void)
- {
- struct rq *this_rq = this_rq();
- unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
- unsigned long pending_updates;
- if (curr_jiffies == this_rq->last_load_update_tick)
- return;
- raw_spin_lock(&this_rq->lock);
- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
- if (pending_updates) {
- this_rq->last_load_update_tick = curr_jiffies;
- /*
- * We were idle, this means load 0, the current load might be
- * !0 due to remote wakeups and the sort.
- */
- __update_cpu_load(this_rq, 0, pending_updates);
- }
- raw_spin_unlock(&this_rq->lock);
- }
- #endif /* CONFIG_NO_HZ */
- /*
- * Called from scheduler_tick()
- */
- static void update_cpu_load_active(struct rq *this_rq)
- {
- /*
- * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
- */
- this_rq->last_load_update_tick = jiffies;
- __update_cpu_load(this_rq, this_rq->load.weight, 1);
- calc_load_account_active(this_rq);
- }
- #ifdef CONFIG_SMP
- /*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
- void sched_exec(void)
- {
- struct task_struct *p = current;
- unsigned long flags;
- int dest_cpu;
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
- if (dest_cpu == smp_processor_id())
- goto unlock;
- if (likely(cpu_active(dest_cpu))) {
- struct migration_arg arg = { p, dest_cpu };
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
- return;
- }
- unlock:
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- }
- #endif
- DEFINE_PER_CPU(struct kernel_stat, kstat);
- DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
- EXPORT_PER_CPU_SYMBOL(kstat);
- EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
- /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
- static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
- {
- u64 ns = 0;
- if (task_current(rq, p)) {
- update_rq_clock(rq);
- ns = rq->clock_task - p->se.exec_start;
- if ((s64)ns < 0)
- ns = 0;
- }
- return ns;
- }
- unsigned long long task_delta_exec(struct task_struct *p)
- {
- unsigned long flags;
- struct rq *rq;
- u64 ns = 0;
- rq = task_rq_lock(p, &flags);
- ns = do_task_delta_exec(p, rq);
- task_rq_unlock(rq, p, &flags);
- return ns;
- }
- /*
- * Return accounted runtime for the task.
- * In case the task is currently running, return the runtime plus current's
- * pending runtime that have not been accounted yet.
- */
- unsigned long long task_sched_runtime(struct task_struct *p)
- {
- unsigned long flags;
- struct rq *rq;
- u64 ns = 0;
- rq = task_rq_lock(p, &flags);
- ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
- task_rq_unlock(rq, p, &flags);
- return ns;
- }
- #ifdef CONFIG_CGROUP_CPUACCT
- struct cgroup_subsys cpuacct_subsys;
- struct cpuacct root_cpuacct;
- #endif
- static inline void task_group_account_field(struct task_struct *p, int index,
- u64 tmp)
- {
- #ifdef CONFIG_CGROUP_CPUACCT
- struct kernel_cpustat *kcpustat;
- struct cpuacct *ca;
- #endif
- /*
- * Since all updates are sure to touch the root cgroup, we
- * get ourselves ahead and touch it first. If the root cgroup
- * is the only cgroup, then nothing else should be necessary.
- *
- */
- __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
- #ifdef CONFIG_CGROUP_CPUACCT
- if (unlikely(!cpuacct_subsys.active))
- return;
- rcu_read_lock();
- ca = task_ca(p);
- while (ca && (ca != &root_cpuacct)) {
- kcpustat = this_cpu_ptr(ca->cpustat);
- kcpustat->cpustat[index] += tmp;
- ca = parent_ca(ca);
- }
- rcu_read_unlock();
- #endif
- }
- /*
- * Account user cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in user space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- */
- void account_user_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
- {
- int index;
- /* Add user time to process. */
- p->utime += cputime;
- p->utimescaled += cputime_scaled;
- account_group_user_time(p, cputime);
- index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
- /* Add user time to cpustat. */
- task_group_account_field(p, index, (__force u64) cputime);
- /* Account for user time used */
- acct_update_integrals(p);
- /* Account power usage for user time */
- acct_update_power(p, cputime);
- }
- /*
- * Account guest cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in virtual machine since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- */
- static void account_guest_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- /* Add guest time to process. */
- p->utime += cputime;
- p->utimescaled += cputime_scaled;
- account_group_user_time(p, cputime);
- p->gtime += cputime;
- /* Add guest time to cpustat. */
- if (TASK_NICE(p) > 0) {
- cpustat[CPUTIME_NICE] += (__force u64) cputime;
- cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
- } else {
- cpustat[CPUTIME_USER] += (__force u64) cputime;
- cpustat[CPUTIME_GUEST] += (__force u64) cputime;
- }
- }
- /*
- * Account system cpu time to a process and desired cpustat field
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * @target_cputime64: pointer to cpustat field that has to be updated
- */
- static inline
- void __account_system_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled, int index)
- {
- /* Add system time to process. */
- p->stime += cputime;
- p->stimescaled += cputime_scaled;
- account_group_system_time(p, cputime);
- /* Add system time to cpustat. */
- task_group_account_field(p, index, (__force u64) cputime);
- /* Account for system time used */
- acct_update_integrals(p);
- /* Account power usage for system time */
- acct_update_power(p, cputime);
- }
- /*
- * Account system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- */
- void account_system_time(struct task_struct *p, int hardirq_offset,
- cputime_t cputime, cputime_t cputime_scaled)
- {
- int index;
- if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
- account_guest_time(p, cputime, cputime_scaled);
- return;
- }
- if (hardirq_count() - hardirq_offset)
- index = CPUTIME_IRQ;
- else if (in_serving_softirq())
- index = CPUTIME_SOFTIRQ;
- else
- index = CPUTIME_SYSTEM;
- __account_system_time(p, cputime, cputime_scaled, index);
- }
- /*
- * Account for involuntary wait time.
- * @cputime: the cpu time spent in involuntary wait
- */
- void account_steal_time(cputime_t cputime)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- cpustat[CPUTIME_STEAL] += (__force u64) cputime;
- }
- /*
- * Account for idle time.
- * @cputime: the cpu time spent in idle wait
- */
- void account_idle_time(cputime_t cputime)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- struct rq *rq = this_rq();
- if (atomic_read(&rq->nr_iowait) > 0)
- cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
- else
- cpustat[CPUTIME_IDLE] += (__force u64) cputime;
- }
- static __always_inline bool steal_account_process_tick(void)
- {
- #ifdef CONFIG_PARAVIRT
- if (static_key_false(¶virt_steal_enabled)) {
- u64 steal, st = 0;
- steal = paravirt_steal_clock(smp_processor_id());
- steal -= this_rq()->prev_steal_time;
- st = steal_ticks(steal);
- this_rq()->prev_steal_time += st * TICK_NSEC;
- account_steal_time(st);
- return st;
- }
- #endif
- return false;
- }
- #ifndef CONFIG_VIRT_CPU_ACCOUNTING
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- /*
- * Account a tick to a process and cpustat
- * @p: the process that the cpu time gets accounted to
- * @user_tick: is the tick from userspace
- * @rq: the pointer to rq
- *
- * Tick demultiplexing follows the order
- * - pending hardirq update
- * - pending softirq update
- * - user_time
- * - idle_time
- * - system time
- * - check for guest_time
- * - else account as system_time
- *
- * Check for hardirq is done both for system and user time as there is
- * no timer going off while we are on hardirq and hence we may never get an
- * opportunity to update it solely in system time.
- * p->stime and friends are only updated on system time and not on irq
- * softirq as those do not count in task exec_runtime any more.
- */
- static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- struct rq *rq)
- {
- cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- if (steal_account_process_tick())
- return;
- if (irqtime_account_hi_update()) {
- cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
- } else if (irqtime_account_si_update()) {
- cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
- } else if (this_cpu_ksoftirqd() == p) {
- /*
- * ksoftirqd time do not get accounted in cpu_softirq_time.
- * So, we have to handle it separately here.
- * Also, p->stime needs to be updated for ksoftirqd.
- */
- __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- CPUTIME_SOFTIRQ);
- } else if (user_tick) {
- account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
- } else if (p == rq->idle) {
- account_idle_time(cputime_one_jiffy);
- } else if (p->flags & PF_VCPU) { /* System time or guest time */
- account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
- } else {
- __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- CPUTIME_SYSTEM);
- }
- }
- static void irqtime_account_idle_ticks(int ticks)
- {
- int i;
- struct rq *rq = this_rq();
- for (i = 0; i < ticks; i++)
- irqtime_account_process_tick(current, 0, rq);
- }
- #else /* CONFIG_IRQ_TIME_ACCOUNTING */
- static void irqtime_account_idle_ticks(int ticks) {}
- static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- struct rq *rq) {}
- #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
- /*
- * Account a single tick of cpu time.
- * @p: the process that the cpu time gets accounted to
- * @user_tick: indicates if the tick is a user or a system tick
- */
- void account_process_tick(struct task_struct *p, int user_tick)
- {
- cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
- struct rq *rq = this_rq();
- if (sched_clock_irqtime) {
- irqtime_account_process_tick(p, user_tick, rq);
- return;
- }
- if (steal_account_process_tick())
- return;
- if (user_tick)
- account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
- else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
- one_jiffy_scaled);
- else
- account_idle_time(cputime_one_jiffy);
- }
- /*
- * Account multiple ticks of steal time.
- * @p: the process from which the cpu time has been stolen
- * @ticks: number of stolen ticks
- */
- void account_steal_ticks(unsigned long ticks)
- {
- account_steal_time(jiffies_to_cputime(ticks));
- }
- /*
- * Account multiple ticks of idle time.
- * @ticks: number of stolen ticks
- */
- void account_idle_ticks(unsigned long ticks)
- {
- if (sched_clock_irqtime) {
- irqtime_account_idle_ticks(ticks);
- return;
- }
- account_idle_time(jiffies_to_cputime(ticks));
- }
- #endif
- /*
- * Use precise platform statistics if available:
- */
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING
- void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
- {
- *ut = p->utime;
- *st = p->stime;
- }
- void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
- {
- struct task_cputime cputime;
- thread_group_cputime(p, &cputime);
- *ut = cputime.utime;
- *st = cputime.stime;
- }
- #else
- #ifndef nsecs_to_cputime
- # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
- #endif
- static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
- {
- u64 temp = (__force u64) rtime;
- temp *= (__force u64) utime;
- if (sizeof(cputime_t) == 4)
- temp = div_u64(temp, (__force u32) total);
- else
- temp = div64_u64(temp, (__force u64) total);
- return (__force cputime_t) temp;
- }
- void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
- {
- cputime_t rtime, utime = p->utime, total = utime + p->stime;
- /*
- * Use CFS's precise accounting:
- */
- rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
- if (total)
- utime = scale_utime(utime, rtime, total);
- else
- utime = rtime;
- /*
- * Compare with previous values, to keep monotonicity:
- */
- p->prev_utime = max(p->prev_utime, utime);
- p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
- *ut = p->prev_utime;
- *st = p->prev_stime;
- }
- /*
- * Must be called with siglock held.
- */
- void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
- {
- struct signal_struct *sig = p->signal;
- struct task_cputime cputime;
- cputime_t rtime, utime, total;
- thread_group_cputime(p, &cputime);
- total = cputime.utime + cputime.stime;
- rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
- if (total)
- utime = scale_utime(cputime.utime, rtime, total);
- else
- utime = rtime;
- sig->prev_utime = max(sig->prev_utime, utime);
- sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
- *ut = sig->prev_utime;
- *st = sig->prev_stime;
- }
- #endif
- /*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- */
- void scheduler_tick(void)
- {
- int cpu = smp_processor_id();
- struct rq *rq = cpu_rq(cpu);
- struct task_struct *curr = rq->curr;
- sched_clock_tick();
- raw_spin_lock(&rq->lock);
- update_rq_clock(rq);
- curr->sched_class->task_tick(rq, curr, 0);
- update_cpu_load_active(rq);
- raw_spin_unlock(&rq->lock);
- perf_event_task_tick();
- #ifdef CONFIG_SMP
- rq->idle_balance = idle_cpu(cpu);
- trigger_load_balance(rq, cpu);
- #endif
- }
- notrace unsigned long get_parent_ip(unsigned long addr)
- {
- if (in_lock_functions(addr)) {
- addr = CALLER_ADDR2;
- if (in_lock_functions(addr))
- addr = CALLER_ADDR3;
- }
- return addr;
- }
- #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
- defined(CONFIG_PREEMPT_TRACER))
- void __kprobes add_preempt_count(int val)
- {
- #ifdef CONFIG_DEBUG_PREEMPT
- /*
- * Underflow?
- */
- if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
- return;
- #endif
- preempt_count() += val;
- #ifdef CONFIG_DEBUG_PREEMPT
- /*
- * Spinlock count overflowing soon?
- */
- DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
- PREEMPT_MASK - 10);
- #endif
- if (preempt_count() == val)
- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
- }
- EXPORT_SYMBOL(add_preempt_count);
- void __kprobes sub_preempt_count(int val)
- {
- #ifdef CONFIG_DEBUG_PREEMPT
- /*
- * Underflow?
- */
- if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
- return;
- /*
- * Is the spinlock portion underflowing?
- */
- if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
- !(preempt_count() & PREEMPT_MASK)))
- return;
- #endif
- if (preempt_count() == val)
- trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
- preempt_count() -= val;
- }
- EXPORT_SYMBOL(sub_preempt_count);
- #endif
- /*
- * Print scheduling while atomic bug:
- */
- static noinline void __schedule_bug(struct task_struct *prev)
- {
- if (oops_in_progress)
- return;
- printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
- prev->comm, prev->pid, preempt_count());
- debug_show_held_locks(prev);
- print_modules();
- if (irqs_disabled())
- print_irqtrace_events(prev);
- dump_stack();
- }
- /*
- * Various schedule()-time debugging checks and statistics:
- */
- static inline void schedule_debug(struct task_struct *prev)
- {
- /*
- * Test if we are atomic. Since do_exit() needs to call into
- * schedule() atomically, we ignore that path for now.
- * Otherwise, whine if we are scheduling when we should not be.
- */
- if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
- __schedule_bug(prev);
- rcu_sleep_check();
- profile_hit(SCHED_PROFILING, __builtin_return_address(0));
- schedstat_inc(this_rq(), sched_count);
- }
- static void put_prev_task(struct rq *rq, struct task_struct *prev)
- {
- if (prev->on_rq || rq->skip_clock_update < 0)
- update_rq_clock(rq);
- prev->sched_class->put_prev_task(rq, prev);
- }
- /*
- * Pick up the highest-prio task:
- */
- static inline struct task_struct *
- pick_next_task(struct rq *rq)
- {
- const struct sched_class *class;
- struct task_struct *p;
- /*
- * Optimization: we know that if all tasks are in
- * the fair class we can call that function directly:
- */
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
- p = fair_sched_class.pick_next_task(rq);
- if (likely(p))
- return p;
- }
- for_each_class(class) {
- p = class->pick_next_task(rq);
- if (p)
- return p;
- }
- BUG(); /* the idle class will always have a runnable task */
- }
- /*
- * __schedule() is the main scheduler function.
- */
- static void __sched __schedule(void)
- {
- struct task_struct *prev, *next;
- unsigned long *switch_count;
- struct rq *rq;
- int cpu;
- need_resched:
- preempt_disable();
- cpu = smp_processor_id();
- rq = cpu_rq(cpu);
- rcu_note_context_switch(cpu);
- prev = rq->curr;
- schedule_debug(prev);
- if (sched_feat(HRTICK))
- hrtick_clear(rq);
- raw_spin_lock_irq(&rq->lock);
- switch_count = &prev->nivcsw;
- if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
- if (unlikely(signal_pending_state(prev->state, prev))) {
- prev->state = TASK_RUNNING;
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
- /*
- * If a worker went to sleep, notify and ask workqueue
- * whether it wants to wake up a task to maintain
- * concurrency.
- */
- if (prev->flags & PF_WQ_WORKER) {
- struct task_struct *to_wakeup;
- to_wakeup = wq_worker_sleeping(prev, cpu);
- if (to_wakeup)
- try_to_wake_up_local(to_wakeup);
- }
- }
- switch_count = &prev->nvcsw;
- }
- pre_schedule(rq, prev);
- if (unlikely(!rq->nr_running))
- idle_balance(cpu, rq);
- put_prev_task(rq, prev);
- next = pick_next_task(rq);
- clear_tsk_need_resched(prev);
- rq->skip_clock_update = 0;
- if (likely(prev != next)) {
- rq->nr_switches++;
- rq->curr = next;
- ++*switch_count;
- context_switch(rq, prev, next); /* unlocks the rq */
- /*
- * The context switch have flipped the stack from under us
- * and restored the local variables which were saved when
- * this task called schedule() in the past. prev == current
- * is still correct, but it can be moved to another cpu/rq.
- */
- cpu = smp_processor_id();
- rq = cpu_rq(cpu);
- #ifdef CONFIG_SEC_DEBUG
- sec_debug_task_sched_log(cpu, rq->curr);
- #endif
- } else
- raw_spin_unlock_irq(&rq->lock);
- post_schedule(rq);
- sched_preempt_enable_no_resched();
- if (need_resched())
- goto need_resched;
- }
- static inline void sched_submit_work(struct task_struct *tsk)
- {
- if (!tsk->state || tsk_is_pi_blocked(tsk))
- return;
- /*
- * If we are going to sleep and we have plugged IO queued,
- * make sure to submit it to avoid deadlocks.
- */
- if (blk_needs_flush_plug(tsk))
- blk_schedule_flush_plug(tsk);
- }
- asmlinkage void __sched schedule(void)
- {
- struct task_struct *tsk = current;
- sched_submit_work(tsk);
- __schedule();
- }
- EXPORT_SYMBOL(schedule);
- /**
- * schedule_preempt_disabled - called with preemption disabled
- *
- * Returns with preemption disabled. Note: preempt_count must be 1
- */
- void __sched schedule_preempt_disabled(void)
- {
- sched_preempt_enable_no_resched();
- schedule();
- preempt_disable();
- }
- #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
- {
- if (lock->owner != owner)
- return false;
- /*
- * Ensure we emit the owner->on_cpu, dereference _after_ checking
- * lock->owner still matches owner, if that fails, owner might
- * point to free()d memory, if it still matches, the rcu_read_lock()
- * ensures the memory stays valid.
- */
- barrier();
- return owner->on_cpu;
- }
- /*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
- int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
- {
- if (!sched_feat(OWNER_SPIN))
- return 0;
- rcu_read_lock();
- while (owner_running(lock, owner)) {
- if (need_resched())
- break;
- arch_mutex_cpu_relax();
- }
- rcu_read_unlock();
- /*
- * We break out the loop above on need_resched() and when the
- * owner changed, which is a sign for heavy contention. Return
- * success only when lock->owner is NULL.
- */
- return lock->owner == NULL;
- }
- #endif
- #ifdef CONFIG_PREEMPT
- /*
- * this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
- * occur there and call schedule directly.
- */
- asmlinkage void __sched notrace preempt_schedule(void)
- {
- struct thread_info *ti = current_thread_info();
- /*
- * If there is a non-zero preempt_count or interrupts are disabled,
- * we do not want to preempt the current task. Just return..
- */
- if (likely(ti->preempt_count || irqs_disabled()))
- return;
- do {
- add_preempt_count_notrace(PREEMPT_ACTIVE);
- __schedule();
- sub_preempt_count_notrace(PREEMPT_ACTIVE);
- /*
- * Check again in case we missed a preemption opportunity
- * between schedule and now.
- */
- barrier();
- } while (need_resched());
- }
- EXPORT_SYMBOL(preempt_schedule);
- /*
- * this is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
- */
- asmlinkage void __sched preempt_schedule_irq(void)
- {
- struct thread_info *ti = current_thread_info();
- /* Catch callers which need to be fixed */
- BUG_ON(ti->preempt_count || !irqs_disabled());
- do {
- add_preempt_count(PREEMPT_ACTIVE);
- local_irq_enable();
- __schedule();
- local_irq_disable();
- sub_preempt_count(PREEMPT_ACTIVE);
- /*
- * Check again in case we missed a preemption opportunity
- * between schedule and now.
- */
- barrier();
- } while (need_resched());
- }
- #endif /* CONFIG_PREEMPT */
- int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
- void *key)
- {
- return try_to_wake_up(curr->private, mode, wake_flags);
- }
- EXPORT_SYMBOL(default_wake_function);
- /*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
- *
- * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
- * zero in this (rare) case, and we handle it by continuing to scan the queue.
- */
- static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int wake_flags, void *key)
- {
- wait_queue_t *curr, *next;
- list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
- unsigned flags = curr->flags;
- if (curr->func(curr, mode, wake_flags, key) &&
- (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
- break;
- }
- }
- /**
- * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
- void __wake_up(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, void *key)
- {
- unsigned long flags;
- spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, 0, key);
- spin_unlock_irqrestore(&q->lock, flags);
- }
- EXPORT_SYMBOL(__wake_up);
- /*
- * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
- */
- void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
- {
- __wake_up_common(q, mode, nr, 0, NULL);
- }
- EXPORT_SYMBOL_GPL(__wake_up_locked);
- void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
- {
- __wake_up_common(q, mode, 1, 0, key);
- }
- EXPORT_SYMBOL_GPL(__wake_up_locked_key);
- /**
- * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: opaque value to be passed to wakeup targets
- *
- * The sync wakeup differs that the waker knows that it will schedule
- * away soon, so while the target thread will be woken up, it will not
- * be migrated to another CPU - ie. the two threads are 'synchronized'
- * with each other. This can prevent needless bouncing between CPUs.
- *
- * On UP it can prevent extra preemption.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
- void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, void *key)
- {
- unsigned long flags;
- int wake_flags = WF_SYNC;
- if (unlikely(!q))
- return;
- if (unlikely(nr_exclusive != 1))
- wake_flags = 0;
- spin_lock_irqsave(&q->lock, flags);
- __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
- spin_unlock_irqrestore(&q->lock, flags);
- }
- EXPORT_SYMBOL_GPL(__wake_up_sync_key);
- /*
- * __wake_up_sync - see __wake_up_sync_key()
- */
- void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
- {
- __wake_up_sync_key(q, mode, nr_exclusive, NULL);
- }
- EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
- void __wake_up_pollfree(wait_queue_head_t *wq_head)
- {
- __wake_up(wq_head, TASK_NORMAL, 0, (void *) (POLLHUP | POLLFREE));
- /* POLLFREE must have cleared the queue. */
- WARN_ON_ONCE(waitqueue_active(wq_head));
- }
- /**
- * complete: - signals a single thread waiting on this completion
- * @x: holds the state of this particular completion
- *
- * This will wake up a single thread waiting on this completion. Threads will be
- * awakened in the same order in which they were queued.
- *
- * See also complete_all(), wait_for_completion() and related routines.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
- void complete(struct completion *x)
- {
- unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
- spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete);
- /**
- * complete_all: - signals all threads waiting on this completion
- * @x: holds the state of this particular completion
- *
- * This will wake up all threads waiting on this particular completion event.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
- void complete_all(struct completion *x)
- {
- unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done += UINT_MAX/2;
- __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
- spin_unlock_irqrestore(&x->wait.lock, flags);
- }
- EXPORT_SYMBOL(complete_all);
- static inline long __sched
- do_wait_for_common(struct completion *x,
- long (*action)(long), long timeout, int state)
- {
- if (!x->done) {
- DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue_tail_exclusive(&x->wait, &wait);
- do {
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- break;
- }
- __set_current_state(state);
- spin_unlock_irq(&x->wait.lock);
- timeout = action(timeout);
- spin_lock_irq(&x->wait.lock);
- } while (!x->done && timeout);
- __remove_wait_queue(&x->wait, &wait);
- if (!x->done)
- return timeout;
- }
- x->done--;
- return timeout ?: 1;
- }
- static inline long __sched
- __wait_for_common(struct completion *x,
- long (*action)(long), long timeout, int state)
- {
- might_sleep();
- spin_lock_irq(&x->wait.lock);
- timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
- return timeout;
- }
- static long __sched
- wait_for_common(struct completion *x, long timeout, int state)
- {
- return __wait_for_common(x, schedule_timeout, timeout, state);
- }
- static long __sched
- wait_for_common_io(struct completion *x, long timeout, int state)
- {
- return __wait_for_common(x, io_schedule_timeout, timeout, state);
- }
- /**
- * wait_for_completion: - waits for completion of a task
- * @x: holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout.
- *
- * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
- * and interrupt capability. Also see complete().
- */
- void __sched wait_for_completion(struct completion *x)
- {
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(wait_for_completion);
- /**
- * wait_for_completion_io: - waits for completion of a task
- * @x: holds the state of this particular completion
- *
- * This waits for completion of a specific task to be signaled. Treats any
- * sleeping as waiting for IO for the purposes of process accounting.
- */
- void __sched wait_for_completion_io(struct completion *x)
- {
- wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(wait_for_completion_io);
- /**
- * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
- unsigned long __sched
- wait_for_completion_timeout(struct completion *x, unsigned long timeout)
- {
- return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(wait_for_completion_timeout);
- /**
- * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible. The caller is accounted as waiting for IO.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
- unsigned long __sched
- wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
- {
- return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(wait_for_completion_io_timeout);
- /**
- * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
- * @x: holds the state of this particular completion
- *
- * This waits for completion of a specific task to be signaled. It is
- * interruptible.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
- int __sched wait_for_completion_interruptible(struct completion *x)
- {
- long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT,
- TASK_INTERRUPTIBLE);
- if (t == -ERESTARTSYS)
- return t;
- return 0;
- }
- EXPORT_SYMBOL(wait_for_completion_interruptible);
- /**
- * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. It is interruptible. The timeout is in jiffies.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
- */
- long __sched
- wait_for_completion_interruptible_timeout(struct completion *x,
- unsigned long timeout)
- {
- return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
- }
- EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
- /**
- * wait_for_completion_killable: - waits for completion of a task (killable)
- * @x: holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It can be
- * interrupted by a kill signal.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
- int __sched wait_for_completion_killable(struct completion *x)
- {
- long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
- if (t == -ERESTARTSYS)
- return t;
- return 0;
- }
- EXPORT_SYMBOL(wait_for_completion_killable);
- /**
- * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
- * @x: holds the state of this particular completion
- * @timeout: timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be
- * signaled or for a specified timeout to expire. It can be
- * interrupted by a kill signal. The timeout is in jiffies.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
- */
- long __sched
- wait_for_completion_killable_timeout(struct completion *x,
- unsigned long timeout)
- {
- return wait_for_common(x, timeout, TASK_KILLABLE);
- }
- EXPORT_SYMBOL(wait_for_completion_killable_timeout);
- /**
- * try_wait_for_completion - try to decrement a completion without blocking
- * @x: completion structure
- *
- * Returns: 0 if a decrement cannot be done without blocking
- * 1 if a decrement succeeded.
- *
- * If a completion is being used as a counting completion,
- * attempt to decrement the counter without blocking. This
- * enables us to avoid waiting if the resource the completion
- * is protecting is not available.
- */
- bool try_wait_for_completion(struct completion *x)
- {
- unsigned long flags;
- int ret = 1;
- spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- else
- x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(try_wait_for_completion);
- /**
- * completion_done - Test to see if a completion has any waiters
- * @x: completion structure
- *
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
- * 1 if there are no waiters.
- *
- */
- bool completion_done(struct completion *x)
- {
- unsigned long flags;
- int ret = 1;
- spin_lock_irqsave(&x->wait.lock, flags);
- if (!x->done)
- ret = 0;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return ret;
- }
- EXPORT_SYMBOL(completion_done);
- static long __sched
- sleep_on_common(wait_queue_head_t *q, int state, long timeout)
- {
- unsigned long flags;
- wait_queue_t wait;
- init_waitqueue_entry(&wait, current);
- __set_current_state(state);
- spin_lock_irqsave(&q->lock, flags);
- __add_wait_queue(q, &wait);
- spin_unlock(&q->lock);
- timeout = schedule_timeout(timeout);
- spin_lock_irq(&q->lock);
- __remove_wait_queue(q, &wait);
- spin_unlock_irqrestore(&q->lock, flags);
- return timeout;
- }
- void __sched interruptible_sleep_on(wait_queue_head_t *q)
- {
- sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
- }
- EXPORT_SYMBOL(interruptible_sleep_on);
- long __sched
- interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
- {
- return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
- }
- EXPORT_SYMBOL(interruptible_sleep_on_timeout);
- void __sched sleep_on(wait_queue_head_t *q)
- {
- sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
- }
- EXPORT_SYMBOL(sleep_on);
- long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
- {
- return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
- }
- EXPORT_SYMBOL(sleep_on_timeout);
- #ifdef CONFIG_RT_MUTEXES
- /*
- * rt_mutex_setprio - set the current priority of a task
- * @p: task
- * @prio: prio value (kernel-internal form)
- *
- * This function changes the 'effective' priority of a task. It does
- * not touch ->normal_prio like __setscheduler().
- *
- * Used by the rt_mutex code to implement priority inheritance logic.
- */
- void rt_mutex_setprio(struct task_struct *p, int prio)
- {
- int oldprio, on_rq, running;
- struct rq *rq;
- const struct sched_class *prev_class;
- BUG_ON(prio < 0 || prio > MAX_PRIO);
- rq = __task_rq_lock(p);
- /*
- * Idle task boosting is a nono in general. There is one
- * exception, when PREEMPT_RT and NOHZ is active:
- *
- * The idle task calls get_next_timer_interrupt() and holds
- * the timer wheel base->lock on the CPU and another CPU wants
- * to access the timer (probably to cancel it). We can safely
- * ignore the boosting request, as the idle CPU runs this code
- * with interrupts disabled and will complete the lock
- * protected section without being interrupted. So there is no
- * real need to boost.
- */
- if (unlikely(p == rq->idle)) {
- WARN_ON(p != rq->curr);
- WARN_ON(p->pi_blocked_on);
- goto out_unlock;
- }
- trace_sched_pi_setprio(p, prio);
- oldprio = p->prio;
- prev_class = p->sched_class;
- on_rq = p->on_rq;
- running = task_current(rq, p);
- if (on_rq)
- dequeue_task(rq, p, 0);
- if (running)
- p->sched_class->put_prev_task(rq, p);
- if (rt_prio(prio)) {
- p->sched_class = &rt_sched_class;
- } else {
- if (rt_prio(oldprio))
- p->rt.timeout = 0;
- p->sched_class = &fair_sched_class;
- }
- p->prio = prio;
- if (running)
- p->sched_class->set_curr_task(rq);
- if (on_rq)
- enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
- check_class_changed(rq, p, prev_class, oldprio);
- out_unlock:
- __task_rq_unlock(rq);
- }
- #endif
- void set_user_nice(struct task_struct *p, long nice)
- {
- int old_prio, delta, on_rq;
- unsigned long flags;
- struct rq *rq;
- if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
- return;
- /*
- * We have to be careful, if called from sys_setpriority(),
- * the task might be in the middle of scheduling on another CPU.
- */
- rq = task_rq_lock(p, &flags);
- /*
- * The RT priorities are set via sched_setscheduler(), but we still
- * allow the 'normal' nice value to be set - but as expected
- * it wont have any effect on scheduling until the task is
- * SCHED_FIFO/SCHED_RR:
- */
- if (task_has_rt_policy(p)) {
- p->static_prio = NICE_TO_PRIO(nice);
- goto out_unlock;
- }
- on_rq = p->on_rq;
- if (on_rq)
- dequeue_task(rq, p, 0);
- p->static_prio = NICE_TO_PRIO(nice);
- set_load_weight(p);
- old_prio = p->prio;
- p->prio = effective_prio(p);
- delta = p->prio - old_prio;
- if (on_rq) {
- enqueue_task(rq, p, 0);
- /*
- * If the task increased its priority or is running and
- * lowered its priority, then reschedule its CPU:
- */
- if (delta < 0 || (delta > 0 && task_running(rq, p)))
- resched_task(rq->curr);
- }
- out_unlock:
- task_rq_unlock(rq, p, &flags);
- }
- EXPORT_SYMBOL(set_user_nice);
- /*
- * can_nice - check if a task can reduce its nice value
- * @p: task
- * @nice: nice value
- */
- int can_nice(const struct task_struct *p, const int nice)
- {
- /* convert nice value [19,-20] to rlimit style value [1,40] */
- int nice_rlim = 20 - nice;
- return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
- capable(CAP_SYS_NICE));
- }
- #ifdef __ARCH_WANT_SYS_NICE
- /*
- * sys_nice - change the priority of the current process.
- * @increment: priority increment
- *
- * sys_setpriority is a more generic, but much slower function that
- * does similar things.
- */
- SYSCALL_DEFINE1(nice, int, increment)
- {
- long nice, retval;
- /*
- * Setpriority might change our priority at the same moment.
- * We don't have to worry. Conceptually one call occurs first
- * and we have a single winner.
- */
- if (increment < -40)
- increment = -40;
- if (increment > 40)
- increment = 40;
- nice = TASK_NICE(current) + increment;
- if (nice < -20)
- nice = -20;
- if (nice > 19)
- nice = 19;
- if (increment < 0 && !can_nice(current, nice))
- return -EPERM;
- retval = security_task_setnice(current, nice);
- if (retval)
- return retval;
- set_user_nice(current, nice);
- return 0;
- }
- #endif
- /**
- * task_prio - return the priority value of a given task.
- * @p: the task in question.
- *
- * This is the priority value as seen by users in /proc.
- * RT tasks are offset by -200. Normal tasks are centered
- * around 0, value goes from -16 to +15.
- */
- int task_prio(const struct task_struct *p)
- {
- return p->prio - MAX_RT_PRIO;
- }
- /**
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- */
- int task_nice(const struct task_struct *p)
- {
- return TASK_NICE(p);
- }
- EXPORT_SYMBOL(task_nice);
- /**
- * idle_cpu - is a given cpu idle currently?
- * @cpu: the processor in question.
- */
- int idle_cpu(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- if (rq->curr != rq->idle)
- return 0;
- if (rq->nr_running)
- return 0;
- #ifdef CONFIG_SMP
- if (!llist_empty(&rq->wake_list))
- return 0;
- #endif
- return 1;
- }
- /**
- * idle_task - return the idle task for a given cpu.
- * @cpu: the processor in question.
- */
- struct task_struct *idle_task(int cpu)
- {
- return cpu_rq(cpu)->idle;
- }
- /**
- * find_process_by_pid - find a process with a matching PID value.
- * @pid: the pid in question.
- */
- static struct task_struct *find_process_by_pid(pid_t pid)
- {
- return pid ? find_task_by_vpid(pid) : current;
- }
- /* Actually do priority change: must hold rq lock. */
- static void
- __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
- {
- p->policy = policy;
- p->rt_priority = prio;
- p->normal_prio = normal_prio(p);
- /* we are holding p->pi_lock already */
- p->prio = rt_mutex_getprio(p);
- if (rt_prio(p->prio))
- p->sched_class = &rt_sched_class;
- else
- p->sched_class = &fair_sched_class;
- set_load_weight(p);
- }
- /*
- * check the target process has a UID that matches the current process's
- */
- static bool check_same_owner(struct task_struct *p)
- {
- const struct cred *cred = current_cred(), *pcred;
- bool match;
- rcu_read_lock();
- pcred = __task_cred(p);
- match = (uid_eq(cred->euid, pcred->euid) ||
- uid_eq(cred->euid, pcred->uid));
- rcu_read_unlock();
- return match;
- }
- static int __sched_setscheduler(struct task_struct *p, int policy,
- const struct sched_param *param, bool user)
- {
- int retval, oldprio, oldpolicy = -1, on_rq, running;
- unsigned long flags;
- const struct sched_class *prev_class;
- struct rq *rq;
- int reset_on_fork;
- /* may grab non-irq protected spin_locks */
- BUG_ON(in_interrupt());
- recheck:
- /* double check policy once rq lock held */
- if (policy < 0) {
- reset_on_fork = p->sched_reset_on_fork;
- policy = oldpolicy = p->policy;
- } else {
- reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
- policy &= ~SCHED_RESET_ON_FORK;
- if (policy != SCHED_FIFO && policy != SCHED_RR &&
- policy != SCHED_NORMAL && policy != SCHED_BATCH &&
- policy != SCHED_IDLE)
- return -EINVAL;
- }
- /*
- * Valid priorities for SCHED_FIFO and SCHED_RR are
- * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
- * SCHED_BATCH and SCHED_IDLE is 0.
- */
- if (param->sched_priority < 0 ||
- (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
- (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
- return -EINVAL;
- if (rt_policy(policy) != (param->sched_priority != 0))
- return -EINVAL;
- /*
- * Allow unprivileged RT tasks to decrease priority:
- */
- if (user && !capable(CAP_SYS_NICE)) {
- if (rt_policy(policy)) {
- unsigned long rlim_rtprio =
- task_rlimit(p, RLIMIT_RTPRIO);
- /* can't set/change the rt policy */
- if (policy != p->policy && !rlim_rtprio)
- return -EPERM;
- /* can't increase priority */
- if (param->sched_priority > p->rt_priority &&
- param->sched_priority > rlim_rtprio)
- return -EPERM;
- }
- /*
- * Treat SCHED_IDLE as nice 20. Only allow a switch to
- * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
- */
- if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
- if (!can_nice(p, TASK_NICE(p)))
- return -EPERM;
- }
- /* can't change other user's priorities */
- if (!check_same_owner(p))
- return -EPERM;
- /* Normal users shall not reset the sched_reset_on_fork flag */
- if (p->sched_reset_on_fork && !reset_on_fork)
- return -EPERM;
- }
- if (user) {
- retval = security_task_setscheduler(p);
- if (retval)
- return retval;
- }
- /*
- * make sure no PI-waiters arrive (or leave) while we are
- * changing the priority of the task:
- *
- * To be able to change p->policy safely, the appropriate
- * runqueue lock must be held.
- */
- rq = task_rq_lock(p, &flags);
- /*
- * Changing the policy of the stop threads its a very bad idea
- */
- if (p == rq->stop) {
- task_rq_unlock(rq, p, &flags);
- return -EINVAL;
- }
- /*
- * If not changing anything there's no need to proceed further:
- */
- if (unlikely(policy == p->policy && (!rt_policy(policy) ||
- param->sched_priority == p->rt_priority))) {
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- return 0;
- }
- #ifdef CONFIG_RT_GROUP_SCHED
- if (user) {
- /*
- * Do not allow realtime tasks into groups that have no runtime
- * assigned.
- */
- if (rt_bandwidth_enabled() && rt_policy(policy) &&
- task_group(p)->rt_bandwidth.rt_runtime == 0 &&
- !task_group_is_autogroup(task_group(p))) {
- task_rq_unlock(rq, p, &flags);
- return -EPERM;
- }
- }
- #endif
- /* recheck policy now with rq lock held */
- if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
- policy = oldpolicy = -1;
- task_rq_unlock(rq, p, &flags);
- goto recheck;
- }
- on_rq = p->on_rq;
- running = task_current(rq, p);
- if (on_rq)
- dequeue_task(rq, p, 0);
- if (running)
- p->sched_class->put_prev_task(rq, p);
- p->sched_reset_on_fork = reset_on_fork;
- oldprio = p->prio;
- prev_class = p->sched_class;
- __setscheduler(rq, p, policy, param->sched_priority);
- if (running)
- p->sched_class->set_curr_task(rq);
- if (on_rq) {
- /*
- * We enqueue to tail when the priority of a task is
- * increased (user space view).
- */
- enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
- }
- check_class_changed(rq, p, prev_class, oldprio);
- task_rq_unlock(rq, p, &flags);
- rt_mutex_adjust_pi(p);
- return 0;
- }
- /**
- * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * NOTE that the task may be already dead.
- */
- int sched_setscheduler(struct task_struct *p, int policy,
- const struct sched_param *param)
- {
- return __sched_setscheduler(p, policy, param, true);
- }
- EXPORT_SYMBOL_GPL(sched_setscheduler);
- /**
- * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * Just like sched_setscheduler, only don't bother checking if the
- * current context has permission. For example, this is needed in
- * stop_machine(): we create temporary high priority worker threads,
- * but our caller might not have that capability.
- */
- int sched_setscheduler_nocheck(struct task_struct *p, int policy,
- const struct sched_param *param)
- {
- return __sched_setscheduler(p, policy, param, false);
- }
- static int
- do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
- {
- struct sched_param lparam;
- struct task_struct *p;
- int retval;
- if (!param || pid < 0)
- return -EINVAL;
- if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
- return -EFAULT;
- rcu_read_lock();
- retval = -ESRCH;
- p = find_process_by_pid(pid);
- if (p != NULL)
- retval = sched_setscheduler(p, policy, &lparam);
- rcu_read_unlock();
- return retval;
- }
- /**
- * sys_sched_setscheduler - set/change the scheduler policy and RT priority
- * @pid: the pid in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- */
- SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
- struct sched_param __user *, param)
- {
- /* negative values for policy are not valid */
- if (policy < 0)
- return -EINVAL;
- return do_sched_setscheduler(pid, policy, param);
- }
- /**
- * sys_sched_setparam - set/change the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the new RT priority.
- */
- SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
- {
- return do_sched_setscheduler(pid, -1, param);
- }
- /**
- * sys_sched_getscheduler - get the policy (scheduling class) of a thread
- * @pid: the pid in question.
- */
- SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
- {
- struct task_struct *p;
- int retval;
- if (pid < 0)
- return -EINVAL;
- retval = -ESRCH;
- rcu_read_lock();
- p = find_process_by_pid(pid);
- if (p) {
- retval = security_task_getscheduler(p);
- if (!retval)
- retval = p->policy
- | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
- }
- rcu_read_unlock();
- return retval;
- }
- /**
- * sys_sched_getparam - get the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the RT priority.
- */
- SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
- {
- struct sched_param lp;
- struct task_struct *p;
- int retval;
- if (!param || pid < 0)
- return -EINVAL;
- rcu_read_lock();
- p = find_process_by_pid(pid);
- retval = -ESRCH;
- if (!p)
- goto out_unlock;
- retval = security_task_getscheduler(p);
- if (retval)
- goto out_unlock;
- lp.sched_priority = p->rt_priority;
- rcu_read_unlock();
- /*
- * This one might sleep, we cannot do it with a spinlock held ...
- */
- retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
- return retval;
- out_unlock:
- rcu_read_unlock();
- return retval;
- }
- long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
- {
- cpumask_var_t cpus_allowed, new_mask;
- struct task_struct *p;
- int retval;
- get_online_cpus();
- rcu_read_lock();
- p = find_process_by_pid(pid);
- if (!p) {
- rcu_read_unlock();
- put_online_cpus();
- return -ESRCH;
- }
- /* Prevent p going away */
- get_task_struct(p);
- rcu_read_unlock();
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
- retval = -ENOMEM;
- goto out_put_task;
- }
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
- retval = -ENOMEM;
- goto out_free_cpus_allowed;
- }
- retval = -EPERM;
- if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE))
- goto out_unlock;
- retval = security_task_setscheduler(p);
- if (retval)
- goto out_unlock;
- cpuset_cpus_allowed(p, cpus_allowed);
- cpumask_and(new_mask, in_mask, cpus_allowed);
- again:
- retval = set_cpus_allowed_ptr(p, new_mask);
- if (!retval) {
- cpuset_cpus_allowed(p, cpus_allowed);
- if (!cpumask_subset(new_mask, cpus_allowed)) {
- /*
- * We must have raced with a concurrent cpuset
- * update. Just reset the cpus_allowed to the
- * cpuset's cpus_allowed
- */
- cpumask_copy(new_mask, cpus_allowed);
- goto again;
- }
- }
- out_unlock:
- free_cpumask_var(new_mask);
- out_free_cpus_allowed:
- free_cpumask_var(cpus_allowed);
- out_put_task:
- put_task_struct(p);
- put_online_cpus();
- return retval;
- }
- static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
- struct cpumask *new_mask)
- {
- if (len < cpumask_size())
- cpumask_clear(new_mask);
- else if (len > cpumask_size())
- len = cpumask_size();
- return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
- }
- /**
- * sys_sched_setaffinity - set the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to the new cpu mask
- */
- SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
- unsigned long __user *, user_mask_ptr)
- {
- cpumask_var_t new_mask;
- int retval;
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
- return -ENOMEM;
- retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
- if (retval == 0)
- retval = sched_setaffinity(pid, new_mask);
- free_cpumask_var(new_mask);
- return retval;
- }
- long sched_getaffinity(pid_t pid, struct cpumask *mask)
- {
- struct task_struct *p;
- unsigned long flags;
- int retval;
- get_online_cpus();
- rcu_read_lock();
- retval = -ESRCH;
- p = find_process_by_pid(pid);
- if (!p)
- goto out_unlock;
- retval = security_task_getscheduler(p);
- if (retval)
- goto out_unlock;
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- out_unlock:
- rcu_read_unlock();
- put_online_cpus();
- return retval;
- }
- /**
- * sys_sched_getaffinity - get the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to hold the current cpu mask
- */
- SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
- unsigned long __user *, user_mask_ptr)
- {
- int ret;
- cpumask_var_t mask;
- if ((len * BITS_PER_BYTE) < nr_cpu_ids)
- return -EINVAL;
- if (len & (sizeof(unsigned long)-1))
- return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
- return -ENOMEM;
- ret = sched_getaffinity(pid, mask);
- if (ret == 0) {
- size_t retlen = min_t(size_t, len, cpumask_size());
- if (copy_to_user(user_mask_ptr, mask, retlen))
- ret = -EFAULT;
- else
- ret = retlen;
- }
- free_cpumask_var(mask);
- return ret;
- }
- /**
- * sys_sched_yield - yield the current processor to other threads.
- *
- * This function yields the current CPU to other tasks. If there are no
- * other threads running on this CPU then this function will return.
- */
- SYSCALL_DEFINE0(sched_yield)
- {
- struct rq *rq = this_rq_lock();
- schedstat_inc(rq, yld_count);
- current->sched_class->yield_task(rq);
- /*
- * Since we are going to call schedule() anyway, there's
- * no need to preempt or enable interrupts:
- */
- __release(rq->lock);
- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
- do_raw_spin_unlock(&rq->lock);
- sched_preempt_enable_no_resched();
- schedule();
- return 0;
- }
- static inline int should_resched(void)
- {
- return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
- }
- static void __cond_resched(void)
- {
- add_preempt_count(PREEMPT_ACTIVE);
- __schedule();
- sub_preempt_count(PREEMPT_ACTIVE);
- }
- int __sched _cond_resched(void)
- {
- if (should_resched()) {
- __cond_resched();
- return 1;
- }
- return 0;
- }
- EXPORT_SYMBOL(_cond_resched);
- /*
- * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
- * call schedule, and on return reacquire the lock.
- *
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
- * operations here to prevent schedule() from being called twice (once via
- * spin_unlock(), once by hand).
- */
- int __cond_resched_lock(spinlock_t *lock)
- {
- int resched = should_resched();
- int ret = 0;
- lockdep_assert_held(lock);
- if (spin_needbreak(lock) || resched) {
- spin_unlock(lock);
- if (resched)
- __cond_resched();
- else
- cpu_relax();
- ret = 1;
- spin_lock(lock);
- }
- return ret;
- }
- EXPORT_SYMBOL(__cond_resched_lock);
- int __sched __cond_resched_softirq(void)
- {
- BUG_ON(!in_softirq());
- if (should_resched()) {
- local_bh_enable();
- __cond_resched();
- local_bh_disable();
- return 1;
- }
- return 0;
- }
- EXPORT_SYMBOL(__cond_resched_softirq);
- /**
- * yield - yield the current processor to other threads.
- *
- * Do not ever use this function, there's a 99% chance you're doing it wrong.
- *
- * The scheduler is at all times free to pick the calling task as the most
- * eligible task to run, if removing the yield() call from your code breaks
- * it, its already broken.
- *
- * Typical broken usage is:
- *
- * while (!event)
- * yield();
- *
- * where one assumes that yield() will let 'the other' process run that will
- * make event true. If the current task is a SCHED_FIFO task that will never
- * happen. Never use yield() as a progress guarantee!!
- *
- * If you want to use yield() to wait for something, use wait_event().
- * If you want to use yield() to be 'nice' for others, use cond_resched().
- * If you still want to use yield(), do not!
- */
- void __sched yield(void)
- {
- set_current_state(TASK_RUNNING);
- sys_sched_yield();
- }
- EXPORT_SYMBOL(yield);
- /**
- * yield_to - yield the current processor to another thread in
- * your thread group, or accelerate that thread toward the
- * processor it's on.
- * @p: target task
- * @preempt: whether task preemption is allowed or not
- *
- * It's the caller's job to ensure that the target task struct
- * can't go away on us before we can do any checks.
- *
- * Returns true if we indeed boosted the target task.
- */
- bool __sched yield_to(struct task_struct *p, bool preempt)
- {
- struct task_struct *curr = current;
- struct rq *rq, *p_rq;
- unsigned long flags;
- bool yielded = 0;
- local_irq_save(flags);
- rq = this_rq();
- again:
- p_rq = task_rq(p);
- double_rq_lock(rq, p_rq);
- while (task_rq(p) != p_rq) {
- double_rq_unlock(rq, p_rq);
- goto again;
- }
- if (!curr->sched_class->yield_to_task)
- goto out;
- if (curr->sched_class != p->sched_class)
- goto out;
- if (task_running(p_rq, p) || p->state)
- goto out;
- yielded = curr->sched_class->yield_to_task(rq, p, preempt);
- if (yielded) {
- schedstat_inc(rq, yld_count);
- /*
- * Make p's CPU reschedule; pick_next_entity takes care of
- * fairness.
- */
- if (preempt && rq != p_rq)
- resched_task(p_rq->curr);
- }
- out:
- double_rq_unlock(rq, p_rq);
- local_irq_restore(flags);
- if (yielded)
- schedule();
- return yielded;
- }
- EXPORT_SYMBOL_GPL(yield_to);
- /*
- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
- * that process accounting knows that this is a task in IO wait state.
- */
- void __sched io_schedule(void)
- {
- struct rq *rq = raw_rq();
- delayacct_blkio_start();
- atomic_inc(&rq->nr_iowait);
- blk_flush_plug(current);
- current->in_iowait = 1;
- schedule();
- current->in_iowait = 0;
- atomic_dec(&rq->nr_iowait);
- delayacct_blkio_end();
- }
- EXPORT_SYMBOL(io_schedule);
- long __sched io_schedule_timeout(long timeout)
- {
- struct rq *rq = raw_rq();
- long ret;
- delayacct_blkio_start();
- atomic_inc(&rq->nr_iowait);
- blk_flush_plug(current);
- current->in_iowait = 1;
- ret = schedule_timeout(timeout);
- current->in_iowait = 0;
- atomic_dec(&rq->nr_iowait);
- delayacct_blkio_end();
- return ret;
- }
- EXPORT_SYMBOL(io_schedule_timeout);
- /**
- * sys_sched_get_priority_max - return maximum RT priority.
- * @policy: scheduling class.
- *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
- */
- SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
- {
- int ret = -EINVAL;
- switch (policy) {
- case SCHED_FIFO:
- case SCHED_RR:
- ret = MAX_USER_RT_PRIO-1;
- break;
- case SCHED_NORMAL:
- case SCHED_BATCH:
- case SCHED_IDLE:
- ret = 0;
- break;
- }
- return ret;
- }
- /**
- * sys_sched_get_priority_min - return minimum RT priority.
- * @policy: scheduling class.
- *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
- */
- SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
- {
- int ret = -EINVAL;
- switch (policy) {
- case SCHED_FIFO:
- case SCHED_RR:
- ret = 1;
- break;
- case SCHED_NORMAL:
- case SCHED_BATCH:
- case SCHED_IDLE:
- ret = 0;
- }
- return ret;
- }
- /**
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- */
- SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- struct timespec __user *, interval)
- {
- struct task_struct *p;
- unsigned int time_slice;
- unsigned long flags;
- struct rq *rq;
- int retval;
- struct timespec t;
- if (pid < 0)
- return -EINVAL;
- retval = -ESRCH;
- rcu_read_lock();
- p = find_process_by_pid(pid);
- if (!p)
- goto out_unlock;
- retval = security_task_getscheduler(p);
- if (retval)
- goto out_unlock;
- rq = task_rq_lock(p, &flags);
- time_slice = p->sched_class->get_rr_interval(rq, p);
- task_rq_unlock(rq, p, &flags);
- rcu_read_unlock();
- jiffies_to_timespec(time_slice, &t);
- retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
- return retval;
- out_unlock:
- rcu_read_unlock();
- return retval;
- }
- static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
- void sched_show_task(struct task_struct *p)
- {
- unsigned long free = 0;
- unsigned state;
- state = p->state ? __ffs(p->state) + 1 : 0;
- printk(KERN_INFO "%-15.15s %c", p->comm,
- state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
- #if BITS_PER_LONG == 32
- if (state == TASK_RUNNING)
- printk(KERN_CONT " running ");
- else
- printk(KERN_CONT " %08lx ", thread_saved_pc(p));
- #else
- if (state == TASK_RUNNING)
- printk(KERN_CONT " running task ");
- else
- printk(KERN_CONT " %016lx ", thread_saved_pc(p));
- #endif
- #ifdef CONFIG_DEBUG_STACK_USAGE
- free = stack_not_used(p);
- #endif
- printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
- task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
- (unsigned long)task_thread_info(p)->flags);
- show_stack(p, NULL);
- }
- void show_state_filter(unsigned long state_filter)
- {
- struct task_struct *g, *p;
- #if BITS_PER_LONG == 32
- printk(KERN_INFO
- " task PC stack pid father\n");
- #else
- printk(KERN_INFO
- " task PC stack pid father\n");
- #endif
- rcu_read_lock();
- do_each_thread(g, p) {
- /*
- * reset the NMI-timeout, listing all files on a slow
- * console might take a lot of time:
- */
- touch_nmi_watchdog();
- if (!state_filter || (p->state & state_filter))
- sched_show_task(p);
- } while_each_thread(g, p);
- touch_all_softlockup_watchdogs();
- #ifdef CONFIG_SYSRQ_SCHED_DEBUG
- sysrq_sched_debug_show();
- #endif
- rcu_read_unlock();
- /*
- * Only show locks if all tasks are dumped:
- */
- if (!state_filter)
- debug_show_all_locks();
- }
- void __cpuinit init_idle_bootup_task(struct task_struct *idle)
- {
- idle->sched_class = &idle_sched_class;
- }
- /**
- * init_idle - set up an idle thread for a given CPU
- * @idle: task in question
- * @cpu: cpu the idle task belongs to
- *
- * NOTE: this function does not set the idle thread's NEED_RESCHED
- * flag, to make booting more robust.
- */
- void __cpuinit init_idle(struct task_struct *idle, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
- __sched_fork(idle);
- idle->state = TASK_RUNNING;
- idle->se.exec_start = sched_clock();
- do_set_cpus_allowed(idle, cpumask_of(cpu));
- /*
- * We're having a chicken and egg problem, even though we are
- * holding rq->lock, the cpu isn't yet set to this cpu so the
- * lockdep check in task_group() will fail.
- *
- * Similar case to sched_fork(). / Alternatively we could
- * use task_rq_lock() here and obtain the other rq->lock.
- *
- * Silence PROVE_RCU
- */
- rcu_read_lock();
- __set_task_cpu(idle, cpu);
- rcu_read_unlock();
- rq->curr = rq->idle = idle;
- #if defined(CONFIG_SMP)
- idle->on_cpu = 1;
- #endif
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- /* Set the preempt count _outside_ the spinlocks! */
- task_thread_info(idle)->preempt_count = 0;
- /*
- * The idle tasks have their own, simple scheduling class:
- */
- idle->sched_class = &idle_sched_class;
- ftrace_graph_init_idle_task(idle, cpu);
- #if defined(CONFIG_SMP)
- sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
- #endif
- }
- #ifdef CONFIG_SMP
- void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- {
- if (p->sched_class && p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, new_mask);
- cpumask_copy(&p->cpus_allowed, new_mask);
- p->nr_cpus_allowed = cpumask_weight(new_mask);
- }
- /*
- * This is how migration works:
- *
- * 1) we invoke migration_cpu_stop() on the target CPU using
- * stop_one_cpu().
- * 2) stopper starts to run (implicitly forcing the migrated thread
- * off the CPU)
- * 3) it checks whether the migrated task is still in the wrong runqueue.
- * 4) if it's in the wrong runqueue then the migration thread removes
- * it and puts it into the right queue.
- * 5) stopper completes and stop_one_cpu() returns and the migration
- * is done.
- */
- /*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
- */
- int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
- {
- unsigned long flags;
- struct rq *rq;
- unsigned int dest_cpu;
- int ret = 0;
- rq = task_rq_lock(p, &flags);
- if (cpumask_equal(&p->cpus_allowed, new_mask))
- goto out;
- if (!cpumask_intersects(new_mask, cpu_active_mask)) {
- ret = -EINVAL;
- goto out;
- }
- if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
- ret = -EINVAL;
- goto out;
- }
- do_set_cpus_allowed(p, new_mask);
- /* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
- goto out;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
- if (p->on_rq) {
- struct migration_arg arg = { p, dest_cpu };
- /* Need help from migration thread: drop lock and wait. */
- task_rq_unlock(rq, p, &flags);
- stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
- tlb_migrate_finish(p->mm);
- return 0;
- }
- out:
- task_rq_unlock(rq, p, &flags);
- return ret;
- }
- EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
- /*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
- * this because either it can't run here any more (set_cpus_allowed()
- * away from this CPU, or CPU going down), or because we're
- * attempting to rebalance this task on exec (sched_exec).
- *
- * So we race with normal scheduler movements, but that's OK, as long
- * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
- */
- static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
- {
- struct rq *rq_dest, *rq_src;
- bool moved = false;
- int ret = 0;
- if (unlikely(!cpu_active(dest_cpu)))
- return ret;
- rq_src = cpu_rq(src_cpu);
- rq_dest = cpu_rq(dest_cpu);
- raw_spin_lock(&p->pi_lock);
- double_rq_lock(rq_src, rq_dest);
- /* Already moved. */
- if (task_cpu(p) != src_cpu)
- goto done;
- /* Affinity changed (again). */
- if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
- goto fail;
- /*
- * If we're not on a rq, the next wake-up will ensure we're
- * placed properly.
- */
- if (p->on_rq) {
- dequeue_task(rq_src, p, 0);
- set_task_cpu(p, dest_cpu);
- enqueue_task(rq_dest, p, 0);
- check_preempt_curr(rq_dest, p, 0);
- moved = true;
- }
- done:
- ret = 1;
- fail:
- double_rq_unlock(rq_src, rq_dest);
- raw_spin_unlock(&p->pi_lock);
- if (moved && task_notify_on_migrate(p))
- atomic_notifier_call_chain(&migration_notifier_head,
- dest_cpu, (void *)src_cpu);
- return ret;
- }
- /*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
- * and performs thread migration by bumping thread off CPU then
- * 'pushing' onto another runqueue.
- */
- static int migration_cpu_stop(void *data)
- {
- struct migration_arg *arg = data;
- /*
- * The original target cpu might have gone down and we might
- * be on another cpu but it doesn't matter.
- */
- local_irq_disable();
- __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
- local_irq_enable();
- return 0;
- }
- #ifdef CONFIG_HOTPLUG_CPU
- /*
- * Ensures that the idle task is using init_mm right before its cpu goes
- * offline.
- */
- void idle_task_exit(void)
- {
- struct mm_struct *mm = current->active_mm;
- BUG_ON(cpu_online(smp_processor_id()));
- if (mm != &init_mm)
- switch_mm(mm, &init_mm, current);
- mmdrop(mm);
- }
- /*
- * Since this CPU is going 'away' for a while, fold any nr_active delta
- * we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
- *
- * Also see the comment "Global load-average calculations".
- */
- static void calc_load_migrate(struct rq *rq)
- {
- long delta = calc_load_fold_active(rq);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
- }
- /*
- * Migrate all tasks from the rq, sleeping tasks will be migrated by
- * try_to_wake_up()->select_task_rq().
- *
- * Called with rq->lock held even though we'er in stop_machine() and
- * there's no concurrency possible, we hold the required locks anyway
- * because of lock validation efforts.
- */
- static void migrate_tasks(unsigned int dead_cpu)
- {
- struct rq *rq = cpu_rq(dead_cpu);
- struct task_struct *next, *stop = rq->stop;
- int dest_cpu;
- /*
- * Fudge the rq selection such that the below task selection loop
- * doesn't get stuck on the currently eligible stop task.
- *
- * We're currently inside stop_machine() and the rq is either stuck
- * in the stop_machine_cpu_stop() loop, or we're executing this code,
- * either way we should never end up calling schedule() until we're
- * done here.
- */
- rq->stop = NULL;
- for ( ; ; ) {
- /*
- * There's this thread running, bail when that's the only
- * remaining thread.
- */
- if (rq->nr_running == 1)
- break;
- next = pick_next_task(rq);
- BUG_ON(!next);
- next->sched_class->put_prev_task(rq, next);
- /* Find suitable destination for @next, with force if needed. */
- dest_cpu = select_fallback_rq(dead_cpu, next);
- raw_spin_unlock(&rq->lock);
- cpu_relax();
- __migrate_task(next, dead_cpu, dest_cpu);
- raw_spin_lock(&rq->lock);
- }
- rq->stop = stop;
- }
- #endif /* CONFIG_HOTPLUG_CPU */
- #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
- static struct ctl_table sd_ctl_dir[] = {
- {
- .procname = "sched_domain",
- .mode = 0555,
- },
- {}
- };
- static struct ctl_table sd_ctl_root[] = {
- {
- .procname = "kernel",
- .mode = 0555,
- .child = sd_ctl_dir,
- },
- {}
- };
- static struct ctl_table *sd_alloc_ctl_entry(int n)
- {
- struct ctl_table *entry =
- kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
- return entry;
- }
- static void sd_free_ctl_entry(struct ctl_table **tablep)
- {
- struct ctl_table *entry;
- /*
- * In the intermediate directories, both the child directory and
- * procname are dynamically allocated and could fail but the mode
- * will always be set. In the lowest directory the names are
- * static strings and all have proc handlers.
- */
- for (entry = *tablep; entry->mode; entry++) {
- if (entry->child)
- sd_free_ctl_entry(&entry->child);
- if (entry->proc_handler == NULL)
- kfree(entry->procname);
- }
- kfree(*tablep);
- *tablep = NULL;
- }
- static int min_load_idx = 0;
- static int max_load_idx = CPU_LOAD_IDX_MAX-1;
- static void
- set_table_entry(struct ctl_table *entry,
- const char *procname, void *data, int maxlen,
- umode_t mode, proc_handler *proc_handler,
- bool load_idx)
- {
- entry->procname = procname;
- entry->data = data;
- entry->maxlen = maxlen;
- entry->mode = mode;
- entry->proc_handler = proc_handler;
- if (load_idx) {
- entry->extra1 = &min_load_idx;
- entry->extra2 = &max_load_idx;
- }
- }
- static struct ctl_table *
- sd_alloc_ctl_domain_table(struct sched_domain *sd)
- {
- struct ctl_table *table = sd_alloc_ctl_entry(13);
- if (table == NULL)
- return NULL;
- set_table_entry(&table[0], "min_interval", &sd->min_interval,
- sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[1], "max_interval", &sd->max_interval,
- sizeof(long), 0644, proc_doulongvec_minmax, false);
- set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
- sizeof(int), 0644, proc_dointvec_minmax, true);
- set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
- sizeof(int), 0644, proc_dointvec_minmax, true);
- set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
- sizeof(int), 0644, proc_dointvec_minmax, true);
- set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
- sizeof(int), 0644, proc_dointvec_minmax, true);
- set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
- sizeof(int), 0644, proc_dointvec_minmax, true);
- set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
- sizeof(int), 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
- sizeof(int), 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[9], "cache_nice_tries",
- &sd->cache_nice_tries,
- sizeof(int), 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[10], "flags", &sd->flags,
- sizeof(int), 0644, proc_dointvec_minmax, false);
- set_table_entry(&table[11], "name", sd->name,
- CORENAME_MAX_SIZE, 0444, proc_dostring, false);
- /* &table[12] is terminator */
- return table;
- }
- static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
- {
- struct ctl_table *entry, *table;
- struct sched_domain *sd;
- int domain_num = 0, i;
- char buf[32];
- for_each_domain(cpu, sd)
- domain_num++;
- entry = table = sd_alloc_ctl_entry(domain_num + 1);
- if (table == NULL)
- return NULL;
- i = 0;
- for_each_domain(cpu, sd) {
- snprintf(buf, 32, "domain%d", i);
- entry->procname = kstrdup(buf, GFP_KERNEL);
- entry->mode = 0555;
- entry->child = sd_alloc_ctl_domain_table(sd);
- entry++;
- i++;
- }
- return table;
- }
- static struct ctl_table_header *sd_sysctl_header;
- static void register_sched_domain_sysctl(void)
- {
- int i, cpu_num = num_possible_cpus();
- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
- char buf[32];
- WARN_ON(sd_ctl_dir[0].child);
- sd_ctl_dir[0].child = entry;
- if (entry == NULL)
- return;
- for_each_possible_cpu(i) {
- snprintf(buf, 32, "cpu%d", i);
- entry->procname = kstrdup(buf, GFP_KERNEL);
- entry->mode = 0555;
- entry->child = sd_alloc_ctl_cpu_table(i);
- entry++;
- }
- WARN_ON(sd_sysctl_header);
- sd_sysctl_header = register_sysctl_table(sd_ctl_root);
- }
- /* may be called multiple times per register */
- static void unregister_sched_domain_sysctl(void)
- {
- if (sd_sysctl_header)
- unregister_sysctl_table(sd_sysctl_header);
- sd_sysctl_header = NULL;
- if (sd_ctl_dir[0].child)
- sd_free_ctl_entry(&sd_ctl_dir[0].child);
- }
- #else
- static void register_sched_domain_sysctl(void)
- {
- }
- static void unregister_sched_domain_sysctl(void)
- {
- }
- #endif
- static void set_rq_online(struct rq *rq)
- {
- if (!rq->online) {
- const struct sched_class *class;
- cpumask_set_cpu(rq->cpu, rq->rd->online);
- rq->online = 1;
- for_each_class(class) {
- if (class->rq_online)
- class->rq_online(rq);
- }
- }
- }
- static void set_rq_offline(struct rq *rq)
- {
- if (rq->online) {
- const struct sched_class *class;
- for_each_class(class) {
- if (class->rq_offline)
- class->rq_offline(rq);
- }
- cpumask_clear_cpu(rq->cpu, rq->rd->online);
- rq->online = 0;
- }
- }
- /*
- * migration_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
- static int __cpuinit
- migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
- {
- int cpu = (long)hcpu;
- unsigned long flags;
- struct rq *rq = cpu_rq(cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- rq->calc_load_update = calc_load_update;
- break;
- case CPU_ONLINE:
- /* Update our root-domain */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_online(rq);
- }
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- break;
- #ifdef CONFIG_HOTPLUG_CPU
- case CPU_DYING:
- sched_ttwu_pending();
- /* Update our root-domain */
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_offline(rq);
- }
- migrate_tasks(cpu);
- BUG_ON(rq->nr_running != 1); /* the migration thread */
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- break;
- case CPU_DEAD:
- calc_load_migrate(rq);
- break;
- #endif
- }
- update_max_interval();
- return NOTIFY_OK;
- }
- /*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else. This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
- static struct notifier_block __cpuinitdata migration_notifier = {
- .notifier_call = migration_call,
- .priority = CPU_PRI_MIGRATION,
- };
- static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
- {
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- set_cpu_active((long)hcpu, true);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
- {
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- set_cpu_active((long)hcpu, false);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- static int __init migration_init(void)
- {
- void *cpu = (void *)(long)smp_processor_id();
- int err;
- /* Initialize migration for the boot CPU */
- err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
- BUG_ON(err == NOTIFY_BAD);
- migration_call(&migration_notifier, CPU_ONLINE, cpu);
- register_cpu_notifier(&migration_notifier);
- /* Register cpu active notifiers */
- cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
- cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
- return 0;
- }
- early_initcall(migration_init);
- #endif
- #ifdef CONFIG_SMP
- static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
- #ifdef CONFIG_SCHED_DEBUG
- static __read_mostly int sched_debug_enabled;
- static int __init sched_debug_setup(char *str)
- {
- sched_debug_enabled = 1;
- return 0;
- }
- early_param("sched_debug", sched_debug_setup);
- static inline bool sched_debug(void)
- {
- return sched_debug_enabled;
- }
- static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
- struct cpumask *groupmask)
- {
- struct sched_group *group = sd->groups;
- char str[256];
- cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
- cpumask_clear(groupmask);
- printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
- if (!(sd->flags & SD_LOAD_BALANCE)) {
- printk("does not load-balance\n");
- if (sd->parent)
- printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
- " has parent");
- return -1;
- }
- printk(KERN_CONT "span %s level %s\n", str, sd->name);
- if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
- printk(KERN_ERR "ERROR: domain->span does not contain "
- "CPU%d\n", cpu);
- }
- if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
- printk(KERN_ERR "ERROR: domain->groups does not contain"
- " CPU%d\n", cpu);
- }
- printk(KERN_DEBUG "%*s groups:", level + 1, "");
- do {
- if (!group) {
- printk("\n");
- printk(KERN_ERR "ERROR: group is NULL\n");
- break;
- }
- /*
- * Even though we initialize ->power to something semi-sane,
- * we leave power_orig unset. This allows us to detect if
- * domain iteration is still funny without causing /0 traps.
- */
- if (!group->sgp->power_orig) {
- printk(KERN_CONT "\n");
- printk(KERN_ERR "ERROR: domain->cpu_power not "
- "set\n");
- break;
- }
- if (!cpumask_weight(sched_group_cpus(group))) {
- printk(KERN_CONT "\n");
- printk(KERN_ERR "ERROR: empty group\n");
- break;
- }
- if (!(sd->flags & SD_OVERLAP) &&
- cpumask_intersects(groupmask, sched_group_cpus(group))) {
- printk(KERN_CONT "\n");
- printk(KERN_ERR "ERROR: repeated CPUs\n");
- break;
- }
- cpumask_or(groupmask, groupmask, sched_group_cpus(group));
- cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
- printk(KERN_CONT " %s", str);
- if (group->sgp->power != SCHED_POWER_SCALE) {
- printk(KERN_CONT " (cpu_power = %d)",
- group->sgp->power);
- }
- group = group->next;
- } while (group != sd->groups);
- printk(KERN_CONT "\n");
- if (!cpumask_equal(sched_domain_span(sd), groupmask))
- printk(KERN_ERR "ERROR: groups don't span domain->span\n");
- if (sd->parent &&
- !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
- printk(KERN_ERR "ERROR: parent span is not a superset "
- "of domain->span\n");
- return 0;
- }
- static void sched_domain_debug(struct sched_domain *sd, int cpu)
- {
- int level = 0;
- if (!sched_debug_enabled)
- return;
- if (!sd) {
- printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
- return;
- }
- printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
- for (;;) {
- if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
- break;
- level++;
- sd = sd->parent;
- if (!sd)
- break;
- }
- }
- #else /* !CONFIG_SCHED_DEBUG */
- # define sched_domain_debug(sd, cpu) do { } while (0)
- static inline bool sched_debug(void)
- {
- return false;
- }
- #endif /* CONFIG_SCHED_DEBUG */
- static int sd_degenerate(struct sched_domain *sd)
- {
- if (cpumask_weight(sched_domain_span(sd)) == 1)
- return 1;
- /* Following flags need at least 2 groups */
- if (sd->flags & (SD_LOAD_BALANCE |
- SD_BALANCE_NEWIDLE |
- SD_BALANCE_FORK |
- SD_BALANCE_EXEC |
- SD_SHARE_CPUPOWER |
- SD_SHARE_PKG_RESOURCES)) {
- if (sd->groups != sd->groups->next)
- return 0;
- }
- /* Following flags don't use groups */
- if (sd->flags & (SD_WAKE_AFFINE))
- return 0;
- return 1;
- }
- static int
- sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
- {
- unsigned long cflags = sd->flags, pflags = parent->flags;
- if (sd_degenerate(parent))
- return 1;
- if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
- return 0;
- /* Flags needing groups don't count if only 1 group in parent */
- if (parent->groups == parent->groups->next) {
- pflags &= ~(SD_LOAD_BALANCE |
- SD_BALANCE_NEWIDLE |
- SD_BALANCE_FORK |
- SD_BALANCE_EXEC |
- SD_SHARE_CPUPOWER |
- SD_SHARE_PKG_RESOURCES);
- if (nr_node_ids == 1)
- pflags &= ~SD_SERIALIZE;
- }
- if (~cflags & pflags)
- return 0;
- return 1;
- }
- static void free_rootdomain(struct rcu_head *rcu)
- {
- struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
- cpupri_cleanup(&rd->cpupri);
- free_cpumask_var(rd->rto_mask);
- free_cpumask_var(rd->online);
- free_cpumask_var(rd->span);
- kfree(rd);
- }
- static void rq_attach_root(struct rq *rq, struct root_domain *rd)
- {
- struct root_domain *old_rd = NULL;
- unsigned long flags;
- raw_spin_lock_irqsave(&rq->lock, flags);
- if (rq->rd) {
- old_rd = rq->rd;
- if (cpumask_test_cpu(rq->cpu, old_rd->online))
- set_rq_offline(rq);
- cpumask_clear_cpu(rq->cpu, old_rd->span);
- /*
- * If we dont want to free the old_rt yet then
- * set old_rd to NULL to skip the freeing later
- * in this function:
- */
- if (!atomic_dec_and_test(&old_rd->refcount))
- old_rd = NULL;
- }
- atomic_inc(&rd->refcount);
- rq->rd = rd;
- cpumask_set_cpu(rq->cpu, rd->span);
- if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
- set_rq_online(rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
- if (old_rd)
- call_rcu_sched(&old_rd->rcu, free_rootdomain);
- }
- static int init_rootdomain(struct root_domain *rd)
- {
- memset(rd, 0, sizeof(*rd));
- if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
- goto out;
- if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
- goto free_span;
- if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
- goto free_online;
- if (cpupri_init(&rd->cpupri) != 0)
- goto free_rto_mask;
- return 0;
- free_rto_mask:
- free_cpumask_var(rd->rto_mask);
- free_online:
- free_cpumask_var(rd->online);
- free_span:
- free_cpumask_var(rd->span);
- out:
- return -ENOMEM;
- }
- /*
- * By default the system creates a single root-domain with all cpus as
- * members (mimicking the global state we have today).
- */
- struct root_domain def_root_domain;
- static void init_defrootdomain(void)
- {
- init_rootdomain(&def_root_domain);
- atomic_set(&def_root_domain.refcount, 1);
- }
- static struct root_domain *alloc_rootdomain(void)
- {
- struct root_domain *rd;
- rd = kmalloc(sizeof(*rd), GFP_KERNEL);
- if (!rd)
- return NULL;
- if (init_rootdomain(rd) != 0) {
- kfree(rd);
- return NULL;
- }
- return rd;
- }
- static void free_sched_groups(struct sched_group *sg, int free_sgp)
- {
- struct sched_group *tmp, *first;
- if (!sg)
- return;
- first = sg;
- do {
- tmp = sg->next;
- if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
- kfree(sg->sgp);
- kfree(sg);
- sg = tmp;
- } while (sg != first);
- }
- static void free_sched_domain(struct rcu_head *rcu)
- {
- struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
- /*
- * If its an overlapping domain it has private groups, iterate and
- * nuke them all.
- */
- if (sd->flags & SD_OVERLAP) {
- free_sched_groups(sd->groups, 1);
- } else if (atomic_dec_and_test(&sd->groups->ref)) {
- kfree(sd->groups->sgp);
- kfree(sd->groups);
- }
- kfree(sd);
- }
- static void destroy_sched_domain(struct sched_domain *sd, int cpu)
- {
- call_rcu(&sd->rcu, free_sched_domain);
- }
- static void destroy_sched_domains(struct sched_domain *sd, int cpu)
- {
- for (; sd; sd = sd->parent)
- destroy_sched_domain(sd, cpu);
- }
- /*
- * Keep a special pointer to the highest sched_domain that has
- * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
- * allows us to avoid some pointer chasing select_idle_sibling().
- *
- * Also keep a unique ID per domain (we use the first cpu number in
- * the cpumask of the domain), this allows us to quickly tell if
- * two cpus are in the same cache domain, see cpus_share_cache().
- */
- DEFINE_PER_CPU(struct sched_domain *, sd_llc);
- DEFINE_PER_CPU(int, sd_llc_size);
- DEFINE_PER_CPU(int, sd_llc_id);
- static void update_top_cache_domain(int cpu)
- {
- struct sched_domain *sd;
- int id = cpu;
- int size = 1;
- sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
- if (sd) {
- id = cpumask_first(sched_domain_span(sd));
- size = cpumask_weight(sched_domain_span(sd));
- }
- rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
- per_cpu(sd_llc_size, cpu) = size;
- per_cpu(sd_llc_id, cpu) = id;
- }
- /*
- * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
- * hold the hotplug lock.
- */
- static void
- cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- struct sched_domain *tmp;
- unsigned long next_balance = rq->next_balance;
- /* Remove the sched domains which do not contribute to scheduling. */
- for (tmp = sd; tmp; ) {
- struct sched_domain *parent = tmp->parent;
- if (!parent)
- break;
- if (sd_parent_degenerate(tmp, parent)) {
- tmp->parent = parent->parent;
- if (parent->parent)
- parent->parent->child = tmp;
- destroy_sched_domain(parent, cpu);
- } else
- tmp = tmp->parent;
- }
- if (sd && sd_degenerate(sd)) {
- tmp = sd;
- sd = sd->parent;
- destroy_sched_domain(tmp, cpu);
- if (sd)
- sd->child = NULL;
- }
- for (tmp = sd; tmp; ) {
- unsigned long interval;
- interval = msecs_to_jiffies(tmp->balance_interval);
- if (time_after(next_balance, tmp->last_balance + interval))
- next_balance = tmp->last_balance + interval;
- tmp = tmp->parent;
- }
- rq->next_balance = next_balance;
- sched_domain_debug(sd, cpu);
- rq_attach_root(rq, rd);
- tmp = rq->sd;
- rcu_assign_pointer(rq->sd, sd);
- destroy_sched_domains(tmp, cpu);
- update_top_cache_domain(cpu);
- }
- /* cpus with isolated domains */
- static cpumask_var_t cpu_isolated_map;
- /* Setup the mask of cpus configured for isolated domains */
- static int __init isolated_cpu_setup(char *str)
- {
- alloc_bootmem_cpumask_var(&cpu_isolated_map);
- cpulist_parse(str, cpu_isolated_map);
- return 1;
- }
- __setup("isolcpus=", isolated_cpu_setup);
- static const struct cpumask *cpu_cpu_mask(int cpu)
- {
- return cpumask_of_node(cpu_to_node(cpu));
- }
- int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
- struct sd_data {
- struct sched_domain **__percpu sd;
- struct sched_group **__percpu sg;
- struct sched_group_power **__percpu sgp;
- };
- struct s_data {
- struct sched_domain ** __percpu sd;
- struct root_domain *rd;
- };
- enum s_alloc {
- sa_rootdomain,
- sa_sd,
- sa_sd_storage,
- sa_none,
- };
- struct sched_domain_topology_level;
- typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
- typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
- #define SDTL_OVERLAP 0x01
- struct sched_domain_topology_level {
- sched_domain_init_f init;
- sched_domain_mask_f mask;
- int flags;
- int numa_level;
- struct sd_data data;
- };
- /*
- * Build an iteration mask that can exclude certain CPUs from the upwards
- * domain traversal.
- *
- * Asymmetric node setups can result in situations where the domain tree is of
- * unequal depth, make sure to skip domains that already cover the entire
- * range.
- *
- * In that case build_sched_domains() will have terminated the iteration early
- * and our sibling sd spans will be empty. Domains should always include the
- * cpu they're built on, so check that.
- *
- */
- static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
- {
- const struct cpumask *span = sched_domain_span(sd);
- struct sd_data *sdd = sd->private;
- struct sched_domain *sibling;
- int i;
- for_each_cpu(i, span) {
- sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
- continue;
- cpumask_set_cpu(i, sched_group_mask(sg));
- }
- }
- /*
- * Return the canonical balance cpu for this group, this is the first cpu
- * of this group that's also in the iteration mask.
- */
- int group_balance_cpu(struct sched_group *sg)
- {
- return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
- }
- static int
- build_overlap_sched_groups(struct sched_domain *sd, int cpu)
- {
- struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
- const struct cpumask *span = sched_domain_span(sd);
- struct cpumask *covered = sched_domains_tmpmask;
- struct sd_data *sdd = sd->private;
- struct sched_domain *child;
- int i;
- cpumask_clear(covered);
- for_each_cpu(i, span) {
- struct cpumask *sg_span;
- if (cpumask_test_cpu(i, covered))
- continue;
- child = *per_cpu_ptr(sdd->sd, i);
- /* See the comment near build_group_mask(). */
- if (!cpumask_test_cpu(i, sched_domain_span(child)))
- continue;
- sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, cpu_to_node(cpu));
- if (!sg)
- goto fail;
- sg_span = sched_group_cpus(sg);
- if (child->child) {
- child = child->child;
- cpumask_copy(sg_span, sched_domain_span(child));
- } else
- cpumask_set_cpu(i, sg_span);
- cpumask_or(covered, covered, sg_span);
- sg->sgp = *per_cpu_ptr(sdd->sgp, i);
- if (atomic_inc_return(&sg->sgp->ref) == 1)
- build_group_mask(sd, sg);
- /*
- * Initialize sgp->power such that even if we mess up the
- * domains and no possible iteration will get us here, we won't
- * die on a /0 trap.
- */
- sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
- /*
- * Make sure the first group of this domain contains the
- * canonical balance cpu. Otherwise the sched_domain iteration
- * breaks. See update_sg_lb_stats().
- */
- if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
- group_balance_cpu(sg) == cpu)
- groups = sg;
- if (!first)
- first = sg;
- if (last)
- last->next = sg;
- last = sg;
- last->next = first;
- }
- sd->groups = groups;
- return 0;
- fail:
- free_sched_groups(first, 0);
- return -ENOMEM;
- }
- static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
- {
- struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
- struct sched_domain *child = sd->child;
- if (child)
- cpu = cpumask_first(sched_domain_span(child));
- if (sg) {
- *sg = *per_cpu_ptr(sdd->sg, cpu);
- (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
- atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
- }
- return cpu;
- }
- /*
- * build_sched_groups will build a circular linked list of the groups
- * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_power to 0.
- *
- * Assumes the sched_domain tree is fully constructed
- */
- static int
- build_sched_groups(struct sched_domain *sd, int cpu)
- {
- struct sched_group *first = NULL, *last = NULL;
- struct sd_data *sdd = sd->private;
- const struct cpumask *span = sched_domain_span(sd);
- struct cpumask *covered;
- int i;
- get_group(cpu, sdd, &sd->groups);
- atomic_inc(&sd->groups->ref);
- if (cpu != cpumask_first(sched_domain_span(sd)))
- return 0;
- lockdep_assert_held(&sched_domains_mutex);
- covered = sched_domains_tmpmask;
- cpumask_clear(covered);
- for_each_cpu(i, span) {
- struct sched_group *sg;
- int group, j;
- if (cpumask_test_cpu(i, covered))
- continue;
- group = get_group(i, sdd, &sg);
- cpumask_clear(sched_group_cpus(sg));
- sg->sgp->power = 0;
- cpumask_setall(sched_group_mask(sg));
- for_each_cpu(j, span) {
- if (get_group(j, sdd, NULL) != group)
- continue;
- cpumask_set_cpu(j, covered);
- cpumask_set_cpu(j, sched_group_cpus(sg));
- }
- if (!first)
- first = sg;
- if (last)
- last->next = sg;
- last = sg;
- }
- last->next = first;
- return 0;
- }
- /*
- * Initialize sched groups cpu_power.
- *
- * cpu_power indicates the capacity of sched group, which is used while
- * distributing the load between different sched groups in a sched domain.
- * Typically cpu_power for all the groups in a sched domain will be same unless
- * there are asymmetries in the topology. If there are asymmetries, group
- * having more cpu_power will pickup more load compared to the group having
- * less cpu_power.
- */
- static void init_sched_groups_power(int cpu, struct sched_domain *sd)
- {
- struct sched_group *sg = sd->groups;
- WARN_ON(!sd || !sg);
- do {
- sg->group_weight = cpumask_weight(sched_group_cpus(sg));
- sg = sg->next;
- } while (sg != sd->groups);
- if (cpu != group_balance_cpu(sg))
- return;
- update_group_power(sd, cpu);
- atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
- }
- int __weak arch_sd_sibling_asym_packing(void)
- {
- return 0*SD_ASYM_PACKING;
- }
- /*
- * Initializers for schedule domains
- * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
- */
- #ifdef CONFIG_SCHED_DEBUG
- # define SD_INIT_NAME(sd, type) sd->name = #type
- #else
- # define SD_INIT_NAME(sd, type) do { } while (0)
- #endif
- #define SD_INIT_FUNC(type) \
- static noinline struct sched_domain * \
- sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
- { \
- struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
- *sd = SD_##type##_INIT; \
- SD_INIT_NAME(sd, type); \
- sd->private = &tl->data; \
- return sd; \
- }
- SD_INIT_FUNC(CPU)
- #ifdef CONFIG_SCHED_SMT
- SD_INIT_FUNC(SIBLING)
- #endif
- #ifdef CONFIG_SCHED_MC
- SD_INIT_FUNC(MC)
- #endif
- #ifdef CONFIG_SCHED_BOOK
- SD_INIT_FUNC(BOOK)
- #endif
- static int default_relax_domain_level = -1;
- int sched_domain_level_max;
- static int __init setup_relax_domain_level(char *str)
- {
- if (kstrtoint(str, 0, &default_relax_domain_level))
- pr_warn("Unable to set relax_domain_level\n");
- return 1;
- }
- __setup("relax_domain_level=", setup_relax_domain_level);
- static void set_domain_attribute(struct sched_domain *sd,
- struct sched_domain_attr *attr)
- {
- int request;
- if (!attr || attr->relax_domain_level < 0) {
- if (default_relax_domain_level < 0)
- return;
- else
- request = default_relax_domain_level;
- } else
- request = attr->relax_domain_level;
- if (request < sd->level) {
- /* turn off idle balance on this domain */
- sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
- } else {
- /* turn on idle balance on this domain */
- sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
- }
- }
- static void __sdt_free(const struct cpumask *cpu_map);
- static int __sdt_alloc(const struct cpumask *cpu_map);
- static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
- const struct cpumask *cpu_map)
- {
- switch (what) {
- case sa_rootdomain:
- if (!atomic_read(&d->rd->refcount))
- free_rootdomain(&d->rd->rcu); /* fall through */
- case sa_sd:
- free_percpu(d->sd); /* fall through */
- case sa_sd_storage:
- __sdt_free(cpu_map); /* fall through */
- case sa_none:
- break;
- }
- }
- static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
- const struct cpumask *cpu_map)
- {
- memset(d, 0, sizeof(*d));
- if (__sdt_alloc(cpu_map))
- return sa_sd_storage;
- d->sd = alloc_percpu(struct sched_domain *);
- if (!d->sd)
- return sa_sd_storage;
- d->rd = alloc_rootdomain();
- if (!d->rd)
- return sa_sd;
- return sa_rootdomain;
- }
- /*
- * NULL the sd_data elements we've used to build the sched_domain and
- * sched_group structure so that the subsequent __free_domain_allocs()
- * will not free the data we're using.
- */
- static void claim_allocations(int cpu, struct sched_domain *sd)
- {
- struct sd_data *sdd = sd->private;
- WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
- *per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
- *per_cpu_ptr(sdd->sg, cpu) = NULL;
- if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
- *per_cpu_ptr(sdd->sgp, cpu) = NULL;
- }
- #ifdef CONFIG_SCHED_SMT
- static const struct cpumask *cpu_smt_mask(int cpu)
- {
- return topology_thread_cpumask(cpu);
- }
- #endif
- /*
- * Topology list, bottom-up.
- */
- static struct sched_domain_topology_level default_topology[] = {
- #ifdef CONFIG_SCHED_SMT
- { sd_init_SIBLING, cpu_smt_mask, },
- #endif
- #ifdef CONFIG_SCHED_MC
- { sd_init_MC, cpu_coregroup_mask, },
- #endif
- #ifdef CONFIG_SCHED_BOOK
- { sd_init_BOOK, cpu_book_mask, },
- #endif
- { sd_init_CPU, cpu_cpu_mask, },
- { NULL, },
- };
- static struct sched_domain_topology_level *sched_domain_topology = default_topology;
- #ifdef CONFIG_NUMA
- static int sched_domains_numa_levels;
- static int *sched_domains_numa_distance;
- static struct cpumask ***sched_domains_numa_masks;
- static int sched_domains_curr_level;
- static inline unsigned long numa_scale(unsigned long x, int level)
- {
- return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
- }
- static inline int sd_local_flags(int level)
- {
- if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
- return 0;
- return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
- }
- static struct sched_domain *
- sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
- {
- struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
- int level = tl->numa_level;
- int sd_weight = cpumask_weight(
- sched_domains_numa_masks[level][cpu_to_node(cpu)]);
- *sd = (struct sched_domain){
- .min_interval = sd_weight,
- .max_interval = 2*sd_weight,
- .busy_factor = 32,
- .imbalance_pct = 100 + numa_scale(25, level),
- .cache_nice_tries = 2,
- .busy_idx = 3,
- .idle_idx = 2,
- .newidle_idx = 0,
- .wake_idx = 0,
- .forkexec_idx = 0,
- .flags = 1*SD_LOAD_BALANCE
- | 1*SD_BALANCE_NEWIDLE
- | 0*SD_BALANCE_EXEC
- | 0*SD_BALANCE_FORK
- | 0*SD_BALANCE_WAKE
- | 0*SD_WAKE_AFFINE
- | 0*SD_PREFER_LOCAL
- | 0*SD_SHARE_CPUPOWER
- | 0*SD_POWERSAVINGS_BALANCE
- | 0*SD_SHARE_PKG_RESOURCES
- | 1*SD_SERIALIZE
- | 0*SD_PREFER_SIBLING
- | sd_local_flags(level)
- ,
- .last_balance = jiffies,
- .balance_interval = sd_weight,
- };
- SD_INIT_NAME(sd, NUMA);
- sd->private = &tl->data;
- /*
- * Ugly hack to pass state to sd_numa_mask()...
- */
- sched_domains_curr_level = tl->numa_level;
- return sd;
- }
- static const struct cpumask *sd_numa_mask(int cpu)
- {
- return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
- }
- static void sched_numa_warn(const char *str)
- {
- static int done = false;
- int i,j;
- if (done)
- return;
- done = true;
- printk(KERN_WARNING "ERROR: %s\n\n", str);
- for (i = 0; i < nr_node_ids; i++) {
- printk(KERN_WARNING " ");
- for (j = 0; j < nr_node_ids; j++)
- printk(KERN_CONT "%02d ", node_distance(i,j));
- printk(KERN_CONT "\n");
- }
- printk(KERN_WARNING "\n");
- }
- static bool find_numa_distance(int distance)
- {
- int i;
- if (distance == node_distance(0, 0))
- return true;
- for (i = 0; i < sched_domains_numa_levels; i++) {
- if (sched_domains_numa_distance[i] == distance)
- return true;
- }
- return false;
- }
- static void sched_init_numa(void)
- {
- int next_distance, curr_distance = node_distance(0, 0);
- struct sched_domain_topology_level *tl;
- int level = 0;
- int i, j, k;
- sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
- if (!sched_domains_numa_distance)
- return;
- /*
- * O(nr_nodes^2) deduplicating selection sort -- in order to find the
- * unique distances in the node_distance() table.
- *
- * Assumes node_distance(0,j) includes all distances in
- * node_distance(i,j) in order to avoid cubic time.
- */
- next_distance = curr_distance;
- for (i = 0; i < nr_node_ids; i++) {
- for (j = 0; j < nr_node_ids; j++) {
- for (k = 0; k < nr_node_ids; k++) {
- int distance = node_distance(i, k);
- if (distance > curr_distance &&
- (distance < next_distance ||
- next_distance == curr_distance))
- next_distance = distance;
- /*
- * While not a strong assumption it would be nice to know
- * about cases where if node A is connected to B, B is not
- * equally connected to A.
- */
- if (sched_debug() && node_distance(k, i) != distance)
- sched_numa_warn("Node-distance not symmetric");
- if (sched_debug() && i && !find_numa_distance(distance))
- sched_numa_warn("Node-0 not representative");
- }
- if (next_distance != curr_distance) {
- sched_domains_numa_distance[level++] = next_distance;
- sched_domains_numa_levels = level;
- curr_distance = next_distance;
- } else break;
- }
- /*
- * In case of sched_debug() we verify the above assumption.
- */
- if (!sched_debug())
- break;
- }
- /*
- * 'level' contains the number of unique distances, excluding the
- * identity distance node_distance(i,i).
- *
- * The sched_domains_nume_distance[] array includes the actual distance
- * numbers.
- */
- sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
- if (!sched_domains_numa_masks)
- return;
- /*
- * Now for each level, construct a mask per node which contains all
- * cpus of nodes that are that many hops away from us.
- */
- for (i = 0; i < level; i++) {
- sched_domains_numa_masks[i] =
- kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
- if (!sched_domains_numa_masks[i])
- return;
- for (j = 0; j < nr_node_ids; j++) {
- struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
- if (!mask)
- return;
- sched_domains_numa_masks[i][j] = mask;
- for (k = 0; k < nr_node_ids; k++) {
- if (node_distance(cpu_to_node(j), k) >
- sched_domains_numa_distance[i])
- continue;
- cpumask_or(mask, mask, cpumask_of_node(k));
- }
- }
- }
- tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
- sizeof(struct sched_domain_topology_level), GFP_KERNEL);
- if (!tl)
- return;
- /*
- * Copy the default topology bits..
- */
- for (i = 0; default_topology[i].init; i++)
- tl[i] = default_topology[i];
- /*
- * .. and append 'j' levels of NUMA goodness.
- */
- for (j = 0; j < level; i++, j++) {
- tl[i] = (struct sched_domain_topology_level){
- .init = sd_numa_init,
- .mask = sd_numa_mask,
- .flags = SDTL_OVERLAP,
- .numa_level = j,
- };
- }
- sched_domain_topology = tl;
- }
- #else
- static inline void sched_init_numa(void)
- {
- }
- #endif /* CONFIG_NUMA */
- static int __sdt_alloc(const struct cpumask *cpu_map)
- {
- struct sched_domain_topology_level *tl;
- int j;
- for (tl = sched_domain_topology; tl->init; tl++) {
- struct sd_data *sdd = &tl->data;
- sdd->sd = alloc_percpu(struct sched_domain *);
- if (!sdd->sd)
- return -ENOMEM;
- sdd->sg = alloc_percpu(struct sched_group *);
- if (!sdd->sg)
- return -ENOMEM;
- sdd->sgp = alloc_percpu(struct sched_group_power *);
- if (!sdd->sgp)
- return -ENOMEM;
- for_each_cpu(j, cpu_map) {
- struct sched_domain *sd;
- struct sched_group *sg;
- struct sched_group_power *sgp;
- sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
- GFP_KERNEL, cpu_to_node(j));
- if (!sd)
- return -ENOMEM;
- *per_cpu_ptr(sdd->sd, j) = sd;
- sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
- GFP_KERNEL, cpu_to_node(j));
- if (!sg)
- return -ENOMEM;
- sg->next = sg;
- *per_cpu_ptr(sdd->sg, j) = sg;
- sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
- GFP_KERNEL, cpu_to_node(j));
- if (!sgp)
- return -ENOMEM;
- *per_cpu_ptr(sdd->sgp, j) = sgp;
- }
- }
- return 0;
- }
- static void __sdt_free(const struct cpumask *cpu_map)
- {
- struct sched_domain_topology_level *tl;
- int j;
- for (tl = sched_domain_topology; tl->init; tl++) {
- struct sd_data *sdd = &tl->data;
- for_each_cpu(j, cpu_map) {
- struct sched_domain *sd;
- if (sdd->sd) {
- sd = *per_cpu_ptr(sdd->sd, j);
- if (sd && (sd->flags & SD_OVERLAP))
- free_sched_groups(sd->groups, 0);
- kfree(*per_cpu_ptr(sdd->sd, j));
- }
- if (sdd->sg)
- kfree(*per_cpu_ptr(sdd->sg, j));
- if (sdd->sgp)
- kfree(*per_cpu_ptr(sdd->sgp, j));
- }
- free_percpu(sdd->sd);
- sdd->sd = NULL;
- free_percpu(sdd->sg);
- sdd->sg = NULL;
- free_percpu(sdd->sgp);
- sdd->sgp = NULL;
- }
- }
- struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
- struct s_data *d, const struct cpumask *cpu_map,
- struct sched_domain_attr *attr, struct sched_domain *child,
- int cpu)
- {
- struct sched_domain *sd = tl->init(tl, cpu);
- if (!sd)
- return child;
- cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
- if (child) {
- sd->level = child->level + 1;
- sched_domain_level_max = max(sched_domain_level_max, sd->level);
- child->parent = sd;
- }
- sd->child = child;
- set_domain_attribute(sd, attr);
- return sd;
- }
- /*
- * Build sched domains for a given set of cpus and attach the sched domains
- * to the individual cpus
- */
- static int build_sched_domains(const struct cpumask *cpu_map,
- struct sched_domain_attr *attr)
- {
- enum s_alloc alloc_state = sa_none;
- struct sched_domain *sd;
- struct s_data d;
- int i, ret = -ENOMEM;
- alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
- if (alloc_state != sa_rootdomain)
- goto error;
- /* Set up domains for cpus specified by the cpu_map. */
- for_each_cpu(i, cpu_map) {
- struct sched_domain_topology_level *tl;
- sd = NULL;
- for (tl = sched_domain_topology; tl->init; tl++) {
- sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
- if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
- sd->flags |= SD_OVERLAP;
- if (cpumask_equal(cpu_map, sched_domain_span(sd)))
- break;
- }
- while (sd->child)
- sd = sd->child;
- *per_cpu_ptr(d.sd, i) = sd;
- }
- /* Build the groups for the domains */
- for_each_cpu(i, cpu_map) {
- for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
- sd->span_weight = cpumask_weight(sched_domain_span(sd));
- if (sd->flags & SD_OVERLAP) {
- if (build_overlap_sched_groups(sd, i))
- goto error;
- } else {
- if (build_sched_groups(sd, i))
- goto error;
- }
- }
- }
- /* Calculate CPU power for physical packages and nodes */
- for (i = nr_cpumask_bits-1; i >= 0; i--) {
- if (!cpumask_test_cpu(i, cpu_map))
- continue;
- for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
- claim_allocations(i, sd);
- init_sched_groups_power(i, sd);
- }
- }
- /* Attach the domains */
- rcu_read_lock();
- for_each_cpu(i, cpu_map) {
- sd = *per_cpu_ptr(d.sd, i);
- cpu_attach_domain(sd, d.rd, i);
- }
- rcu_read_unlock();
- ret = 0;
- error:
- __free_domain_allocs(&d, alloc_state, cpu_map);
- return ret;
- }
- static cpumask_var_t *doms_cur; /* current sched domains */
- static int ndoms_cur; /* number of sched domains in 'doms_cur' */
- static struct sched_domain_attr *dattr_cur;
- /* attribues of custom domains in 'doms_cur' */
- /*
- * Special case: If a kmalloc of a doms_cur partition (array of
- * cpumask) fails, then fallback to a single sched domain,
- * as determined by the single cpumask fallback_doms.
- */
- static cpumask_var_t fallback_doms;
- /*
- * arch_update_cpu_topology lets virtualized architectures update the
- * cpu core maps. It is supposed to return 1 if the topology changed
- * or 0 if it stayed the same.
- */
- int __weak arch_update_cpu_topology(void)
- {
- return 0;
- }
- cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
- {
- int i;
- cpumask_var_t *doms;
- doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
- if (!doms)
- return NULL;
- for (i = 0; i < ndoms; i++) {
- if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
- free_sched_domains(doms, i);
- return NULL;
- }
- }
- return doms;
- }
- void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
- {
- unsigned int i;
- for (i = 0; i < ndoms; i++)
- free_cpumask_var(doms[i]);
- kfree(doms);
- }
- /*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
- * For now this just excludes isolated cpus, but could be used to
- * exclude other special cases in the future.
- */
- static int init_sched_domains(const struct cpumask *cpu_map)
- {
- int err;
- arch_update_cpu_topology();
- ndoms_cur = 1;
- doms_cur = alloc_sched_domains(ndoms_cur);
- if (!doms_cur)
- doms_cur = &fallback_doms;
- cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
- err = build_sched_domains(doms_cur[0], NULL);
- register_sched_domain_sysctl();
- return err;
- }
- /*
- * Detach sched domains from a group of cpus specified in cpu_map
- * These cpus will now be attached to the NULL domain
- */
- static void detach_destroy_domains(const struct cpumask *cpu_map)
- {
- int i;
- rcu_read_lock();
- for_each_cpu(i, cpu_map)
- cpu_attach_domain(NULL, &def_root_domain, i);
- rcu_read_unlock();
- }
- /* handle null as "default" */
- static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
- struct sched_domain_attr *new, int idx_new)
- {
- struct sched_domain_attr tmp;
- /* fast path */
- if (!new && !cur)
- return 1;
- tmp = SD_ATTR_INIT;
- return !memcmp(cur ? (cur + idx_cur) : &tmp,
- new ? (new + idx_new) : &tmp,
- sizeof(struct sched_domain_attr));
- }
- /*
- * Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks. This compares
- * doms_new[] to the current sched domain partitioning, doms_cur[].
- * It destroys each deleted domain and builds each new domain.
- *
- * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.) We should setup one
- * sched domain for each mask. CPUs not in any of the cpumasks will
- * not be load balanced. If the same cpumask appears both in the
- * current 'doms_cur' domains and in the new 'doms_new', we can leave
- * it as it is.
- *
- * The passed in 'doms_new' should be allocated using
- * alloc_sched_domains. This routine takes ownership of it and will
- * free_sched_domains it when done with it. If the caller failed the
- * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
- *
- * If doms_new == NULL it will be replaced with cpu_online_mask.
- * ndoms_new == 0 is a special case for destroying existing domains,
- * and it will not create the default domain.
- *
- * Call with hotplug lock held
- */
- void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
- {
- int i, j, n;
- int new_topology;
- mutex_lock(&sched_domains_mutex);
- /* always unregister in case we don't destroy any domains */
- unregister_sched_domain_sysctl();
- /* Let architecture update cpu core mappings. */
- new_topology = arch_update_cpu_topology();
- n = doms_new ? ndoms_new : 0;
- /* Destroy deleted domains */
- for (i = 0; i < ndoms_cur; i++) {
- for (j = 0; j < n && !new_topology; j++) {
- if (cpumask_equal(doms_cur[i], doms_new[j])
- && dattrs_equal(dattr_cur, i, dattr_new, j))
- goto match1;
- }
- /* no match - a current sched domain not in new doms_new[] */
- detach_destroy_domains(doms_cur[i]);
- match1:
- ;
- }
- if (doms_new == NULL) {
- ndoms_cur = 0;
- doms_new = &fallback_doms;
- cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
- WARN_ON_ONCE(dattr_new);
- }
- /* Build new domains */
- for (i = 0; i < ndoms_new; i++) {
- for (j = 0; j < ndoms_cur && !new_topology; j++) {
- if (cpumask_equal(doms_new[i], doms_cur[j])
- && dattrs_equal(dattr_new, i, dattr_cur, j))
- goto match2;
- }
- /* no match - add a new doms_new */
- build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
- match2:
- ;
- }
- /* Remember the new sched domains */
- if (doms_cur != &fallback_doms)
- free_sched_domains(doms_cur, ndoms_cur);
- kfree(dattr_cur); /* kfree(NULL) is safe */
- doms_cur = doms_new;
- dattr_cur = dattr_new;
- ndoms_cur = ndoms_new;
- register_sched_domain_sysctl();
- mutex_unlock(&sched_domains_mutex);
- }
- #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- static void reinit_sched_domains(void)
- {
- get_online_cpus();
- /* Destroy domains first to force the rebuild */
- partition_sched_domains(0, NULL, NULL);
- rebuild_sched_domains();
- put_online_cpus();
- }
- static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
- {
- unsigned int level = 0;
- if (sscanf(buf, "%u", &level) != 1)
- return -EINVAL;
- /*
- * level is always be positive so don't check for
- * level < POWERSAVINGS_BALANCE_NONE which is 0
- * What happens on 0 or 1 byte write,
- * need to check for count as well?
- */
- if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
- return -EINVAL;
- if (smt)
- sched_smt_power_savings = level;
- else
- sched_mc_power_savings = level;
- reinit_sched_domains();
- return count;
- }
- #ifdef CONFIG_SCHED_MC
- static ssize_t sched_mc_power_savings_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return sprintf(buf, "%u\n", sched_mc_power_savings);
- }
- static ssize_t sched_mc_power_savings_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- return sched_power_savings_store(buf, count, 0);
- }
- static DEVICE_ATTR(sched_mc_power_savings, 0644,
- sched_mc_power_savings_show,
- sched_mc_power_savings_store);
- #endif
- #ifdef CONFIG_SCHED_SMT
- static ssize_t sched_smt_power_savings_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- return sprintf(buf, "%u\n", sched_smt_power_savings);
- }
- static ssize_t sched_smt_power_savings_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- return sched_power_savings_store(buf, count, 1);
- }
- static DEVICE_ATTR(sched_smt_power_savings, 0644,
- sched_smt_power_savings_show,
- sched_smt_power_savings_store);
- #endif
- int __init sched_create_sysfs_power_savings_entries(struct device *dev)
- {
- int err = 0;
- #ifdef CONFIG_SCHED_SMT
- if (smt_capable())
- err = device_create_file(dev, &dev_attr_sched_smt_power_savings);
- #endif
- #ifdef CONFIG_SCHED_MC
- if (!err && mc_capable())
- err = device_create_file(dev, &dev_attr_sched_mc_power_savings);
- #endif
- return err;
- }
- #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
- static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
- /*
- * Update cpusets according to cpu_active mask. If cpusets are
- * disabled, cpuset_update_active_cpus() becomes a simple wrapper
- * around partition_sched_domains().
- *
- * If we come here as part of a suspend/resume, don't touch cpusets because we
- * want to restore it back to its original state upon resume anyway.
- */
- static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
- {
- switch (action) {
- case CPU_ONLINE_FROZEN:
- case CPU_DOWN_FAILED_FROZEN:
- /*
- * num_cpus_frozen tracks how many CPUs are involved in suspend
- * resume sequence. As long as this is not the last online
- * operation in the resume sequence, just build a single sched
- * domain, ignoring cpusets.
- */
- num_cpus_frozen--;
- if (likely(num_cpus_frozen)) {
- partition_sched_domains(1, NULL, NULL);
- break;
- }
- /*
- * This is the last CPU online operation. So fall through and
- * restore the original sched domains by considering the
- * cpuset configurations.
- */
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- cpuset_update_active_cpus(true);
- break;
- default:
- return NOTIFY_DONE;
- }
- return NOTIFY_OK;
- }
- static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
- {
- switch (action) {
- case CPU_DOWN_PREPARE:
- cpuset_update_active_cpus(false);
- break;
- case CPU_DOWN_PREPARE_FROZEN:
- num_cpus_frozen++;
- partition_sched_domains(1, NULL, NULL);
- break;
- default:
- return NOTIFY_DONE;
- }
- return NOTIFY_OK;
- }
- void __init sched_init_smp(void)
- {
- cpumask_var_t non_isolated_cpus;
- alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
- alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
- sched_init_numa();
- get_online_cpus();
- mutex_lock(&sched_domains_mutex);
- init_sched_domains(cpu_active_mask);
- cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
- if (cpumask_empty(non_isolated_cpus))
- cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
- mutex_unlock(&sched_domains_mutex);
- put_online_cpus();
- hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
- hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
- init_hrtick();
- /* Move init over to a non-isolated CPU */
- if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
- BUG();
- sched_init_granularity();
- free_cpumask_var(non_isolated_cpus);
- init_sched_rt_class();
- }
- #else
- void __init sched_init_smp(void)
- {
- sched_init_granularity();
- }
- #endif /* CONFIG_SMP */
- const_debug unsigned int sysctl_timer_migration = 1;
- int in_sched_functions(unsigned long addr)
- {
- return in_lock_functions(addr) ||
- (addr >= (unsigned long)__sched_text_start
- && addr < (unsigned long)__sched_text_end);
- }
- #ifdef CONFIG_CGROUP_SCHED
- struct task_group root_task_group;
- LIST_HEAD(task_groups);
- #endif
- DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
- void __init sched_init(void)
- {
- int i, j;
- unsigned long alloc_size = 0, ptr;
- sec_gaf_supply_rqinfo(offsetof(struct rq, curr),
- offsetof(struct cfs_rq, rq));
- #ifdef CONFIG_FAIR_GROUP_SCHED
- alloc_size += 2 * nr_cpu_ids * sizeof(void **);
- #endif
- #ifdef CONFIG_RT_GROUP_SCHED
- alloc_size += 2 * nr_cpu_ids * sizeof(void **);
- #endif
- #ifdef CONFIG_CPUMASK_OFFSTACK
- alloc_size += num_possible_cpus() * cpumask_size();
- #endif
- if (alloc_size) {
- ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
- #ifdef CONFIG_FAIR_GROUP_SCHED
- root_task_group.se = (struct sched_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- root_task_group.cfs_rq = (struct cfs_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- #ifdef CONFIG_RT_GROUP_SCHED
- root_task_group.rt_se = (struct sched_rt_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- root_task_group.rt_rq = (struct rt_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- #endif /* CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_CPUMASK_OFFSTACK
- for_each_possible_cpu(i) {
- per_cpu(load_balance_tmpmask, i) = (void *)ptr;
- ptr += cpumask_size();
- }
- #endif /* CONFIG_CPUMASK_OFFSTACK */
- }
- #ifdef CONFIG_SMP
- init_defrootdomain();
- #endif
- init_rt_bandwidth(&def_rt_bandwidth,
- global_rt_period(), global_rt_runtime());
- #ifdef CONFIG_RT_GROUP_SCHED
- init_rt_bandwidth(&root_task_group.rt_bandwidth,
- global_rt_period(), global_rt_runtime());
- #endif /* CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_CGROUP_SCHED
- list_add(&root_task_group.list, &task_groups);
- INIT_LIST_HEAD(&root_task_group.children);
- INIT_LIST_HEAD(&root_task_group.siblings);
- autogroup_init(&init_task);
- #endif /* CONFIG_CGROUP_SCHED */
- #ifdef CONFIG_CGROUP_CPUACCT
- root_cpuacct.cpustat = &kernel_cpustat;
- root_cpuacct.cpuusage = alloc_percpu(u64);
- /* Too early, not expected to fail */
- BUG_ON(!root_cpuacct.cpuusage);
- #endif
- for_each_possible_cpu(i) {
- struct rq *rq;
- rq = cpu_rq(i);
- raw_spin_lock_init(&rq->lock);
- rq->nr_running = 0;
- rq->calc_load_active = 0;
- rq->calc_load_update = jiffies + LOAD_FREQ;
- init_cfs_rq(&rq->cfs);
- init_rt_rq(&rq->rt, rq);
- #ifdef CONFIG_FAIR_GROUP_SCHED
- root_task_group.shares = ROOT_TASK_GROUP_LOAD;
- INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
- /*
- * How much cpu bandwidth does root_task_group get?
- *
- * In case of task-groups formed thr' the cgroup filesystem, it
- * gets 100% of the cpu resources in the system. This overall
- * system cpu resource is divided among the tasks of
- * root_task_group and its child task-groups in a fair manner,
- * based on each entity's (task or task-group's) weight
- * (se->load.weight).
- *
- * In other words, if root_task_group has 10 tasks of weight
- * 1024) and two child groups A0 and A1 (of weight 1024 each),
- * then A0's share of the cpu resource is:
- *
- * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
- *
- * We achieve this by letting root_task_group's tasks sit
- * directly in rq->cfs (i.e root_task_group->se[] = NULL).
- */
- init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
- init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
- #ifdef CONFIG_RT_GROUP_SCHED
- INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
- init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
- #endif
- for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
- rq->cpu_load[j] = 0;
- rq->last_load_update_tick = jiffies;
- #ifdef CONFIG_SMP
- rq->sd = NULL;
- rq->rd = NULL;
- rq->cpu_power = SCHED_POWER_SCALE;
- rq->post_schedule = 0;
- rq->active_balance = 0;
- rq->next_balance = jiffies;
- rq->push_cpu = 0;
- rq->cpu = i;
- rq->online = 0;
- rq->idle_stamp = 0;
- rq->avg_idle = 2*sysctl_sched_migration_cost;
- INIT_LIST_HEAD(&rq->cfs_tasks);
- rq_attach_root(rq, &def_root_domain);
- #ifdef CONFIG_NO_HZ
- rq->nohz_flags = 0;
- #endif
- #endif
- init_rq_hrtick(rq);
- atomic_set(&rq->nr_iowait, 0);
- }
- set_load_weight(&init_task);
- #ifdef CONFIG_PREEMPT_NOTIFIERS
- INIT_HLIST_HEAD(&init_task.preempt_notifiers);
- #endif
- /*
- * The boot idle thread does lazy MMU switching as well:
- */
- atomic_inc(&init_mm.mm_count);
- enter_lazy_tlb(&init_mm, current);
- /*
- * Make us the idle thread. Technically, schedule() should not be
- * called from this thread, however somewhere below it might be,
- * but because we are the idle thread, we just pick up running again
- * when this runqueue becomes "idle".
- */
- init_idle(current, smp_processor_id());
- calc_load_update = jiffies + LOAD_FREQ;
- /*
- * During early bootup we pretend to be a normal task:
- */
- current->sched_class = &fair_sched_class;
- #ifdef CONFIG_SMP
- zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
- /* May be allocated at isolcpus cmdline parse time */
- if (cpu_isolated_map == NULL)
- zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
- #endif
- init_sched_fair_class();
- scheduler_running = 1;
- }
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- static inline int preempt_count_equals(int preempt_offset)
- {
- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
- return (nested == preempt_offset);
- }
- static int __might_sleep_init_called;
- int __init __might_sleep_init(void)
- {
- __might_sleep_init_called = 1;
- return 0;
- }
- early_initcall(__might_sleep_init);
- void __might_sleep(const char *file, int line, int preempt_offset)
- {
- static unsigned long prev_jiffy; /* ratelimiting */
- rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
- if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- oops_in_progress)
- return;
- if (system_state != SYSTEM_RUNNING &&
- (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
- return;
- if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
- return;
- prev_jiffy = jiffies;
- printk(KERN_ERR
- "BUG: sleeping function called from invalid context at %s:%d\n",
- file, line);
- printk(KERN_ERR
- "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
- in_atomic(), irqs_disabled(),
- current->pid, current->comm);
- debug_show_held_locks(current);
- if (irqs_disabled())
- print_irqtrace_events(current);
- dump_stack();
- }
- EXPORT_SYMBOL(__might_sleep);
- #endif
- #ifdef CONFIG_MAGIC_SYSRQ
- static void normalize_task(struct rq *rq, struct task_struct *p)
- {
- const struct sched_class *prev_class = p->sched_class;
- int old_prio = p->prio;
- int on_rq;
- on_rq = p->on_rq;
- if (on_rq)
- dequeue_task(rq, p, 0);
- __setscheduler(rq, p, SCHED_NORMAL, 0);
- if (on_rq) {
- enqueue_task(rq, p, 0);
- resched_task(rq->curr);
- }
- check_class_changed(rq, p, prev_class, old_prio);
- }
- void normalize_rt_tasks(void)
- {
- struct task_struct *g, *p;
- unsigned long flags;
- struct rq *rq;
- read_lock_irqsave(&tasklist_lock, flags);
- do_each_thread(g, p) {
- /*
- * Only normalize user tasks:
- */
- if (!p->mm)
- continue;
- p->se.exec_start = 0;
- #ifdef CONFIG_SCHEDSTATS
- p->se.statistics.wait_start = 0;
- p->se.statistics.sleep_start = 0;
- p->se.statistics.block_start = 0;
- #endif
- if (!rt_task(p)) {
- /*
- * Renice negative nice level userspace
- * tasks back to 0:
- */
- if (TASK_NICE(p) < 0 && p->mm)
- set_user_nice(p, 0);
- continue;
- }
- raw_spin_lock(&p->pi_lock);
- rq = __task_rq_lock(p);
- normalize_task(rq, p);
- __task_rq_unlock(rq);
- raw_spin_unlock(&p->pi_lock);
- } while_each_thread(g, p);
- read_unlock_irqrestore(&tasklist_lock, flags);
- }
- #endif /* CONFIG_MAGIC_SYSRQ */
- #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
- /*
- * These functions are only useful for the IA64 MCA handling, or kdb.
- *
- * They can only be called when the whole system has been
- * stopped - every CPU needs to be quiescent, and no scheduling
- * activity can take place. Using them for anything else would
- * be a serious bug, and as a result, they aren't even visible
- * under any other configuration.
- */
- /**
- * curr_task - return the current task for a given cpu.
- * @cpu: the processor in question.
- *
- * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
- */
- struct task_struct *curr_task(int cpu)
- {
- return cpu_curr(cpu);
- }
- #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
- #ifdef CONFIG_IA64
- /**
- * set_curr_task - set the current task for a given cpu.
- * @cpu: the processor in question.
- * @p: the task pointer to set.
- *
- * Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
- * must be called with all CPU's synchronized, and interrupts disabled, the
- * and caller must save the original value of the current task (see
- * curr_task() above) and restore that value before reenabling interrupts and
- * re-starting the system.
- *
- * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
- */
- void set_curr_task(int cpu, struct task_struct *p)
- {
- cpu_curr(cpu) = p;
- }
- #endif
- #ifdef CONFIG_CGROUP_SCHED
- /* task_group_lock serializes the addition/removal of task groups */
- static DEFINE_SPINLOCK(task_group_lock);
- static void free_sched_group(struct task_group *tg)
- {
- free_fair_sched_group(tg);
- free_rt_sched_group(tg);
- autogroup_free(tg);
- kfree(tg);
- }
- /* allocate runqueue etc for a new task group */
- struct task_group *sched_create_group(struct task_group *parent)
- {
- struct task_group *tg;
- unsigned long flags;
- tg = kzalloc(sizeof(*tg), GFP_KERNEL);
- if (!tg)
- return ERR_PTR(-ENOMEM);
- if (!alloc_fair_sched_group(tg, parent))
- goto err;
- if (!alloc_rt_sched_group(tg, parent))
- goto err;
- spin_lock_irqsave(&task_group_lock, flags);
- list_add_rcu(&tg->list, &task_groups);
- WARN_ON(!parent); /* root should already exist */
- tg->parent = parent;
- INIT_LIST_HEAD(&tg->children);
- list_add_rcu(&tg->siblings, &parent->children);
- spin_unlock_irqrestore(&task_group_lock, flags);
- return tg;
- err:
- free_sched_group(tg);
- return ERR_PTR(-ENOMEM);
- }
- /* rcu callback to free various structures associated with a task group */
- static void free_sched_group_rcu(struct rcu_head *rhp)
- {
- /* now it should be safe to free those cfs_rqs */
- free_sched_group(container_of(rhp, struct task_group, rcu));
- }
- /* Destroy runqueue etc associated with a task group */
- void sched_destroy_group(struct task_group *tg)
- {
- unsigned long flags;
- int i;
- /* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
- spin_lock_irqsave(&task_group_lock, flags);
- list_del_rcu(&tg->list);
- list_del_rcu(&tg->siblings);
- spin_unlock_irqrestore(&task_group_lock, flags);
- /* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&tg->rcu, free_sched_group_rcu);
- }
- /* change task's runqueue when it moves between groups.
- * The caller of this function should have put the task in its new group
- * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * reflect its new group.
- */
- void sched_move_task(struct task_struct *tsk)
- {
- struct task_group *tg;
- int on_rq, running;
- unsigned long flags;
- struct rq *rq;
- rq = task_rq_lock(tsk, &flags);
- running = task_current(rq, tsk);
- on_rq = tsk->on_rq;
- if (on_rq)
- dequeue_task(rq, tsk, 0);
- if (unlikely(running))
- tsk->sched_class->put_prev_task(rq, tsk);
- tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
- lockdep_is_held(&tsk->sighand->siglock)),
- struct task_group, css);
- tg = autogroup_task_group(tsk, tg);
- tsk->sched_task_group = tg;
- #ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->task_move_group)
- tsk->sched_class->task_move_group(tsk, on_rq);
- else
- #endif
- set_task_rq(tsk, task_cpu(tsk));
- if (unlikely(running))
- tsk->sched_class->set_curr_task(rq);
- if (on_rq)
- enqueue_task(rq, tsk, 0);
- task_rq_unlock(rq, tsk, &flags);
- }
- #endif /* CONFIG_CGROUP_SCHED */
- #if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
- static unsigned long to_ratio(u64 period, u64 runtime)
- {
- if (runtime == RUNTIME_INF)
- return 1ULL << 20;
- return div64_u64(runtime << 20, period);
- }
- #endif
- #ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Ensure that the real time constraints are schedulable.
- */
- static DEFINE_MUTEX(rt_constraints_mutex);
- /* Must be called with tasklist_lock held */
- static inline int tg_has_rt_tasks(struct task_group *tg)
- {
- struct task_struct *g, *p;
- do_each_thread(g, p) {
- if (rt_task(p) && task_rq(p)->rt.tg == tg)
- return 1;
- } while_each_thread(g, p);
- return 0;
- }
- struct rt_schedulable_data {
- struct task_group *tg;
- u64 rt_period;
- u64 rt_runtime;
- };
- static int tg_rt_schedulable(struct task_group *tg, void *data)
- {
- struct rt_schedulable_data *d = data;
- struct task_group *child;
- unsigned long total, sum = 0;
- u64 period, runtime;
- period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- runtime = tg->rt_bandwidth.rt_runtime;
- if (tg == d->tg) {
- period = d->rt_period;
- runtime = d->rt_runtime;
- }
- /*
- * Cannot have more runtime than the period.
- */
- if (runtime > period && runtime != RUNTIME_INF)
- return -EINVAL;
- /*
- * Ensure we don't starve existing RT tasks.
- */
- if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
- return -EBUSY;
- total = to_ratio(period, runtime);
- /*
- * Nobody can have more than the global setting allows.
- */
- if (total > to_ratio(global_rt_period(), global_rt_runtime()))
- return -EINVAL;
- /*
- * The sum of our children's runtime should not exceed our own.
- */
- list_for_each_entry_rcu(child, &tg->children, siblings) {
- period = ktime_to_ns(child->rt_bandwidth.rt_period);
- runtime = child->rt_bandwidth.rt_runtime;
- if (child == d->tg) {
- period = d->rt_period;
- runtime = d->rt_runtime;
- }
- sum += to_ratio(period, runtime);
- }
- if (sum > total)
- return -EINVAL;
- return 0;
- }
- static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
- {
- int ret;
- struct rt_schedulable_data data = {
- .tg = tg,
- .rt_period = period,
- .rt_runtime = runtime,
- };
- rcu_read_lock();
- ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
- rcu_read_unlock();
- return ret;
- }
- static int tg_set_rt_bandwidth(struct task_group *tg,
- u64 rt_period, u64 rt_runtime)
- {
- int i, err = 0;
- mutex_lock(&rt_constraints_mutex);
- read_lock(&tasklist_lock);
- err = __rt_schedulable(tg, rt_period, rt_runtime);
- if (err)
- goto unlock;
- raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
- tg->rt_bandwidth.rt_runtime = rt_runtime;
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = tg->rt_rq[i];
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = rt_runtime;
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
- unlock:
- read_unlock(&tasklist_lock);
- mutex_unlock(&rt_constraints_mutex);
- return err;
- }
- int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
- {
- u64 rt_runtime, rt_period;
- rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
- rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
- if (rt_runtime_us < 0)
- rt_runtime = RUNTIME_INF;
- return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
- }
- long sched_group_rt_runtime(struct task_group *tg)
- {
- u64 rt_runtime_us;
- if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
- return -1;
- rt_runtime_us = tg->rt_bandwidth.rt_runtime;
- do_div(rt_runtime_us, NSEC_PER_USEC);
- return rt_runtime_us;
- }
- int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
- {
- u64 rt_runtime, rt_period;
- rt_period = (u64)rt_period_us * NSEC_PER_USEC;
- rt_runtime = tg->rt_bandwidth.rt_runtime;
- if (rt_period == 0)
- return -EINVAL;
- return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
- }
- long sched_group_rt_period(struct task_group *tg)
- {
- u64 rt_period_us;
- rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
- do_div(rt_period_us, NSEC_PER_USEC);
- return rt_period_us;
- }
- static int sched_rt_global_constraints(void)
- {
- u64 runtime, period;
- int ret = 0;
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
- runtime = global_rt_runtime();
- period = global_rt_period();
- /*
- * Sanity check on the sysctl variables.
- */
- if (runtime > period && runtime != RUNTIME_INF)
- return -EINVAL;
- mutex_lock(&rt_constraints_mutex);
- read_lock(&tasklist_lock);
- ret = __rt_schedulable(NULL, 0, 0);
- read_unlock(&tasklist_lock);
- mutex_unlock(&rt_constraints_mutex);
- return ret;
- }
- int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
- {
- /* Don't accept realtime tasks when there is no way for them to run */
- if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
- return 0;
- return 1;
- }
- #else /* !CONFIG_RT_GROUP_SCHED */
- static int sched_rt_global_constraints(void)
- {
- unsigned long flags;
- int i;
- if (sysctl_sched_rt_period <= 0)
- return -EINVAL;
- /*
- * There's always some RT tasks in the root group
- * -- migration, kstopmachine etc..
- */
- if (sysctl_sched_rt_runtime == 0)
- return -EBUSY;
- raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
- for_each_possible_cpu(i) {
- struct rt_rq *rt_rq = &cpu_rq(i)->rt;
- raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_runtime = global_rt_runtime();
- raw_spin_unlock(&rt_rq->rt_runtime_lock);
- }
- raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
- return 0;
- }
- #endif /* CONFIG_RT_GROUP_SCHED */
- int sched_rt_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- int old_period, old_runtime;
- static DEFINE_MUTEX(mutex);
- mutex_lock(&mutex);
- old_period = sysctl_sched_rt_period;
- old_runtime = sysctl_sched_rt_runtime;
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (!ret && write) {
- ret = sched_rt_global_constraints();
- if (ret) {
- sysctl_sched_rt_period = old_period;
- sysctl_sched_rt_runtime = old_runtime;
- } else {
- def_rt_bandwidth.rt_runtime = global_rt_runtime();
- def_rt_bandwidth.rt_period =
- ns_to_ktime(global_rt_period());
- }
- }
- mutex_unlock(&mutex);
- return ret;
- }
- #ifdef CONFIG_CGROUP_SCHED
- /* return corresponding task_group object of a cgroup */
- static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
- {
- return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
- struct task_group, css);
- }
- static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
- {
- struct task_group *tg, *parent;
- if (!cgrp->parent) {
- /* This is early initialization for the top cgroup */
- return &root_task_group.css;
- }
- parent = cgroup_tg(cgrp->parent);
- tg = sched_create_group(parent);
- if (IS_ERR(tg))
- return ERR_PTR(-ENOMEM);
- return &tg->css;
- }
- static void cpu_cgroup_destroy(struct cgroup *cgrp)
- {
- struct task_group *tg = cgroup_tg(cgrp);
- sched_destroy_group(tg);
- }
- static int cpu_cgroup_can_attach(struct cgroup *cgrp,
- struct cgroup_taskset *tset)
- {
- struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset) {
- #ifdef CONFIG_RT_GROUP_SCHED
- if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
- return -EINVAL;
- #else
- /* We don't support RT-tasks being in separate groups */
- if (task->sched_class != &fair_sched_class)
- return -EINVAL;
- #endif
- }
- return 0;
- }
- static void cpu_cgroup_attach(struct cgroup *cgrp,
- struct cgroup_taskset *tset)
- {
- struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset) {
- sched_move_task(task);
- #ifdef CONFIG_ANDROID_BG_SCAN_MEM
- if (task_notify_on_migrate(task) && thread_group_leader(task))
- raw_notifier_call_chain(&bgtsk_migration_notifier_head,
- 0, NULL);
- #endif
- }
- }
- static void
- cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
- struct task_struct *task)
- {
- /*
- * cgroup_exit() is called in the copy_process() failure path.
- * Ignore this case since the task hasn't ran yet, this avoids
- * trying to poke a half freed task state from generic code.
- */
- if (!(task->flags & PF_EXITING))
- return;
- sched_move_task(task);
- }
- static u64 cpu_notify_on_migrate_read_u64(struct cgroup *cgrp,
- struct cftype *cft)
- {
- struct task_group *tg = cgroup_tg(cgrp);
- return tg->notify_on_migrate;
- }
- static int cpu_notify_on_migrate_write_u64(struct cgroup *cgrp,
- struct cftype *cft, u64 notify)
- {
- struct task_group *tg = cgroup_tg(cgrp);
- tg->notify_on_migrate = (notify > 0);
- return 0;
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 shareval)
- {
- return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
- }
- static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
- {
- struct task_group *tg = cgroup_tg(cgrp);
- return (u64) scale_load_down(tg->shares);
- }
- #ifdef CONFIG_CFS_BANDWIDTH
- static DEFINE_MUTEX(cfs_constraints_mutex);
- const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
- const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
- static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
- static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
- {
- int i, ret = 0, runtime_enabled, runtime_was_enabled;
- struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
- if (tg == &root_task_group)
- return -EINVAL;
- /*
- * Ensure we have at some amount of bandwidth every period. This is
- * to prevent reaching a state of large arrears when throttled via
- * entity_tick() resulting in prolonged exit starvation.
- */
- if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
- return -EINVAL;
- /*
- * Likewise, bound things on the otherside by preventing insane quota
- * periods. This also allows us to normalize in computing quota
- * feasibility.
- */
- if (period > max_cfs_quota_period)
- return -EINVAL;
- mutex_lock(&cfs_constraints_mutex);
- ret = __cfs_schedulable(tg, period, quota);
- if (ret)
- goto out_unlock;
- runtime_enabled = quota != RUNTIME_INF;
- runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
- /*
- * If we need to toggle cfs_bandwidth_used, off->on must occur
- * before making related changes, and on->off must occur afterwards
- */
- if (runtime_enabled && !runtime_was_enabled)
- cfs_bandwidth_usage_inc();
- raw_spin_lock_irq(&cfs_b->lock);
- cfs_b->period = ns_to_ktime(period);
- cfs_b->quota = quota;
- __refill_cfs_bandwidth_runtime(cfs_b);
- /* restart the period timer (if active) to handle new period expiry */
- if (runtime_enabled && cfs_b->timer_active) {
- /* force a reprogram */
- cfs_b->timer_active = 0;
- __start_cfs_bandwidth(cfs_b);
- }
- raw_spin_unlock_irq(&cfs_b->lock);
- for_each_possible_cpu(i) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[i];
- struct rq *rq = cfs_rq->rq;
- raw_spin_lock_irq(&rq->lock);
- cfs_rq->runtime_enabled = runtime_enabled;
- cfs_rq->runtime_remaining = 0;
- if (cfs_rq->throttled)
- unthrottle_cfs_rq(cfs_rq);
- raw_spin_unlock_irq(&rq->lock);
- }
- if (runtime_was_enabled && !runtime_enabled)
- cfs_bandwidth_usage_dec();
- out_unlock:
- mutex_unlock(&cfs_constraints_mutex);
- return ret;
- }
- int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
- {
- u64 quota, period;
- period = ktime_to_ns(tg->cfs_bandwidth.period);
- if (cfs_quota_us < 0)
- quota = RUNTIME_INF;
- else
- quota = (u64)cfs_quota_us * NSEC_PER_USEC;
- return tg_set_cfs_bandwidth(tg, period, quota);
- }
- long tg_get_cfs_quota(struct task_group *tg)
- {
- u64 quota_us;
- if (tg->cfs_bandwidth.quota == RUNTIME_INF)
- return -1;
- quota_us = tg->cfs_bandwidth.quota;
- do_div(quota_us, NSEC_PER_USEC);
- return quota_us;
- }
- int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
- {
- u64 quota, period;
- period = (u64)cfs_period_us * NSEC_PER_USEC;
- quota = tg->cfs_bandwidth.quota;
- return tg_set_cfs_bandwidth(tg, period, quota);
- }
- long tg_get_cfs_period(struct task_group *tg)
- {
- u64 cfs_period_us;
- cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
- do_div(cfs_period_us, NSEC_PER_USEC);
- return cfs_period_us;
- }
- static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
- {
- return tg_get_cfs_quota(cgroup_tg(cgrp));
- }
- static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
- s64 cfs_quota_us)
- {
- return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
- }
- static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
- {
- return tg_get_cfs_period(cgroup_tg(cgrp));
- }
- static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 cfs_period_us)
- {
- return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
- }
- struct cfs_schedulable_data {
- struct task_group *tg;
- u64 period, quota;
- };
- /*
- * normalize group quota/period to be quota/max_period
- * note: units are usecs
- */
- static u64 normalize_cfs_quota(struct task_group *tg,
- struct cfs_schedulable_data *d)
- {
- u64 quota, period;
- if (tg == d->tg) {
- period = d->period;
- quota = d->quota;
- } else {
- period = tg_get_cfs_period(tg);
- quota = tg_get_cfs_quota(tg);
- }
- /* note: these should typically be equivalent */
- if (quota == RUNTIME_INF || quota == -1)
- return RUNTIME_INF;
- return to_ratio(period, quota);
- }
- static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
- {
- struct cfs_schedulable_data *d = data;
- struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
- s64 quota = 0, parent_quota = -1;
- if (!tg->parent) {
- quota = RUNTIME_INF;
- } else {
- struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
- quota = normalize_cfs_quota(tg, d);
- parent_quota = parent_b->hierarchal_quota;
- /*
- * ensure max(child_quota) <= parent_quota, inherit when no
- * limit is set
- */
- if (quota == RUNTIME_INF)
- quota = parent_quota;
- else if (parent_quota != RUNTIME_INF && quota > parent_quota)
- return -EINVAL;
- }
- cfs_b->hierarchal_quota = quota;
- return 0;
- }
- static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
- {
- int ret;
- struct cfs_schedulable_data data = {
- .tg = tg,
- .period = period,
- .quota = quota,
- };
- if (quota != RUNTIME_INF) {
- do_div(data.period, NSEC_PER_USEC);
- do_div(data.quota, NSEC_PER_USEC);
- }
- rcu_read_lock();
- ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
- rcu_read_unlock();
- return ret;
- }
- static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
- {
- struct task_group *tg = cgroup_tg(cgrp);
- struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
- cb->fill(cb, "nr_periods", cfs_b->nr_periods);
- cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
- cb->fill(cb, "throttled_time", cfs_b->throttled_time);
- return 0;
- }
- #endif /* CONFIG_CFS_BANDWIDTH */
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- #ifdef CONFIG_RT_GROUP_SCHED
- static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
- s64 val)
- {
- return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
- }
- static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
- {
- return sched_group_rt_runtime(cgroup_tg(cgrp));
- }
- static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
- u64 rt_period_us)
- {
- return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
- }
- static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
- {
- return sched_group_rt_period(cgroup_tg(cgrp));
- }
- #endif /* CONFIG_RT_GROUP_SCHED */
- static struct cftype cpu_files[] = {
- {
- .name = "notify_on_migrate",
- .read_u64 = cpu_notify_on_migrate_read_u64,
- .write_u64 = cpu_notify_on_migrate_write_u64,
- },
- #ifdef CONFIG_FAIR_GROUP_SCHED
- {
- .name = "shares",
- .read_u64 = cpu_shares_read_u64,
- .write_u64 = cpu_shares_write_u64,
- },
- #endif
- #ifdef CONFIG_CFS_BANDWIDTH
- {
- .name = "cfs_quota_us",
- .read_s64 = cpu_cfs_quota_read_s64,
- .write_s64 = cpu_cfs_quota_write_s64,
- },
- {
- .name = "cfs_period_us",
- .read_u64 = cpu_cfs_period_read_u64,
- .write_u64 = cpu_cfs_period_write_u64,
- },
- {
- .name = "stat",
- .read_map = cpu_stats_show,
- },
- #endif
- #ifdef CONFIG_RT_GROUP_SCHED
- {
- .name = "rt_runtime_us",
- .read_s64 = cpu_rt_runtime_read,
- .write_s64 = cpu_rt_runtime_write,
- },
- {
- .name = "rt_period_us",
- .read_u64 = cpu_rt_period_read_uint,
- .write_u64 = cpu_rt_period_write_uint,
- },
- #endif
- };
- static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
- {
- return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
- }
- struct cgroup_subsys cpu_cgroup_subsys = {
- .name = "cpu",
- .create = cpu_cgroup_create,
- .destroy = cpu_cgroup_destroy,
- .can_attach = cpu_cgroup_can_attach,
- .attach = cpu_cgroup_attach,
- .allow_attach = subsys_cgroup_allow_attach,
- .exit = cpu_cgroup_exit,
- .populate = cpu_cgroup_populate,
- .subsys_id = cpu_cgroup_subsys_id,
- .early_init = 1,
- };
- #endif /* CONFIG_CGROUP_SCHED */
- #ifdef CONFIG_CGROUP_CPUACCT
- /*
- * CPU accounting code for task groups.
- *
- * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
- * (balbir@in.ibm.com).
- */
- /* create a new cpu accounting group */
- static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
- {
- struct cpuacct *ca;
- if (!cgrp->parent)
- return &root_cpuacct.css;
- ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca)
- goto out;
- ca->cpuusage = alloc_percpu(u64);
- if (!ca->cpuusage)
- goto out_free_ca;
- ca->cpustat = alloc_percpu(struct kernel_cpustat);
- if (!ca->cpustat)
- goto out_free_cpuusage;
- return &ca->css;
- out_free_cpuusage:
- free_percpu(ca->cpuusage);
- out_free_ca:
- kfree(ca);
- out:
- return ERR_PTR(-ENOMEM);
- }
- /* destroy an existing cpu accounting group */
- static void cpuacct_destroy(struct cgroup *cgrp)
- {
- struct cpuacct *ca = cgroup_ca(cgrp);
- free_percpu(ca->cpustat);
- free_percpu(ca->cpuusage);
- kfree(ca);
- }
- static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
- {
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
- u64 data;
- #ifndef CONFIG_64BIT
- /*
- * Take rq->lock to make 64-bit read safe on 32-bit platforms.
- */
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
- data = *cpuusage;
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
- #else
- data = *cpuusage;
- #endif
- return data;
- }
- static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
- {
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
- #ifndef CONFIG_64BIT
- /*
- * Take rq->lock to make 64-bit write safe on 32-bit platforms.
- */
- raw_spin_lock_irq(&cpu_rq(cpu)->lock);
- *cpuusage = val;
- raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
- #else
- *cpuusage = val;
- #endif
- }
- /* return total cpu usage (in nanoseconds) of a group */
- static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
- {
- struct cpuacct *ca = cgroup_ca(cgrp);
- u64 totalcpuusage = 0;
- int i;
- for_each_present_cpu(i)
- totalcpuusage += cpuacct_cpuusage_read(ca, i);
- return totalcpuusage;
- }
- static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
- u64 reset)
- {
- struct cpuacct *ca = cgroup_ca(cgrp);
- int err = 0;
- int i;
- if (reset) {
- err = -EINVAL;
- goto out;
- }
- for_each_present_cpu(i)
- cpuacct_cpuusage_write(ca, i, 0);
- out:
- return err;
- }
- static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
- {
- struct cpuacct *ca = cgroup_ca(cgroup);
- u64 percpu;
- int i;
- for_each_present_cpu(i) {
- percpu = cpuacct_cpuusage_read(ca, i);
- seq_printf(m, "%llu ", (unsigned long long) percpu);
- }
- seq_printf(m, "\n");
- return 0;
- }
- static const char *cpuacct_stat_desc[] = {
- [CPUACCT_STAT_USER] = "user",
- [CPUACCT_STAT_SYSTEM] = "system",
- };
- static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
- {
- struct cpuacct *ca = cgroup_ca(cgrp);
- int cpu;
- s64 val = 0;
- for_each_online_cpu(cpu) {
- struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
- val += kcpustat->cpustat[CPUTIME_USER];
- val += kcpustat->cpustat[CPUTIME_NICE];
- }
- val = cputime64_to_clock_t(val);
- cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_USER], val);
- val = 0;
- for_each_online_cpu(cpu) {
- struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
- val += kcpustat->cpustat[CPUTIME_SYSTEM];
- val += kcpustat->cpustat[CPUTIME_IRQ];
- val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
- }
- val = cputime64_to_clock_t(val);
- cb->fill(cb, cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
- return 0;
- }
- static struct cftype files[] = {
- {
- .name = "usage",
- .read_u64 = cpuusage_read,
- .write_u64 = cpuusage_write,
- },
- {
- .name = "usage_percpu",
- .read_seq_string = cpuacct_percpu_seq_read,
- },
- {
- .name = "stat",
- .read_map = cpuacct_stats_show,
- },
- };
- static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
- {
- return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
- }
- /*
- * charge this task's execution time to its accounting group.
- *
- * called with rq->lock held.
- */
- void cpuacct_charge(struct task_struct *tsk, u64 cputime)
- {
- struct cpuacct *ca;
- int cpu;
- if (unlikely(!cpuacct_subsys.active))
- return;
- cpu = task_cpu(tsk);
- rcu_read_lock();
- ca = task_ca(tsk);
- for (; ca; ca = parent_ca(ca)) {
- u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
- *cpuusage += cputime;
- }
- rcu_read_unlock();
- }
- struct cgroup_subsys cpuacct_subsys = {
- .name = "cpuacct",
- .create = cpuacct_create,
- .destroy = cpuacct_destroy,
- .populate = cpuacct_populate,
- .subsys_id = cpuacct_subsys_id,
- };
- #endif /* CONFIG_CGROUP_CPUACCT */
|