/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22)  /* Vendor specific instruction */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
/* Source 2 operand type */
#define Src2Shift   (30)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
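
/*
 * Illustrative note (the consumer is later in this file, outside this
 * excerpt): Dst, Src and Src2 are 5-bit OpXXX values packed into a single
 * 64-bit flags word at DstShift, SrcShift and Src2Shift.  The decoder
 * unpacks them roughly as:
 *
 *	dst_type  = (ctxt->d >> DstShift)  & OpMask;
 *	src_type  = (ctxt->d >> SrcShift)  & OpMask;
 *	src2_type = (ctxt->d >> Src2Shift) & OpMask;
 */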

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
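
/*
 * For example, X4(DstReg) expands to "DstReg, DstReg, DstReg, DstReg",
 * letting an opcode table describe runs of consecutive opcodes with
 * identical decode flags in a single entry.
 */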

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                struct opcode *group;
                struct group_dual *gdual;
                struct gprefix *gprefix;
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};
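
/*
 * Dispatch sketch (assumed; the lookup code is outside this excerpt):
 * when an entry's flags contain Group, bits 3:5 of the ModRM byte index
 * into u.group[]; with GroupDual, mod == 3 selects gdual->mod3[] instead
 * of gdual->mod012[]; with Prefix, a 66/F2/F3 prefix picks one of the
 * four gprefix members; otherwise u.execute() emulates the instruction
 * directly.
 */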

/* EFLAGS bit definitions. */
#define EFLG_ID   (1<<21)
#define EFLG_VIP  (1<<20)
#define EFLG_VIF  (1<<19)
#define EFLG_AC   (1<<18)
#define EFLG_VM   (1<<17)
#define EFLG_RF   (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT   (1<<14)
#define EFLG_OF   (1<<11)
#define EFLG_DF   (1<<10)
#define EFLG_IF   (1<<9)
#define EFLG_TF   (1<<8)
#define EFLG_SF   (1<<7)
#define EFLG_ZF   (1<<6)
#define EFLG_AF   (1<<4)
#define EFLG_PF   (1<<2)
#define EFLG_CF   (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

/*
 * Instruction emulation:
 * Most instructions are emulated directly via a fragment of inline assembly
 * code. This allows us to save/restore EFLAGS and thus very easily pick up
 * any modified flags.
 */

#if defined(CONFIG_X86_64)
#define _LO32 "k"           /* force 32-bit operand */
#define _STK  "%%rsp"       /* stack pointer */
#elif defined(__i386__)
#define _LO32 ""            /* force 32-bit operand */
#define _STK  "%%esp"       /* stack pointer */
#endif

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp)                                   \
        /* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
        "movl %"_sav",%"_LO32 _tmp"; "                                  \
        "push %"_tmp"; "                                                \
        "push %"_tmp"; "                                                \
        "movl %"_msk",%"_LO32 _tmp"; "                                  \
        "andl %"_LO32 _tmp",("_STK"); "                                 \
        "pushf; "                                                       \
        "notl %"_LO32 _tmp"; "                                          \
        "andl %"_LO32 _tmp",("_STK"); "                                 \
        "andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "   \
        "pop  %"_tmp"; "                                                \
        "orl  %"_LO32 _tmp",("_STK"); "                                 \
        "popf; "                                                        \
        "pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp)          \
        /* _sav |= EFLAGS & _msk; */            \
        "pushf; "                               \
        "pop  %"_tmp"; "                        \
        "andl %"_msk",%"_LO32 _tmp"; "          \
        "orl  %"_LO32 _tmp",%"_sav"; "

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype)           \
        do {                                                            \
                __asm__ __volatile__ (                                  \
                        _PRE_EFLAGS("0", "4", "2")                      \
                        _op _suffix " %"_x"3,%1; "                      \
                        _POST_EFLAGS("0", "4", "2")                     \
                        : "=m" ((ctxt)->eflags),                        \
                          "+q" (*(_dsttype*)&(ctxt)->dst.val),          \
                          "=&r" (_tmp)                                  \
                        : _y ((ctxt)->src.val), "i" (EFLAGS_MASK));     \
        } while (0)

/* Raw emulation: instruction has two explicit operands. */
#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy)          \
        do {                                                            \
                unsigned long _tmp;                                     \
                                                                        \
                switch ((ctxt)->dst.bytes) {                            \
                case 2:                                                 \
                        ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16);      \
                        break;                                          \
                case 4:                                                 \
                        ____emulate_2op(ctxt,_op,_lx,_ly,"l",u32);      \
                        break;                                          \
                case 8:                                                 \
                        ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \
                        break;                                          \
                }                                                       \
        } while (0)

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy)         \
        do {                                                            \
                unsigned long _tmp;                                     \
                switch ((ctxt)->dst.bytes) {                            \
                case 1:                                                 \
                        ____emulate_2op(ctxt,_op,_bx,_by,"b",u8);       \
                        break;                                          \
                default:                                                \
                        __emulate_2op_nobyte(ctxt, _op,                 \
                                             _wx, _wy, _lx, _ly, _qx, _qy); \
                        break;                                          \
                }                                                       \
        } while (0)

/* Source operand is byte-sized and may be restricted to just %cl. */
#define emulate_2op_SrcB(ctxt, _op) \
        __emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

/* Source operand is byte, word, long or quad sized. */
#define emulate_2op_SrcV(ctxt, _op) \
        __emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

/* Source operand is word, long or quad sized. */
#define emulate_2op_SrcV_nobyte(ctxt, _op) \
        __emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
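
/*
 * Simplified illustration (not a literal expansion): for a 4-byte ADD,
 * emulate_2op_SrcV(ctxt, "add") amounts to
 *
 *	asm("addl %2,%1"
 *	    : "=m" (ctxt->eflags), "+q" (*(u32 *)&ctxt->dst.val)
 *	    : "r" (ctxt->src.val));
 *
 * bracketed by _PRE_EFLAGS/_POST_EFLAGS so the guest's arithmetic flags
 * are in place when the host instruction runs and the resulting flags
 * are captured back into ctxt->eflags.
 */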

/* Instruction has three operands and one operand is stored in ECX register */
#define __emulate_2op_cl(ctxt, _op, _suffix, _type)                     \
        do {                                                            \
                unsigned long _tmp;                                     \
                _type _clv  = (ctxt)->src2.val;                         \
                _type _srcv = (ctxt)->src.val;                          \
                _type _dstv = (ctxt)->dst.val;                          \
                                                                        \
                __asm__ __volatile__ (                                  \
                        _PRE_EFLAGS("0", "5", "2")                      \
                        _op _suffix " %4,%1 \n"                         \
                        _POST_EFLAGS("0", "5", "2")                     \
                        : "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
                        : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)   \
                        );                                              \
                                                                        \
                (ctxt)->src2.val = (unsigned long) _clv;                \
                (ctxt)->src.val  = (unsigned long) _srcv;               \
                (ctxt)->dst.val  = (unsigned long) _dstv;               \
        } while (0)

#define emulate_2op_cl(ctxt, _op)                                       \
        do {                                                            \
                switch ((ctxt)->dst.bytes) {                            \
                case 2:                                                 \
                        __emulate_2op_cl(ctxt, _op, "w", u16);          \
                        break;                                          \
                case 4:                                                 \
                        __emulate_2op_cl(ctxt, _op, "l", u32);          \
                        break;                                          \
                case 8:                                                 \
                        ON64(__emulate_2op_cl(ctxt, _op, "q", ulong));  \
                        break;                                          \
                }                                                       \
        } while (0)

#define __emulate_1op(ctxt, _op, _suffix)                               \
        do {                                                            \
                unsigned long _tmp;                                     \
                                                                        \
                __asm__ __volatile__ (                                  \
                        _PRE_EFLAGS("0", "3", "2")                      \
                        _op _suffix " %1; "                             \
                        _POST_EFLAGS("0", "3", "2")                     \
                        : "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
                          "=&r" (_tmp)                                  \
                        : "i" (EFLAGS_MASK));                           \
        } while (0)

/* Instruction has only one explicit operand (no source operand). */
#define emulate_1op(ctxt, _op)                                          \
        do {                                                            \
                switch ((ctxt)->dst.bytes) {                            \
                case 1: __emulate_1op(ctxt, _op, "b"); break;           \
                case 2: __emulate_1op(ctxt, _op, "w"); break;           \
                case 4: __emulate_1op(ctxt, _op, "l"); break;           \
                case 8: ON64(__emulate_1op(ctxt, _op, "q")); break;     \
                }                                                       \
        } while (0)

#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)                  \
        do {                                                            \
                unsigned long _tmp;                                     \
                ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX];              \
                ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX];              \
                                                                        \
                __asm__ __volatile__ (                                  \
                        _PRE_EFLAGS("0", "5", "1")                      \
                        "1: \n\t"                                       \
                        _op _suffix " %6; "                             \
                        "2: \n\t"                                       \
                        _POST_EFLAGS("0", "5", "1")                     \
                        ".pushsection .fixup,\"ax\" \n\t"               \
                        "3: movb $1, %4 \n\t"                           \
                        "jmp 2b \n\t"                                   \
                        ".popsection \n\t"                              \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "=m" ((ctxt)->eflags), "=&r" (_tmp),          \
                          "+a" (*rax), "+d" (*rdx), "+qm"(_ex)          \
                        : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),     \
                          "a" (*rax), "d" (*rdx));                      \
        } while (0)

/* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
#define emulate_1op_rax_rdx(ctxt, _op, _ex)                             \
        do {                                                            \
                switch ((ctxt)->src.bytes) {                            \
                case 1:                                                 \
                        __emulate_1op_rax_rdx(ctxt, _op, "b", _ex);     \
                        break;                                          \
                case 2:                                                 \
                        __emulate_1op_rax_rdx(ctxt, _op, "w", _ex);     \
                        break;                                          \
                case 4:                                                 \
                        __emulate_1op_rax_rdx(ctxt, _op, "l", _ex);     \
                        break;                                          \
                case 8: ON64(                                           \
                        __emulate_1op_rax_rdx(ctxt, _op, "q", _ex));    \
                        break;                                          \
                }                                                       \
        } while (0)
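
/*
 * The _ASM_EXTABLE entry above routes a faulting host div/idiv to the
 * fixup at label 3, which sets *_ex and resumes at label 2.  A caller can
 * then inject #DE into the guest; a hedged sketch of such a caller (the
 * actual DIV handler is outside this excerpt):
 *
 *	u8 de = 0;
 *
 *	emulate_1op_rax_rdx(ctxt, "div", de);
 *	if (de)
 *		return emulate_de(ctxt);
 */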

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        return address_mask(ctxt, reg);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                *reg += inc;
        else
                *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
}
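
/*
 * Worked example: with a 16-bit address size (ctxt->ad_bytes == 2),
 * ad_mask() is 0xffff, so an increment wraps within the low word and
 * leaves the upper register bits untouched:
 *
 *	*reg == 0x1234ffff, inc == 1:
 *	*reg = (0x1234ffff & ~0xffff) | ((0x1234ffff + 1) & 0xffff)
 *	     = 0x12340000
 */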

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}
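
/*
 * Example: with the granularity bit set, a raw limit of 0xf scales to
 * (0xf << 12) | 0xfff == 0xffff; the 20-bit page-granular limit becomes
 * a byte-granular limit covering whole 4 KiB pages.
 */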

static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
{
        ctxt->has_seg_override = true;
        ctxt->seg_override = seg;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
{
        if (!ctxt->has_seg_override)
                return 0;

        return ctxt->seg_override;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                                 int cs_l)
{
        switch (ctxt->op_bytes) {
        case 2:
                ctxt->_eip = (u16)dst;
                break;
        case 4:
                ctxt->_eip = (u32)dst;
                break;
#ifdef CONFIG_X86_64
        case 8:
                if ((cs_l && is_noncanonical_address(dst)) ||
                    (!cs_l && (dst >> 32) != 0))
                        return emulate_gp(ctxt, 0);
                ctxt->_eip = dst;
                break;
#endif
        default:
                WARN(1, "unsupported eip assignment size\n");
        }
        return X86EMUL_CONTINUE;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static int __linearize(struct x86_emulate_ctxt *ctxt,
                       struct segmented_address addr,
                       unsigned size, bool write, bool fetch,
                       ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        unsigned cpl, rpl;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        switch (ctxt->mode) {
        case X86EMUL_MODE_REAL:
                break;
        case X86EMUL_MODE_PROT64:
                if (((signed long)la << 16) >> 16 != la)
                        return emulate_gp(ctxt, 0);
                break;
        default:
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment or read-only data segment */
                if (((desc.type & 8) || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if ((desc.type & 8) || !(desc.type & 4)) {
                        /* expand-up segment */
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                } else {
                        /* expand-down segment */
                        if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
                }
                cpl = ctxt->ops->cpl(ctxt);
                rpl = sel & 3;
                cpl = max(cpl, rpl);
                if (!(desc.type & 8)) {
                        /* data segment */
                        if (cpl > desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && !(desc.type & 4)) {
                        /* nonconforming code segment */
                        if (cpl != desc.dpl)
                                goto bad;
                } else if ((desc.type & 8) && (desc.type & 4)) {
                        /* conforming code segment */
                        if (cpl < desc.dpl)
                                goto bad;
                }
                break;
        }
        if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
                la &= (u32)-1;
        *linear = la;
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, addr.seg);
        else
                return emulate_gp(ctxt, addr.seg);
}
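
/*
 * Note on the PROT64 check in __linearize(): shifting the 64-bit linear
 * address left by 16 and arithmetically back sign-extends bit 47 into
 * bits 48-63.  The result equals the original only for canonical
 * addresses; e.g. la == 0x0000800000000000 fails the test and raises
 * #GP.
 */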

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        return __linearize(ctxt, addr, size, write, false, linear);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Fetch the next byte of the instruction being emulated which is pointed to
 * by ctxt->_eip, then increment ctxt->_eip.
 *
 * Also prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
        struct fetch_cache *fc = &ctxt->fetch;
        int rc;
        int size, cur_size;

        if (ctxt->_eip == fc->end) {
                unsigned long linear;
                struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                                  .ea  = ctxt->_eip };
                cur_size = fc->end - fc->start;
                size = min(15UL - cur_size,
                           PAGE_SIZE - offset_in_page(ctxt->_eip));
                rc = __linearize(ctxt, addr, size, false, true, &linear);
                if (unlikely(rc != X86EMUL_CONTINUE))
                        return rc;
                rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
                                      size, &ctxt->exception);
                if (unlikely(rc != X86EMUL_CONTINUE))
                        return rc;
                fc->end += size;
        }
        *dest = fc->data[ctxt->_eip - fc->start];
        ctxt->_eip++;
        return X86EMUL_CONTINUE;
}

static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
                         void *dest, unsigned size)
{
        int rc;

        /* x86 instructions are limited to 15 bytes. */
        if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
                return X86EMUL_UNHANDLEABLE;
        while (size--) {
                rc = do_insn_fetch_byte(ctxt, dest++);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
        }
        return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)                                        \
({      unsigned long _x;                                               \
        rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));                  \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        (_type)_x;                                                      \
})

#define insn_fetch_arr(_arr, _size, _ctxt)                              \
({      rc = do_insn_fetch(_ctxt, _arr, (_size));                       \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
})
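
/*
 * Usage note: insn_fetch() is a statement expression that assumes the
 * calling function declares a local "int rc" and provides a "done:"
 * label; on a failed fetch it jumps to that label with rc set.  See
 * decode_modrm() and decode_abs() below for callers written this way.
 */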

/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(u8 modrm_reg, unsigned long *regs,
                             int highbyte_regs)
{
        void *p;

        p = &regs[modrm_reg];
        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)&regs[modrm_reg & 3] + 1;
        return p;
}
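
/*
 * Example: without a REX prefix, register number 4 in a byte-sized
 * operand means AH rather than SPL, so decode_register(4, regs, 1)
 * returns byte 1 of regs[0] (RAX), i.e. the high byte of AX.
 */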

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

static int test_cc(unsigned int condition, unsigned int flags)
{
        int rc = 0;

        switch ((condition & 15) >> 1) {
        case 0: /* o */
                rc |= (flags & EFLG_OF);
                break;
        case 1: /* b/c/nae */
                rc |= (flags & EFLG_CF);
                break;
        case 2: /* z/e */
                rc |= (flags & EFLG_ZF);
                break;
        case 3: /* be/na */
                rc |= (flags & (EFLG_CF|EFLG_ZF));
                break;
        case 4: /* s */
                rc |= (flags & EFLG_SF);
                break;
        case 5: /* p/pe */
                rc |= (flags & EFLG_PF);
                break;
        case 7: /* le/ng */
                rc |= (flags & EFLG_ZF);
                /* fall through */
        case 6: /* l/nge */
                rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
                break;
        }

        /* Odd condition identifiers (lsb == 1) have inverted sense. */
        return (!!rc ^ (condition & 1));
}
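
/*
 * Example: for JNE (condition code 0x5), bits 3:1 select case 2 (ZF) and
 * the set low bit inverts the sense, so test_cc(0x5, flags) is true
 * exactly when ZF is clear.
 */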

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;
        int highbyte_regs = ctxt->rex_prefix == 0;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }

        op->type = OP_REG;
        if (ctxt->d & ByteOp) {
                op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
                op->bytes = 1;
        } else {
                op->addr.reg = decode_register(reg, ctxt->regs, 0);
                op->bytes = ctxt->op_bytes;
        }
        fetch_register_operand(op);
        op->orig_val = op->val;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg = 0, base_reg = 0, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        if (ctxt->rex_prefix) {
                ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;  /* REX.R */
                index_reg = (ctxt->rex_prefix & 2) << 2;        /* REX.X */
                ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REX.B */
        }

        ctxt->modrm = insn_fetch(u8, ctxt);
        ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm |= (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt->modrm_rm,
                                               ctxt->regs, ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = ctxt->regs[VCPU_REGS_RBX];
                unsigned bp = ctxt->regs[VCPU_REGS_RBP];
                unsigned si = ctxt->regs[VCPU_REGS_RSI];
                unsigned di = ctxt->regs[VCPU_REGS_RDI];

                /* 16-bit ModR/M decode. */
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
                /* 32/64-bit ModR/M decode. */
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else
                                modrm_ea += ctxt->regs[base_reg];
                        if (index_reg != 4)
                                modrm_ea += ctxt->regs[index_reg] << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else
                        modrm_ea += ctxt->regs[ctxt->modrm_rm];
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 5)
                                modrm_ea += insn_fetch(s32, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
done:
        return rc;
}
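
/*
 * Worked example of the 32-bit path: for "mov 0x10(%eax,%ecx,4),%edx"
 * the ModRM byte is 0x54 (mod=1, reg=2/EDX, rm=4) and the SIB byte is
 * 0x88 (scale=2, index=1/ECX, base=0/EAX), so the code above computes
 *
 *	modrm_ea = regs[VCPU_REGS_RAX] + (regs[VCPU_REGS_RCX] << 2)
 *		 + (s8)0x10;
 */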

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~(ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;

                ctxt->dst.addr.mem.ea += (sv >> 3);
        }

        /* only subword offset */
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
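
/*
 * Worked example: for "bt %eax,(%rbx)" with a 4-byte destination and
 * EAX == 100, mask is ~31 and sv becomes 96, so the memory address is
 * advanced by 96 >> 3 == 12 bytes and the bit offset is reduced to
 * 100 & 31 == 4: bit 100 relative to (%rbx) is bit 4 of the dword at
 * 12(%rbx).
 */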

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        while (size) {
                int n = min(size, 8u);
                size -= n;
                if (mc->pos < mc->end)
                        goto read_cached;

                rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
                                              &ctxt->exception);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                mc->end += n;

        read_cached:
                memcpy(dest, mc->data + mc->pos, n);
                mc->pos += n;
                dest += n;
                addr += n;
        }
        return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) { /* refill pio read ahead */
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
                in_page = (ctxt->eflags & EFLG_DF) ?
                        offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
                        PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
                n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
                        count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        memcpy(dest, rc->data + rc->pos, size);
        rc->pos += size;
        return 1;
}
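
/*
 * Read-ahead example: for "rep insw" with ECX == 1000 and EDI at the
 * start of a page, count is 1000 and in_page is PAGE_SIZE, so up to
 * min(PAGE_SIZE, sizeof(rc->data)) / 2 words are read from the port in a
 * single ->pio_in_emulated() call; later iterations of the rep loop are
 * satisfied from rc->data without another exit.
 */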

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
                                   &ctxt->exception);
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        struct x86_emulate_ops *ops = ctxt->ops;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof *dt);
                if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
                dt->address = get_desc_base(&desc);
        } else
                ops->get_gdt(ctxt, dt);
}

/* allowed only for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;
        return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
                                   &ctxt->exception);
}

/* allowed only for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;
        return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
                                    &ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     bool in_task_switch,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	int ret;

	memset(&seg_desc, 0, sizeof seg_desc);

	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
	    || ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		if (ctxt->mode == X86EMUL_MODE_VM86)
			seg_desc.dpl = 3;
		goto load;
	}

	/* NULL selector is not valid for TR, CS and SS */
	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
	    && null_selector)
		goto exception;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	if (null_selector) /* for NULL selector skip all following checks */
		goto load;

	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s)
		goto exception;

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	rpl = selector & 3;
	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment must be a writable data segment, and the
		 * selector's RPL and the descriptor's DPL must both
		 * equal CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		seg_desc.type |= 1;
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	emulate_exception(ctxt, err_vec, err_code, true);
	return X86EMUL_PROPAGATE_FAULT;
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}

static void write_register_operand(struct operand *op)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (op->bytes) {
	case 1:
		*(u8 *)op->addr.reg = (u8)op->val;
		break;
	case 2:
		*(u16 *)op->addr.reg = (u16)op->val;
		break;
	case 4:
		*op->addr.reg = (u32)op->val; /* 64b: zero-extend */
		break;
	case 8:
		*op->addr.reg = op->val;
		break;
	}
}

static int writeback(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	switch (ctxt->dst.type) {
	case OP_REG:
		write_register_operand(&ctxt->dst);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			rc = segmented_cmpxchg(ctxt,
					       ctxt->dst.addr.mem,
					       &ctxt->dst.orig_val,
					       &ctxt->dst.val,
					       ctxt->dst.bytes);
		else
			rc = segmented_write(ctxt,
					     ctxt->dst.addr.mem,
					     &ctxt->dst.val,
					     ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		break;
	case OP_XMM:
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

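/*
 * PUSH: pre-decrement RSP by the operand size, then store the source
 * operand at the new SS:RSP.
 */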
static int em_push(struct x86_emulate_ctxt *ctxt)
{
	struct segmented_address addr;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
				   -ctxt->op_bytes);
	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= EFLG_IOPL;
		if (cpl <= iopl)
			change_mask |= EFLG_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= EFLG_IF;
		break;
	default: /* real mode */
		change_mask |= (EFLG_IOPL | EFLG_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			register_address_increment(ctxt,
						   &ctxt->regs[VCPU_REGS_RSP],
						   ctxt->op_bytes);
			--reg;
		}

		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		--reg;
	}
	return rc;
}

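/*
 * Real-mode interrupt/INT n: push FLAGS, CS and IP, clear IF/TF/AC,
 * then fetch the new CS:IP from the IVT entry at linear address
 * IDT.base + vector * 4 (IP at offset 0, CS at offset 2).
 */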
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts are not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

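/*
 * Real-mode IRET: pop IP, CS and FLAGS, in that order.  Only the flag
 * bits in 'mask' may change; the VM86-related bits are preserved from
 * the current EFLAGS.
 */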
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF |
			     EFLG_SF | EFLG_TF | EFLG_IF | EFLG_DF |
			     EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
			     EFLG_AC | EFLG_ID |
			     (1 << 1); /* Last one is the reserved bit */
	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) |
				(ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode is not implemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel, old_sel;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* Assignment of RIP may only fail in 64-bit mode */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
				 VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		/* assigning eip failed; restore the old cs */
		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
		return rc;
	}
	return rc;
}

static int em_grp2(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->modrm_reg) {
	case 0:	/* rol */
		emulate_2op_SrcB(ctxt, "rol");
		break;
	case 1:	/* ror */
		emulate_2op_SrcB(ctxt, "ror");
		break;
	case 2:	/* rcl */
		emulate_2op_SrcB(ctxt, "rcl");
		break;
	case 3:	/* rcr */
		emulate_2op_SrcB(ctxt, "rcr");
		break;
	case 4:	/* sal/shl */
	case 6:	/* sal/shl */
		emulate_2op_SrcB(ctxt, "sal");
		break;
	case 5:	/* shr */
		emulate_2op_SrcB(ctxt, "shr");
		break;
	case 7:	/* sar */
		emulate_2op_SrcB(ctxt, "sar");
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_not(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ~ctxt->dst.val;
	return X86EMUL_CONTINUE;
}

static int em_neg(struct x86_emulate_ctxt *ctxt)
{
	emulate_1op(ctxt, "neg");
	return X86EMUL_CONTINUE;
}

static int em_mul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "mul", ex);
	return X86EMUL_CONTINUE;
}

static int em_imul_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 ex = 0;

	emulate_1op_rax_rdx(ctxt, "imul", ex);
	return X86EMUL_CONTINUE;
}

static int em_div_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "div", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_idiv_ex(struct x86_emulate_ctxt *ctxt)
{
	u8 de = 0;

	emulate_1op_rax_rdx(ctxt, "idiv", de);
	if (de)
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}

static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	switch (ctxt->modrm_reg) {
	case 0:	/* inc */
		emulate_1op(ctxt, "inc");
		break;
	case 1:	/* dec */
		emulate_1op(ctxt, "dec");
		break;
	case 2: /* call near abs */ {
		long int old_eip;
		old_eip = ctxt->_eip;
		rc = assign_eip_near(ctxt, ctxt->src.val);
		if (rc != X86EMUL_CONTINUE)
			break;
		ctxt->src.val = old_eip;
		rc = em_push(ctxt);
		break;
	}
	case 4: /* jmp abs */
		rc = assign_eip_near(ctxt, ctxt->src.val);
		break;
	case 5: /* jmp far */
		rc = em_jmp_far(ctxt);
		break;
	case 6:	/* push */
		rc = em_push(ctxt);
		break;
	}
	return rc;
}

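/*
 * CMPXCHG8B: compare EDX:EAX with the 64-bit destination; on a match
 * store ECX:EBX there and set ZF, otherwise load the old value into
 * EDX:EAX and clear ZF.
 */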
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
		ctxt->eflags &= ~EFLG_ZF;
	} else {
		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
			(u32) ctxt->regs[VCPU_REGS_RBX];

		ctxt->eflags |= EFLG_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	u16 old_cs;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
				 VCPU_SREG_CS);

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, new_desc.l);
	if (rc != X86EMUL_CONTINUE) {
		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	}
	return rc;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
	emulate_2op_SrcV(ctxt, "cmp");

	if (ctxt->eflags & EFLG_ZF) {
		/* Success: write back to memory. */
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->dst.type = OP_REG;
		ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

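/*
 * SYSCALL/SYSENTER/SYSEXIT bypass the descriptor tables: segment state
 * is built directly from fixed values.  Both CS and SS are set up as
 * flat 4GB segments; callers adjust DPL and the L/D bits as needed.
 */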
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	u16 selector;

	memset(cs, 0, sizeof(struct desc_struct));
	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
	memset(ss, 0, sizeof(struct desc_struct));

	cs->l = 0;			/* will be adjusted later */
	set_desc_base(cs, 0);		/* flat segment */
	cs->g = 1;			/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;		/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;			/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;

	set_desc_base(ss, 0);		/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;			/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;		/* Read/Write, Accessed */
	ss->d = 1;			/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
		&& ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in long mode, so the check
	 * only becomes vendor-specific (via cpuid) when other modes
	 * are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
		/*
		 * Intel ("GenuineIntel"): Intel CPUs only support
		 * "syscall" in 64-bit long mode, so a 64-bit guest
		 * running a 32-bit compat app will #UD.  While that
		 * behaviour could be papered over by emulating the AMD
		 * response, AMD CPUs cannot be made to behave like
		 * Intel's, so keep each vendor's native behaviour.
		 */
		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
			return false;

		/* AMD ("AuthenticAMD") */
		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
			return true;

		/* AMD ("AMDisbetter!") */
		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
			return true;
	}

	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
	return false;
}

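/*
 * SYSCALL: the new CS selector comes from MSR_STAR[47:32] and SS is
 * that value plus 8.  In long mode, RCX/R11 save RIP/RFLAGS, the
 * target RIP comes from MSR_LSTAR (or MSR_CSTAR for compat mode) and
 * RFLAGS is masked with MSR_SYSCALL_MASK; in legacy mode the target
 * EIP is STAR[31:0].
 */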
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	setup_syscalls_segments(ctxt, &cs, &ss);

	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~(msr_data | EFLG_RF);
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	}

	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in
	 * legacy mode).
	 */
	if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/*
	 * XXX sysenter/sysexit have not been tested in 64bit mode.
	 * Therefore, we inject an #UD.
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (ctxt->mode) {
	case X86EMUL_MODE_PROT32:
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	case X86EMUL_MODE_PROT64:
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		break;
	}

	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
	cs_sel = (u16)msr_data;
	cs_sel &= ~SELECTOR_RPL_MASK;
	ss_sel = cs_sel + 8;
	ss_sel &= ~SELECTOR_RPL_MASK;
	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	ctxt->regs[VCPU_REGS_RSP] = msr_data;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = ctxt->regs[VCPU_REGS_RCX];
	rdx = ctxt->regs[VCPU_REGS_RDX];

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (is_noncanonical_address(rcx) ||
		    is_noncanonical_address(rdx))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SELECTOR_RPL_MASK;
	ss_sel |= SELECTOR_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->regs[VCPU_REGS_RSP] = rcx;

	return X86EMUL_CONTINUE;
}

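/*
 * I/O instructions are privilege-checked against IOPL: in protected
 * mode they fall through to the TSS I/O permission bitmap check when
 * CPL > IOPL, and in VM86 mode the bitmap check always applies.
 */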
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;

	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

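/*
 * Hardware task switch: the outgoing task's registers are saved into
 * its TSS and the incoming task's state is loaded from the new TSS.
 * The 16-bit variant comes first; the 32-bit layout additionally
 * carries CR3, EFLAGS and the FS/GS selectors.
 */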
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = ctxt->regs[VCPU_REGS_RAX];
	tss->cx = ctxt->regs[VCPU_REGS_RCX];
	tss->dx = ctxt->regs[VCPU_REGS_RDX];
	tss->bx = ctxt->regs[VCPU_REGS_RBX];
	tss->sp = ctxt->regs[VCPU_REGS_RSP];
	tss->bp = ctxt->regs[VCPU_REGS_RBP];
	tss->si = ctxt->regs[VCPU_REGS_RSI];
	tss->di = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
	ctxt->regs[VCPU_REGS_RSI] = tss->si;
	ctxt->regs[VCPU_REGS_RDI] = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = ctxt->regs[VCPU_REGS_RAX];
	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
	tss->edx = ctxt->regs[VCPU_REGS_RDX];
	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
	tss->esp = ctxt->regs[VCPU_REGS_RSP];
	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
	tss->esi = ctxt->regs[VCPU_REGS_RSI];
	tss->edi = ctxt->regs[VCPU_REGS_RDI];

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
	ctxt->regs[VCPU_REGS_RDI] = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If a fault happens at this stage
	 * it is handled in the context of the new task.
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					true, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
			     &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
			    &ctxt->exception);
	if (ret != X86EMUL_CONTINUE)
		/* FIXME: need to provide precise fault address */
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
				     &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			/* FIXME: need to provide precise fault address */
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

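/*
 * Common task-switch engine: validates the new TSS descriptor and the
 * caller's privilege, saves state into the old TSS, manages the busy
 * bit and the NT/back-link, loads the new task's state, and finally
 * pushes any error code onto the new task's stack.
 */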
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS: Check against DPL of the TSS
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	} else if (reason != TASK_SWITCH_IRET) {
		int dpl = next_tss_desc.dpl;
		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
			return emulate_gp(ctxt, tss_selector);
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/*
	 * Set the back link to the previous task only if the NT bit is
	 * set in eflags; note that old_tss_sel is not used after this
	 * point.
	 */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE)
		ctxt->eip = ctxt->_eip;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
			    int reg, struct operand *op)
{
	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;

	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
	op->addr.mem.seg = seg;
}

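/*
 * DAS (decimal adjust AL after subtraction): if the low nibble of AL
 * exceeds 9 or AF is set, subtract 6 and set AF; if the original AL
 * exceeded 0x99 or CF was set, also subtract 0x60 and set CF.
 */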
static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	emulate_2op_SrcV(ctxt, "or");
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/*
	 * If we failed, we tainted the memory, but at the very least we
	 * should restore the old CS.
	 */
	if (rc != X86EMUL_CONTINUE)
		goto fail;
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
				   ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_add(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "add");
	return X86EMUL_CONTINUE;
}

static int em_or(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "or");
	return X86EMUL_CONTINUE;
}

static int em_adc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "adc");
	return X86EMUL_CONTINUE;
}

static int em_sbb(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sbb");
	return X86EMUL_CONTINUE;
}

static int em_and(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "and");
	return X86EMUL_CONTINUE;
}

static int em_sub(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "sub");
	return X86EMUL_CONTINUE;
}

static int em_xor(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "xor");
	return X86EMUL_CONTINUE;
}

static int em_cmp(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "cmp");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_test(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV(ctxt, "test");
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "imul");
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
		return emulate_gp(ctxt, 0);
	ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
	ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
		| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
	if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_data;

	if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
		return emulate_gp(ctxt, 0);

	ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
	ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
	return X86EMUL_CONTINUE;
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_movdqu(struct x86_emulate_ctxt *ctxt)
{
	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
	return X86EMUL_CONTINUE;
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
		return X86EMUL_UNHANDLEABLE;

	rc = ctxt->ops->fix_hypercall(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_gdt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = ctxt->ops->fix_hypercall(ctxt);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return rc;
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	struct desc_ptr desc_ptr;
	int rc;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_bt(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;

	emulate_2op_SrcV_nobyte(ctxt, "bt");
	return X86EMUL_CONTINUE;
}

static int em_bts(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "bts");
	return X86EMUL_CONTINUE;
}

static int em_btr(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btr");
	return X86EMUL_CONTINUE;
}

static int em_btc(struct x86_emulate_ctxt *ctxt)
{
	emulate_2op_SrcV_nobyte(ctxt, "btc");
	return X86EMUL_CONTINUE;
}
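
/*
 * BSF/BSR run the real instruction on the host and copy ZF back by
 * hand.  When the source is zero the destination is architecturally
 * undefined, so writeback is suppressed (the guest register is left
 * unchanged) and only ZF is reported.
 */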
static int em_bsf(struct x86_emulate_ctxt *ctxt)
{
	u8 zf;

	__asm__ ("bsf %2, %0; setz %1"
		 : "=r"(ctxt->dst.val), "=q"(zf)
		 : "r"(ctxt->src.val));

	ctxt->eflags &= ~X86_EFLAGS_ZF;
	if (zf) {
		ctxt->eflags |= X86_EFLAGS_ZF;
		/* Disable writeback. */
		ctxt->dst.type = OP_NONE;
	}
	return X86EMUL_CONTINUE;
}

static int em_bsr(struct x86_emulate_ctxt *ctxt)
{
	u8 zf;

	__asm__ ("bsr %2, %0; setz %1"
		 : "=r"(ctxt->dst.val), "=q"(zf)
		 : "r"(ctxt->src.val));

	ctxt->eflags &= ~X86_EFLAGS_ZF;
	if (zf) {
		ctxt->eflags |= X86_EFLAGS_ZF;
		/* Disable writeback. */
		ctxt->dst.type = OP_NONE;
	}
	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}
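
/*
 * CR writes #GP on any attempt to set reserved bits, and additionally
 * enforce the architectural consistency rules: CR0.PG requires CR0.PE,
 * CR0.NW requires CR0.CD, paging cannot be enabled with EFER.LME set
 * unless CR4.PAE is set, CR3 reserved bits depend on the active paging
 * mode, and CR4.PAE cannot be cleared while in long mode.
 */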
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int cr = ctxt->modrm_reg;
	u64 efer = 0;

	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		0, 0, 0, /* CR3 checked later */
		CR4_RESERVED_BITS,
		0, 0, 0,
		CR8_RESERVED_BITS,
	};

	if (!valid_cr(cr))
		return emulate_ud(ctxt);

	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);

	switch (cr) {
	case 0: {
		u64 cr4;
		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
			return emulate_gp(ctxt, 0);

		cr4 = ctxt->ops->get_cr(ctxt, 4);
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
		    !(cr4 & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	case 3: {
		u64 rsvd = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (efer & EFER_LMA)
			rsvd = CR3_L_MODE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
			rsvd = CR3_PAE_RESERVED_BITS;
		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
			rsvd = CR3_NONPAE_RESERVED_BITS;

		if (new_val & rsvd)
			return emulate_gp(ctxt, 0);

		break;
		}
	case 4: {
		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

		break;
		}
	}

	return X86EMUL_CONTINUE;
}
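
/*
 * Debug-register access: DR4/DR5 alias DR6/DR7 and #UD when CR4.DE is
 * set, and any access while DR7.GD (general detect, bit 13) is set
 * raises #DB.  Writes additionally #GP if the upper 32 bits of DR6 or
 * DR7 would be set.
 */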
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = ctxt->regs[VCPU_REGS_RAX];

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}
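
/*
 * RDTSC is restricted to CPL 0 when CR4.TSD is set (the emulator
 * raises #UD here); RDPMC #GPs at CPL > 0 unless CR4.PCE is set, or
 * when RCX selects a counter above 3.
 */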
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = ctxt->regs[VCPU_REGS_RCX];

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    (rcx > 3))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}
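
/*
 * Shorthand constructors for the opcode tables below.  Each entry
 * carries decode flags, an optional execute callback (u.execute), an
 * optional sub-table (u.group/u.gdual/u.gprefix), the intercept id
 * used for nested-virtualization exits, and an optional permission
 * check run before execution.  The *2bv variants emit a ByteOp entry
 * followed by a word/dword one; I6ALU expands to the six classic ALU
 * encodings (reg/mem, mem/reg and accumulator/immediate forms).
 */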
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
			.check_perm = (_p) }
#define N D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
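
/*
 * Group tables: eight entries indexed by the ModRM reg field
 * (bits 5:3).  group_dual provides separate mod != 3 and mod == 3
 * rows, and the group7_rm* tables are further indexed by ModRM.rm
 * via the RMExt flag.
 */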
static struct opcode group7_rm1[] = {
	DI(SrcNone | ModRM | Priv, monitor),
	DI(SrcNone | ModRM | Priv, mwait),
	N, N, N, N, N, N,
};

static struct opcode group7_rm3[] = {
	DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
	DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
};

static struct opcode group7_rm7[] = {
	N,
	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
	N, N, N, N, N, N,
};

static struct opcode group1[] = {
	I(Lock, em_add),
	I(Lock | PageTable, em_or),
	I(Lock, em_adc),
	I(Lock, em_sbb),
	I(Lock | PageTable, em_and),
	I(Lock, em_sub),
	I(Lock, em_xor),
	I(0, em_cmp),
};

static struct opcode group1A[] = {
	I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};

static struct opcode group3[] = {
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcImm | ModRM, em_test),
	I(DstMem | SrcNone | ModRM | Lock, em_not),
	I(DstMem | SrcNone | ModRM | Lock, em_neg),
	I(SrcMem | ModRM, em_mul_ex),
	I(SrcMem | ModRM, em_imul_ex),
	I(SrcMem | ModRM, em_div_ex),
	I(SrcMem | ModRM, em_idiv_ex),
};

static struct opcode group4[] = {
	I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
	N, N, N, N, N, N,
};

static struct opcode group5[] = {
	I(DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(DstMem | SrcNone | ModRM | Lock, em_grp45),
	I(SrcMem | ModRM | Stack, em_grp45),
	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
	I(SrcMem | ModRM | Stack, em_grp45),
	I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
	I(SrcMem | ModRM | Stack, em_grp45), N,
};

static struct opcode group6[] = {
	DI(ModRM | Prot, sldt),
	DI(ModRM | Prot, str),
	DI(ModRM | Prot | Priv, lldt),
	DI(ModRM | Prot | Priv, ltr),
	N, N, N, N,
};

static struct group_dual group7 = { {
	DI(ModRM | Mov | DstMem | Priv, sgdt),
	DI(ModRM | Mov | DstMem | Priv, sidt),
	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
	II(ModRM | SrcMem | Priv, em_lidt, lidt),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
	EXT(0, group7_rm1),
	N, EXT(0, group7_rm3),
	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
} };

static struct opcode group8[] = {
	N, N, N, N,
	I(DstMem | SrcImmByte | ModRM, em_bt),
	I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
	I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
	I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
};

static struct group_dual group9 = { {
	N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
	N, N, N, N, N, N, N, N,
} };

static struct opcode group11[] = {
	I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
	X7(D(Undefined)),
};

static struct gprefix pfx_0f_6f_0f_7f = {
	N, N, N, I(Sse, em_movdqu),
};
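
/*
 * The main one-byte opcode table: 256 entries indexed by the primary
 * opcode byte.  0x0f escapes to twobyte_table below; group entries
 * defer to the tables above once the ModRM byte is known.
 */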
static struct opcode opcode_table[256] = {
	/* 0x00 - 0x07 */
	I6ALU(Lock, em_add),
	I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
	/* 0x08 - 0x0F */
	I6ALU(Lock | PageTable, em_or),
	I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
	N,
	/* 0x10 - 0x17 */
	I6ALU(Lock, em_adc),
	I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
	/* 0x18 - 0x1F */
	I6ALU(Lock, em_sbb),
	I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
	I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
	/* 0x20 - 0x27 */
	I6ALU(Lock | PageTable, em_and), N, N,
	/* 0x28 - 0x2F */
	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
	/* 0x30 - 0x37 */
	I6ALU(Lock, em_xor), N, N,
	/* 0x38 - 0x3F */
	I6ALU(0, em_cmp), N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg)),
	/* 0x50 - 0x57 */
	X8(I(SrcReg | Stack, em_push)),
	/* 0x58 - 0x5F */
	X8(I(DstReg | Stack, em_pop)),
	/* 0x60 - 0x67 */
	I(ImplicitOps | Stack | No64, em_pusha),
	I(ImplicitOps | Stack | No64, em_popa),
	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
	N, N, N, N,
	/* 0x68 - 0x6F */
	I(SrcImm | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */
	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */
	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
	G(DstMem | SrcImm | ModRM | Group, group1),
	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
	G(DstMem | SrcImmByte | ModRM | Group, group1),
	I2bv(DstMem | SrcReg | ModRM, em_test),
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
	/* 0x88 - 0x8F */
	I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
	I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
	D(ModRM | SrcMem | NoAccess | DstReg),
	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
	G(0, group1A),
	/* 0x90 - 0x97 */
	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
	/* 0x98 - 0x9F */
	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
	I(SrcImmFAddr | No64, em_call_far), N,
	II(ImplicitOps | Stack, em_pushf, pushf),
	II(ImplicitOps | Stack, em_popf, popf), N, N,
	/* 0xA0 - 0xA7 */
	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
	I2bv(SrcSI | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstDI | String, em_cmp),
	/* 0xA8 - 0xAF */
	I2bv(DstAcc | SrcImm, em_test),
	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
	I2bv(SrcAcc | DstDI | String, em_cmp),
	/* 0xB0 - 0xB7 */
	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
	/* 0xB8 - 0xBF */
	X8(I(DstReg | SrcImm | Mov, em_mov)),
	/* 0xC0 - 0xC7 */
	D2bv(DstMem | SrcImmByte | ModRM),
	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
	I(ImplicitOps | Stack, em_ret),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
	G(ByteOp, group11), G(0, group11),
	/* 0xC8 - 0xCF */
	N, N, N, I(ImplicitOps | Stack, em_ret_far),
	D(ImplicitOps), DI(SrcImmByte, intn),
	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
	/* 0xD0 - 0xD7 */
	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
	N, N, N, N,
	/* 0xD8 - 0xDF */
	N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xE7 */
	X3(I(SrcImmByte, em_loop)),
	I(SrcImmByte, em_jcxz),
	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
	/* 0xE8 - 0xEF */
	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
	G(ByteOp, group3), G(0, group3),
	/* 0xF8 - 0xFF */
	D(ImplicitOps), D(ImplicitOps),
	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static struct opcode twobyte_table[256] = {
	/* 0x00 - 0x0F */
	G(0, group6), GD(0, &group7), N, N,
	N, I(ImplicitOps | VendorSpecific, em_syscall),
	II(ImplicitOps | Priv, em_clts, clts), N,
	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
	N, D(ImplicitOps | ModRM), N, N,
	/* 0x10 - 0x1F */
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	/* 0x20 - 0x2F */
	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
	IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
	IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
	N, N, N, N,
	N, N, N, N, N, N, N, N,
	/* 0x30 - 0x3F */
	II(ImplicitOps | Priv, em_wrmsr, wrmsr),
	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
	II(ImplicitOps | Priv, em_rdmsr, rdmsr),
	IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
	I(ImplicitOps | VendorSpecific, em_sysenter),
	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
	N, N,
	N, N, N, N, N, N, N, N,
	/* 0x40 - 0x4F */
	X16(D(DstReg | SrcMem | ModRM | Mov)),
	/* 0x50 - 0x5F */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0x60 - 0x6F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x70 - 0x7F */
	N, N, N, N,
	N, N, N, N,
	N, N, N, N,
	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
	/* 0x80 - 0x8F */
	X16(D(SrcImm)),
	/* 0x90 - 0x9F */
	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
	/* 0xA0 - 0xA7 */
	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
	DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
	/* 0xA8 - 0xAF */
	I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
	DI(ImplicitOps, rsm),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
	D(DstMem | SrcReg | Src2ImmByte | ModRM),
	D(DstMem | SrcReg | Src2CL | ModRM),
	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
	/* 0xB0 - 0xB7 */
	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
	I(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
	I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
	I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xB8 - 0xBF */
	N, N,
	G(BitOp, group8),
	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
	I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
	/* 0xC0 - 0xCF */
	D2bv(DstMem | SrcReg | ModRM | Lock),
	N, D(DstMem | SrcReg | ModRM | Mov),
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N,
	/* 0xD0 - 0xDF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xE0 - 0xEF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	/* 0xF0 - 0xFF */
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT

#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef I6ALU

static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
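
/*
 * Fetch an immediate operand from the instruction stream.  Immediates
 * are read sign-extended; at most 4 bytes are fetched even with 64-bit
 * operands (imm_size() caps at 4), matching the usual sign-extended
 * imm32 encodings.  Zero extension is applied afterwards on request.
 */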
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
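
/*
 * Materialize one operand from its OpXXX encoding in ctxt->d.  Memory
 * operands share the single ModRM-decoded ctxt->memop (mem_common);
 * string operands build an address from RSI/RDI, and the segment
 * pseudo-operands just carry a VCPU_SREG_* selector index in op->val.
 */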
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if ((ctxt->d & BitOp) && op == &ctxt->dst)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
		fetch_register_operand(op);
		break;
	case OpCL:
		op->bytes = 1;
		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
		op->addr.mem.seg = seg_override(ctxt);
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
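
/*
 * Decode one instruction: consume legacy and REX prefixes, look up the
 * opcode (escaping to the two-byte table after 0x0f), resolve group,
 * group-dual, RM-extension and mandatory-prefix sub-tables, decode
 * ModRM/SIB or an absolute address, and finally materialize the src,
 * src2 and dst operands.  Returns EMULATION_OK or EMULATION_FAILED.
 */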
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	struct opcode opcode;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.start = ctxt->_eip;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
		case 0x2e:	/* CS override */
		case 0x36:	/* SS override */
		case 0x3e:	/* DS override */
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			break;
		case 0x64:	/* FS override */
		case 0x65:	/* GS override */
			set_seg_override(ctxt, ctxt->b & 7);
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */
		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */

	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->twobyte = 1;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];
	}
	ctxt->d = opcode.flags;

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			ctxt->modrm = insn_fetch(u8, ctxt);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			ctxt->modrm = insn_fetch(u8, ctxt);
			--ctxt->_eip;
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}

	ctxt->execute = opcode.u.execute;
	ctxt->check_perm = opcode.check_perm;
	ctxt->intercept = opcode.intercept;

	/* Unrecognised? */
	if (ctxt->d == 0 || (ctxt->d & Undefined))
		return EMULATION_FAILED;

	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
		return EMULATION_FAILED;

	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
		ctxt->op_bytes = 8;

	if (ctxt->d & Op3264) {
		if (mode == X86EMUL_MODE_PROT64)
			ctxt->op_bytes = 8;
		else
			ctxt->op_bytes = 4;
	}

	if (ctxt->d & Sse)
		ctxt->op_bytes = 16;

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!ctxt->has_seg_override)
			set_seg_override(ctxt, ctxt->modrm_seg);
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!ctxt->has_seg_override)
		set_seg_override(ctxt, VCPU_SREG_DS);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

done:
	if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
		ctxt->memopp->addr.mem.ea += ctxt->_eip;

	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}

static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition applies only to REPE and
	 * REPNE: for CMPS (0xa6/0xa7) and SCAS (0xae/0xaf), if the
	 * repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
	 * corresponding flag condition as well:
	 *   - if REPE/REPZ and ZF = 0 then done
	 *   - if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & EFLG_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
		return true;

	return false;
}
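
/*
 * Execute one previously decoded instruction.  The phases are: mode,
 * LOCK, SSE, privilege and intercept checks; reading the memory
 * operands; dispatch through ctxt->execute or the opcode switches
 * below; operand writeback; and, for string instructions, advancing
 * RSI/RDI/RCX and deciding whether to restart the same instruction.
 */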
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;

	ctxt->mem_read.pos = 0;

	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* The LOCK prefix is allowed only with certain instructions. */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse)
	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
		rc = emulate_nm(ctxt);
		goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/* Privileged instructions can be executed only at CPL 0. */
	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}

	/* Some instructions can be executed only in protected mode. */
	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	/* Do instruction-specific permission checks. */
	if (ctxt->check_perm) {
		rc = ctxt->check_perm(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		/* All REP prefixes share the same first termination condition. */
		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
			ctxt->eip = ctxt->_eip;
			goto done;
		}
	}

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* Optimisation: avoid the slow emulated read if Mov. */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:

	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->execute) {
		rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->twobyte)
		goto twobyte_insn;

	switch (ctxt->b) {
	case 0x40 ... 0x47: /* inc r16/r32 */
		emulate_1op(ctxt, "inc");
		break;
	case 0x48 ... 0x4f: /* dec r16/r32 */
		emulate_1op(ctxt, "dec");
		break;
	case 0x63:		/* movsxd */
		if (ctxt->mode != X86EMUL_MODE_PROT64)
			goto cannot_emulate;
		ctxt->dst.val = (s32) ctxt->src.val;
		break;
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
			break;
		rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xc0 ... 0xc1:
		rc = em_grp2(ctxt);
		break;
	case 0xcc:		/* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd:		/* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce:		/* into */
		if (ctxt->eflags & EFLG_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xd0 ... 0xd1:	/* Grp2 */
		rc = em_grp2(ctxt);
		break;
	case 0xd2 ... 0xd3:	/* Grp2 */
		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
		rc = em_grp2(ctxt);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4:		/* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5:		/* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= EFLG_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~EFLG_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= EFLG_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~EFLG_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= EFLG_DF;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	rc = writeback(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Restore the dst type in case the decoding is reused
	 * (happens for string instructions).
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, seg_override(ctxt),
				VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
				&ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		struct read_cache *r = &ctxt->io_read;
		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter the guest when the pio read-ahead buffer
			 * is empty or, if it is not used, after every 1024
			 * iterations.
			 */
			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset the read cache.  This usually happens
				 * before decode, but since the instruction is
				 * restarted we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
	}

	ctxt->eip = ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09:		/* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08:		/* invd */
	case 0x0d:		/* GrpP (prefetch) */
	case 0x18:		/* Grp16 (prefetch/nop) */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f:	/* cmov */
		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jcc (near) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f:	/* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xa4: /* shld imm8, r, r/m */
	case 0xa5: /* shld cl, r, r/m */
		emulate_2op_cl(ctxt, "shld");
		break;
	case 0xac: /* shrd imm8, r, r/m */
	case 0xad: /* shrd cl, r, r/m */
		emulate_2op_cl(ctxt, "shrd");
		break;
	case 0xae:		/* clflush */
		break;
	case 0xb6 ... 0xb7:	/* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
						   : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf:	/* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
						     (s16) ctxt->src.val;
		break;
	case 0xc0 ... 0xc1:	/* xadd */
		emulate_2op_SrcV(ctxt, "add");
		/* Write back the register source. */
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		break;
	case 0xc3:		/* movnti */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
							(u64) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}