msm_nand.c

/*
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crc16.h>
#include <linux/bitrev.h>
#include <asm/dma.h>
#include <asm/mach/flash.h>
#include <mach/dma.h>

#include "msm_nand.h"
unsigned long msm_nand_phys;
unsigned long msm_nandc01_phys;
unsigned long msm_nandc10_phys;
unsigned long msm_nandc11_phys;
unsigned long ebi2_register_base;
uint32_t dual_nand_ctlr_present;
uint32_t interleave_enable;
uint32_t enable_bch_ecc;

#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
#define MSM_NAND_DMA_BUFFER_SLOTS \
	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
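/*
 * Note: with an 8 KB buffer and a 32-bit busy bitmap (atomic_t.counter is a
 * 32-bit int here), each slot works out to 256 bytes and there are 32 slots,
 * so one bit in dma_buffer_busy tracks one slot of the shared DMA buffer.
 */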
#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d

#define ONFI_IDENTIFIER_LENGTH 0x0004
#define ONFI_PARAM_INFO_LENGTH 0x0200
#define ONFI_PARAM_PAGE_LENGTH 0x0100

#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F

#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00

#define VERBOSE 0

struct msm_nand_chip {
	struct device *dev;
	wait_queue_head_t wait_queue;
	atomic_t dma_buffer_busy;
	unsigned dma_channel;
	uint8_t *dma_buffer;
	dma_addr_t dma_addr;
	unsigned CFG0, CFG1, CFG0_RAW, CFG1_RAW;
	uint32_t ecc_buf_cfg;
	uint32_t ecc_bch_cfg;
	uint32_t ecc_parity_bytes;
	unsigned cw_size;
	unsigned int uncorrectable_bit_mask;
	unsigned int num_err_mask;
};

#define CFG1_WIDE_FLASH (1U << 1)

/* TODO: move datamover code out */

#define SRC_CRCI_NAND_CMD  CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
#define DST_CRCI_NAND_CMD  CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)

#define msm_virt_to_dma(chip, vaddr) \
	((chip)->dma_addr + \
	 ((uint8_t *)(vaddr) - (chip)->dma_buffer))
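/*
 * msm_virt_to_dma() above converts a CPU pointer that lies inside the
 * coherent DMA buffer into the corresponding bus address handed to the data
 * mover; it is only meaningful for pointers within chip->dma_buffer.
 */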

/**
 * msm_nand_oob_64 - oob info for 2KB page
 */
static struct nand_ecclayout msm_nand_oob_64 = {
	.eccbytes = 40,
	.eccpos = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
		10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
		46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
	},
	.oobavail = 16,
	.oobfree = {
		{30, 16},
	}
};

/**
 * msm_nand_oob_128 - oob info for 4KB page
 */
static struct nand_ecclayout msm_nand_oob_128 = {
	.eccbytes = 80,
	.eccpos = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
		10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
		30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
		50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
		60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
		102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
	},
	.oobavail = 32,
	.oobfree = {
		{70, 32},
	}
};

/**
 * msm_nand_oob_224 - oob info for 4KB page 8Bit interface
 */
static struct nand_ecclayout msm_nand_oob_224_x8 = {
	.eccbytes = 104,
	.eccpos = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
		13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
		26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
		39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
		52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
		65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
		78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
		123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
	},
	.oobavail = 32,
	.oobfree = {
		{91, 32},
	}
};

/**
 * msm_nand_oob_224 - oob info for 4KB page 16Bit interface
 */
static struct nand_ecclayout msm_nand_oob_224_x16 = {
	.eccbytes = 112,
	.eccpos = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
		14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
		28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
		42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
		70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
		84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
		130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
	},
	.oobavail = 32,
	.oobfree = {
		{98, 32},
	}
};

/**
 * msm_nand_oob_256 - oob info for 8KB page
 */
static struct nand_ecclayout msm_nand_oob_256 = {
	.eccbytes = 160,
	.eccpos = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
		10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
		30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
		50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
		60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
		70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
		90, 91, 92, 93, 94, 96, 97, 98, 99, 100,
		101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
		111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
		121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
		131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
		141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
		215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
	},
	.oobavail = 64,
	.oobfree = {
		{151, 64},
	}
};

/**
 * msm_onenand_oob_64 - oob info for large (2KB) page
 */
static struct nand_ecclayout msm_onenand_oob_64 = {
	.eccbytes = 20,
	.eccpos = {
		8, 9, 10, 11, 12,
		24, 25, 26, 27, 28,
		40, 41, 42, 43, 44,
		56, 57, 58, 59, 60,
	},
	.oobavail = 20,
	.oobfree = {
		{2, 3}, {14, 2}, {18, 3}, {30, 2},
		{34, 3}, {46, 2}, {50, 3}, {62, 2}
	}
};
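
/*
 * msm_nand_get_dma_buffer() hands out a piece of the pre-allocated coherent
 * DMA buffer. The request is rounded up to whole slots, a free run of slot
 * bits is searched for in dma_buffer_busy, and the run is claimed with
 * atomic_cmpxchg(). NULL is returned if no large-enough run is currently
 * free; callers sleep on chip->wait_queue and retry.
 */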
static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
{
	unsigned int bitmask, free_bitmask, old_bitmask;
	unsigned int need_mask, current_need_mask;
	int free_index;

	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
	bitmask = atomic_read(&chip->dma_buffer_busy);
	free_bitmask = ~bitmask;
	do {
		free_index = __ffs(free_bitmask);
		current_need_mask = need_mask << free_index;

		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
						MSM_NAND_DMA_BUFFER_SIZE)
			return NULL;

		if ((bitmask & current_need_mask) == 0) {
			old_bitmask =
				atomic_cmpxchg(&chip->dma_buffer_busy,
					       bitmask,
					       bitmask | current_need_mask);
			if (old_bitmask == bitmask)
				return chip->dma_buffer +
					free_index * MSM_NAND_DMA_BUFFER_SLOTS;
			free_bitmask = 0; /* force return */
		}
		/* current free range was too small, clear all free bits */
		/* below the top busy bit within current_need_mask */
		free_bitmask &=
			~(~0U >> (32 - fls(bitmask & current_need_mask)));
	} while (free_bitmask);

	return NULL;
}

static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
					void *buffer, size_t size)
{
	int index;
	unsigned int used_mask;

	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
	index = ((uint8_t *)buffer - chip->dma_buffer) /
		MSM_NAND_DMA_BUFFER_SLOTS;
	atomic_sub(used_mask << index, &chip->dma_buffer_busy);

	wake_up(&chip->wait_queue);
}
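
/*
 * Controller register reads and writes below go through a single data mover
 * descriptor rather than readl()/writel(), presumably so that register
 * accesses stay ordered with respect to the flash operations already queued
 * on the same DM channel.
 */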
unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
{
	struct {
		dmov_s cmd;
		unsigned cmdptr;
		unsigned data;
	} *dma_buffer;
	unsigned rv;

	wait_event(chip->wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));

	dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
	dma_buffer->cmd.src = addr;
	dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
	dma_buffer->cmd.len = 4;

	dma_buffer->cmdptr =
		(msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
	dma_buffer->data = 0xeeeeeeee;

	mb();
	msm_dmov_exec_cmd(
		chip->dma_channel, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	mb();

	rv = dma_buffer->data;

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

	return rv;
}

void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
{
	struct {
		dmov_s cmd;
		unsigned cmdptr;
		unsigned data;
	} *dma_buffer;

	wait_event(chip->wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));

	dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
	dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
	dma_buffer->cmd.dst = addr;
	dma_buffer->cmd.len = 4;

	dma_buffer->cmdptr =
		(msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
	dma_buffer->data = val;

	mb();
	msm_dmov_exec_cmd(
		chip->dma_channel, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	mb();

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
}
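
/*
 * msm_nand_dma_map() maps a caller-supplied buffer for streaming DMA. Lowmem
 * addresses are translated with virt_to_page(); vmalloc'd buffers are looked
 * up with vmalloc_to_page(), which only covers a single page, hence the
 * WARN_ON() when such a mapping would cross a page boundary.
 */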
static dma_addr_t
msm_nand_dma_map(struct device *dev, void *addr, size_t size,
		 enum dma_data_direction dir)
{
	struct page *page;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	if (virt_addr_valid(addr))
		page = virt_to_page(addr);
	else {
		if (WARN_ON(size + offset > PAGE_SIZE))
			return ~0;
		page = vmalloc_to_page(addr);
	}
	return dma_map_page(dev, page, offset, size, dir);
}
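
/*
 * flash_read_id() issues the FETCH_ID command through a seven-descriptor
 * command list: save and zero SFLASHC_BURST_CFG, select the chip, write the
 * command, kick EXEC, wait for the flash status, read MSM_NAND_READ_ID, then
 * restore the burst configuration. The returned word packs the maker ID in
 * the low byte and the device ID in the next byte.
 */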
uint32_t flash_read_id(struct msm_nand_chip *chip)
{
	struct {
		dmov_s cmd[7];
		unsigned cmdptr;
		unsigned data[7];
	} *dma_buffer;
	uint32_t rv;

	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	dma_buffer->data[0] = 0 | 4;
	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
	dma_buffer->data[2] = 1;
	dma_buffer->data[3] = 0xeeeeeeee;
	dma_buffer->data[4] = 0xeeeeeeee;
	dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
	dma_buffer->data[6] = 0x00000000;
	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);

	dma_buffer->cmd[0].cmd = 0 | CMD_OCB;
	dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
	dma_buffer->cmd[0].dst = MSM_NAND_SFLASHC_BURST_CFG;
	dma_buffer->cmd[0].len = 4;

	dma_buffer->cmd[1].cmd = 0;
	dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
	dma_buffer->cmd[1].dst = MSM_NAND_FLASH_CHIP_SELECT;
	dma_buffer->cmd[1].len = 4;

	dma_buffer->cmd[2].cmd = DST_CRCI_NAND_CMD;
	dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
	dma_buffer->cmd[2].dst = MSM_NAND_FLASH_CMD;
	dma_buffer->cmd[2].len = 4;

	dma_buffer->cmd[3].cmd = 0;
	dma_buffer->cmd[3].src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
	dma_buffer->cmd[3].dst = MSM_NAND_EXEC_CMD;
	dma_buffer->cmd[3].len = 4;

	dma_buffer->cmd[4].cmd = SRC_CRCI_NAND_DATA;
	dma_buffer->cmd[4].src = MSM_NAND_FLASH_STATUS;
	dma_buffer->cmd[4].dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
	dma_buffer->cmd[4].len = 4;

	dma_buffer->cmd[5].cmd = 0;
	dma_buffer->cmd[5].src = MSM_NAND_READ_ID;
	dma_buffer->cmd[5].dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
	dma_buffer->cmd[5].len = 4;

	dma_buffer->cmd[6].cmd = CMD_OCU | CMD_LC;
	dma_buffer->cmd[6].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
	dma_buffer->cmd[6].dst = MSM_NAND_SFLASHC_BURST_CFG;
	dma_buffer->cmd[6].len = 4;
	BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd) - 1);

	dma_buffer->cmdptr =
		(msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;

	mb();
	msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
	mb();

	pr_info("status: %x\n", dma_buffer->data[3]);
	pr_info("nandid: %x maker %02x device %02x\n",
		dma_buffer->data[4], dma_buffer->data[4] & 0xff,
		(dma_buffer->data[4] >> 8) & 0xff);

	rv = dma_buffer->data[4];

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

	return rv;
}

struct flash_identification {
	uint32_t flash_id;
	uint32_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	uint32_t ecc_correctability;
} supported_flash;
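
/*
 * ONFI specifies its parameter-page CRC as CRC-16 with polynomial 0x8005 and
 * initial value 0x4F4E, computed MSB-first. The kernel's crc16() works on the
 * bit-reversed (LSB-first) form of that polynomial, so the bytes, the initial
 * value and the result are all bit-reversed around the call to reproduce the
 * ONFI bit ordering.
 */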
uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
{
	int i;
	uint16_t result;

	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);

	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));

	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);

	return result;
}
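
/*
 * flash_onfi_probe() runs the command list below twice (cmd_set_count == 2):
 * the first pass reads the 4-byte ONFI identifier, the second reads
 * ONFI_PARAM_INFO_LENGTH bytes of parameter-page data, from which the first
 * copy with a valid CRC is used to fill in supported_flash. DEV_CMD1,
 * DEV_CMD_VLD and SFLASHC_BURST_CFG are temporarily overridden for the raw
 * reads and restored at the end of each pass.
 */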
uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
{
	struct onfi_param_page {
		uint32_t parameter_page_signature;
		uint16_t revision_number;
		uint16_t features_supported;
		uint16_t optional_commands_supported;
		uint8_t  reserved0[22];
		uint8_t  device_manufacturer[12];
		uint8_t  device_model[20];
		uint8_t  jedec_manufacturer_id;
		uint16_t date_code;
		uint8_t  reserved1[13];
		uint32_t number_of_data_bytes_per_page;
		uint16_t number_of_spare_bytes_per_page;
		uint32_t number_of_data_bytes_per_partial_page;
		uint16_t number_of_spare_bytes_per_partial_page;
		uint32_t number_of_pages_per_block;
		uint32_t number_of_blocks_per_logical_unit;
		uint8_t  number_of_logical_units;
		uint8_t  number_of_address_cycles;
		uint8_t  number_of_bits_per_cell;
		uint16_t maximum_bad_blocks_per_logical_unit;
		uint16_t block_endurance;
		uint8_t  guaranteed_valid_begin_blocks;
		uint16_t guaranteed_valid_begin_blocks_endurance;
		uint8_t  number_of_programs_per_page;
		uint8_t  partial_program_attributes;
		uint8_t  number_of_bits_ecc_correctability;
		uint8_t  number_of_interleaved_address_bits;
		uint8_t  interleaved_operation_attributes;
		uint8_t  reserved2[13];
		uint8_t  io_pin_capacitance;
		uint16_t timing_mode_support;
		uint16_t program_cache_timing_mode_support;
		uint16_t maximum_page_programming_time;
		uint16_t maximum_block_erase_time;
		uint16_t maximum_page_read_time;
		uint16_t maximum_change_column_setup_time;
		uint8_t  reserved3[23];
		uint16_t vendor_specific_revision_number;
		uint8_t  vendor_specific[88];
		uint16_t integrity_crc;
	} __attribute__((__packed__));

	struct onfi_param_page *onfi_param_page_ptr;
	uint8_t *onfi_identifier_buf = NULL;
	uint8_t *onfi_param_info_buf = NULL;

	struct {
		dmov_s cmd[11];
		unsigned cmdptr;
		struct {
			uint32_t cmd;
			uint32_t addr0;
			uint32_t addr1;
			uint32_t cfg0;
			uint32_t cfg1;
			uint32_t exec;
			uint32_t flash_status;
			uint32_t devcmd1_orig;
			uint32_t devcmdvld_orig;
			uint32_t devcmd1_mod;
			uint32_t devcmdvld_mod;
			uint32_t sflash_bcfg_orig;
			uint32_t sflash_bcfg_mod;
		} data;
	} *dma_buffer;
	dmov_s *cmd;

	unsigned page_address = 0;
	int err = 0;
	dma_addr_t dma_addr_param_info = 0;
	dma_addr_t dma_addr_identifier = 0;
	unsigned cmd_set_count = 2;
	unsigned crc_chk_count = 0;

	if (msm_nand_data.nr_parts) {
		page_address = ((msm_nand_data.parts[0]).offset << 6);
	} else {
		pr_err("flash_onfi_probe: No partition info available\n");
		err = -EIO;
		return err;
	}

	wait_event(chip->wait_queue, (onfi_identifier_buf =
		msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
	dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);

	wait_event(chip->wait_queue, (onfi_param_info_buf =
		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);

	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
				(chip, MSM_NAND_SFLASHC_BURST_CFG);
	dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
	dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
						       MSM_NAND_DEV_CMD_VLD);

	while (cmd_set_count-- > 0) {
		cmd = dma_buffer->cmd;

		dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
				0xFFFFFF00) | (cmd_set_count
				? FLASH_READ_ONFI_IDENTIFIER_COMMAND
				: FLASH_READ_ONFI_PARAMETERS_COMMAND);
		dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
		dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
				? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
				: FLASH_READ_ONFI_PARAMETERS_ADDRESS);
		dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
		dma_buffer->data.cfg0 = (cmd_set_count
				? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
				: MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
		dma_buffer->data.cfg1 = (cmd_set_count
				? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
				: MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
		dma_buffer->data.sflash_bcfg_mod = 0x00000000;
		dma_buffer->data.devcmdvld_mod = (dma_buffer->
				data.devcmdvld_orig & 0xFFFFFFFE);
		dma_buffer->data.exec = 1;
		dma_buffer->data.flash_status = 0xeeeeeeee;

		/* Put the Nand ctlr in Async mode and disable SFlash ctlr */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
				&dma_buffer->data.sflash_bcfg_mod);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		/* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
		cmd->dst = MSM_NAND_FLASH_CMD;
		cmd->len = 12;
		cmd++;

		/* Configure the CFG0 and CFG1 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
		cmd->dst = MSM_NAND_DEV0_CFG0;
		cmd->len = 8;
		cmd++;

		/* Configure the DEV_CMD_VLD register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
				&dma_buffer->data.devcmdvld_mod);
		cmd->dst = MSM_NAND_DEV_CMD_VLD;
		cmd->len = 4;
		cmd++;

		/* Configure the DEV_CMD1 register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.devcmd1_mod);
		cmd->dst = MSM_NAND_DEV_CMD1;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
		cmd->dst = MSM_NAND_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the two status registers */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_FLASH_STATUS;
		cmd->dst = msm_virt_to_dma(chip,
				&dma_buffer->data.flash_status);
		cmd->len = 4;
		cmd++;

		/* Read data block - valid only if status says success */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_FLASH_BUFFER;
		cmd->dst = (cmd_set_count ? dma_addr_identifier :
				dma_addr_param_info);
		cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
				ONFI_PARAM_INFO_LENGTH);
		cmd++;

		/* Restore the DEV_CMD1 register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
				&dma_buffer->data.devcmd1_orig);
		cmd->dst = MSM_NAND_DEV_CMD1;
		cmd->len = 4;
		cmd++;

		/* Restore the DEV_CMD_VLD register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
				&dma_buffer->data.devcmdvld_orig);
		cmd->dst = MSM_NAND_DEV_CMD_VLD;
		cmd->len = 4;
		cmd++;

		/* Restore the SFLASH_BURST_CONFIG register */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip,
				&dma_buffer->data.sflash_bcfg_orig);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		BUILD_BUG_ON(11 != ARRAY_SIZE(dma_buffer->cmd));
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		dma_buffer->cmd[0].cmd |= CMD_OCB;
		cmd[-1].cmd |= CMD_OCU | CMD_LC;

		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
				>> 3) | CMD_PTR_LP;

		mb();
		msm_dmov_exec_cmd(chip->dma_channel,
			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
		mb();

		/* Check for errors, protection violations etc */
		if (dma_buffer->data.flash_status & 0x110) {
			pr_info("MPU/OP error (0x%x) during ONFI probe\n",
					dma_buffer->data.flash_status);
			err = -EIO;
			break;
		}

		if (cmd_set_count) {
			onfi_param_page_ptr = (struct onfi_param_page *)
				(&(onfi_identifier_buf[0]));
			if (onfi_param_page_ptr->parameter_page_signature !=
					ONFI_PARAMETER_PAGE_SIGNATURE) {
				pr_info("ONFI probe : Found a non-ONFI "
					"compliant device\n");
				err = -EIO;
				break;
			}
		} else {
			for (crc_chk_count = 0; crc_chk_count <
					ONFI_PARAM_INFO_LENGTH
					/ ONFI_PARAM_PAGE_LENGTH;
					crc_chk_count++) {
				onfi_param_page_ptr =
					(struct onfi_param_page *)
					(&(onfi_param_info_buf
					[ONFI_PARAM_PAGE_LENGTH *
					crc_chk_count]));
				if (flash_onfi_crc_check(
					(uint8_t *)onfi_param_page_ptr,
					ONFI_PARAM_PAGE_LENGTH - 2) ==
					onfi_param_page_ptr->integrity_crc) {
					break;
				}
			}
			if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
					/ ONFI_PARAM_PAGE_LENGTH) {
				pr_info("ONFI probe : CRC check failed on "
					"ONFI parameter data\n");
				err = -EIO;
				break;
			} else {
				supported_flash.flash_id =
					flash_read_id(chip);
				supported_flash.widebus =
					onfi_param_page_ptr->
					features_supported & 0x01;
				supported_flash.pagesize =
					onfi_param_page_ptr->
					number_of_data_bytes_per_page;
				supported_flash.blksize =
					onfi_param_page_ptr->
					number_of_pages_per_block *
					supported_flash.pagesize;
				supported_flash.oobsize =
					onfi_param_page_ptr->
					number_of_spare_bytes_per_page;
				supported_flash.density =
					onfi_param_page_ptr->
					number_of_blocks_per_logical_unit
					* supported_flash.blksize;
				supported_flash.ecc_correctability =
					onfi_param_page_ptr->
					number_of_bits_ecc_correctability;

				pr_info("ONFI probe : Found an ONFI "
					"compliant device %s\n",
					onfi_param_page_ptr->device_model);

				/* Temporary hack for MT29F4G08ABC device.
				 * Since the device is not properly adhering
				 * to the ONFI specification it reports itself
				 * as a 16-bit device though it is an 8-bit
				 * device.
				 */
				if (!strncmp(onfi_param_page_ptr->device_model,
					"MT29F4G08ABC", 12))
					supported_flash.widebus = 0;
			}
		}
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
			ONFI_PARAM_INFO_LENGTH);
	msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
			ONFI_IDENTIFIER_LENGTH);

	return err;
}
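
/*
 * msm_nand_read_oob() reads whole pages codeword by codeword. With the
 * hardware ECC path each 512-byte sector is fetched as a 516-byte codeword
 * (data plus a few spare bytes), except the last codeword of the page, which
 * carries 512 - 4*(cwperpage - 1) data bytes so the page total comes out to
 * exactly cwperpage * 512 bytes; raw mode transfers chip->cw_size bytes per
 * codeword instead.
 */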
  679. static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
  680. struct mtd_oob_ops *ops)
  681. {
  682. struct msm_nand_chip *chip = mtd->priv;
  683. struct {
  684. dmov_s cmd[8 * 5 + 2];
  685. unsigned cmdptr;
  686. struct {
  687. uint32_t cmd;
  688. uint32_t addr0;
  689. uint32_t addr1;
  690. uint32_t chipsel;
  691. uint32_t cfg0;
  692. uint32_t cfg1;
  693. uint32_t eccbchcfg;
  694. uint32_t exec;
  695. uint32_t ecccfg;
  696. struct {
  697. uint32_t flash_status;
  698. uint32_t buffer_status;
  699. } result[8];
  700. } data;
  701. } *dma_buffer;
  702. dmov_s *cmd;
  703. unsigned n;
  704. unsigned page = 0;
  705. uint32_t oob_len;
  706. uint32_t sectordatasize;
  707. uint32_t sectoroobsize;
  708. int err, pageerr, rawerr;
  709. dma_addr_t data_dma_addr = 0;
  710. dma_addr_t oob_dma_addr = 0;
  711. dma_addr_t data_dma_addr_curr = 0;
  712. dma_addr_t oob_dma_addr_curr = 0;
  713. uint32_t oob_col = 0;
  714. unsigned page_count;
  715. unsigned pages_read = 0;
  716. unsigned start_sector = 0;
  717. uint32_t ecc_errors;
  718. uint32_t total_ecc_errors = 0;
  719. unsigned cwperpage;
  720. #if VERBOSE
  721. pr_info("================================================="
  722. "================\n");
  723. pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
  724. "\noobbuf 0x%p ooblen 0x%x\n",
  725. __func__, from, ops->mode, ops->datbuf, ops->len,
  726. ops->oobbuf, ops->ooblen);
  727. #endif
  728. if (mtd->writesize == 2048)
  729. page = from >> 11;
  730. if (mtd->writesize == 4096)
  731. page = from >> 12;
  732. oob_len = ops->ooblen;
  733. cwperpage = (mtd->writesize >> 9);
  734. if (from & (mtd->writesize - 1)) {
  735. pr_err("%s: unsupported from, 0x%llx\n",
  736. __func__, from);
  737. return -EINVAL;
  738. }
  739. if (ops->mode != MTD_OPS_RAW) {
  740. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  741. /* when ops->datbuf is NULL, ops->len can be ooblen */
  742. pr_err("%s: unsupported ops->len, %d\n",
  743. __func__, ops->len);
  744. return -EINVAL;
  745. }
  746. } else {
  747. if (ops->datbuf != NULL &&
  748. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  749. pr_err("%s: unsupported ops->len,"
  750. " %d for MTD_OPS_RAW\n", __func__, ops->len);
  751. return -EINVAL;
  752. }
  753. }
  754. if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  755. pr_err("%s: unsupported ops->ooboffs, %d\n",
  756. __func__, ops->ooboffs);
  757. return -EINVAL;
  758. }
  759. if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
  760. start_sector = cwperpage - 1;
  761. if (ops->oobbuf && !ops->datbuf) {
  762. page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
  763. mtd->oobavail : mtd->oobsize);
  764. if ((page_count == 0) && (ops->ooblen))
  765. page_count = 1;
  766. } else if (ops->mode != MTD_OPS_RAW)
  767. page_count = ops->len / mtd->writesize;
  768. else
  769. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  770. if (ops->datbuf) {
  771. data_dma_addr_curr = data_dma_addr =
  772. msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
  773. DMA_FROM_DEVICE);
  774. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  775. pr_err("msm_nand_read_oob: failed to get dma addr "
  776. "for %p\n", ops->datbuf);
  777. return -EIO;
  778. }
  779. }
  780. if (ops->oobbuf) {
  781. memset(ops->oobbuf, 0xff, ops->ooblen);
  782. oob_dma_addr_curr = oob_dma_addr =
  783. msm_nand_dma_map(chip->dev, ops->oobbuf,
  784. ops->ooblen, DMA_BIDIRECTIONAL);
  785. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  786. pr_err("msm_nand_read_oob: failed to get dma addr "
  787. "for %p\n", ops->oobbuf);
  788. err = -EIO;
  789. goto err_dma_map_oobbuf_failed;
  790. }
  791. }
  792. wait_event(chip->wait_queue,
  793. (dma_buffer = msm_nand_get_dma_buffer(
  794. chip, sizeof(*dma_buffer))));
  795. oob_col = start_sector * chip->cw_size;
  796. if (chip->CFG1 & CFG1_WIDE_FLASH)
  797. oob_col >>= 1;
  798. err = 0;
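/* For each page, build a data-mover command list: program the
 * command/address/chip-select and config registers (the full burst
 * for the first codeword, command-only afterwards), kick EXEC once
 * per codeword, read back the flash/buffer status pair, and copy
 * codeword data and/or spare bytes into the caller's mapped buffers.
 */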
  799. while (page_count-- > 0) {
  800. cmd = dma_buffer->cmd;
  801. /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
  802. if (ops->mode != MTD_OPS_RAW) {
  803. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
  804. dma_buffer->data.cfg0 =
  805. (chip->CFG0 & ~(7U << 6))
  806. | (((cwperpage-1) - start_sector) << 6);
  807. dma_buffer->data.cfg1 = chip->CFG1;
  808. if (enable_bch_ecc)
  809. dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
  810. } else {
  811. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  812. dma_buffer->data.cfg0 = (chip->CFG0_RAW
  813. & ~(7U << 6)) | ((cwperpage-1) << 6);
  814. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  815. (chip->CFG1 & CFG1_WIDE_FLASH);
  816. }
  817. dma_buffer->data.addr0 = (page << 16) | oob_col;
  818. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  819. /* chipsel_0 + enable DM interface */
  820. dma_buffer->data.chipsel = 0 | 4;
  821. /* GO bit for the EXEC register */
  822. dma_buffer->data.exec = 1;
  823. BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
  824. for (n = start_sector; n < cwperpage; n++) {
  825. /* flash + buffer status return words */
  826. dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
  827. dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
  828. /* block on cmd ready, then
  829. * write CMD / ADDR0 / ADDR1 / CHIPSEL
  830. * regs in a burst
  831. */
  832. cmd->cmd = DST_CRCI_NAND_CMD;
  833. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  834. cmd->dst = MSM_NAND_FLASH_CMD;
  835. if (n == start_sector)
  836. cmd->len = 16;
  837. else
  838. cmd->len = 4;
  839. cmd++;
  840. if (n == start_sector) {
  841. cmd->cmd = 0;
  842. cmd->src = msm_virt_to_dma(chip,
  843. &dma_buffer->data.cfg0);
  844. cmd->dst = MSM_NAND_DEV0_CFG0;
  845. if (enable_bch_ecc)
  846. cmd->len = 12;
  847. else
  848. cmd->len = 8;
  849. cmd++;
  850. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  851. cmd->cmd = 0;
  852. cmd->src = msm_virt_to_dma(chip,
  853. &dma_buffer->data.ecccfg);
  854. cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
  855. cmd->len = 4;
  856. cmd++;
  857. }
  858. /* kick the execute register */
  859. cmd->cmd = 0;
  860. cmd->src =
  861. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  862. cmd->dst = MSM_NAND_EXEC_CMD;
  863. cmd->len = 4;
  864. cmd++;
  865. /* block on data ready, then
  866. * read the status register
  867. */
  868. cmd->cmd = SRC_CRCI_NAND_DATA;
  869. cmd->src = MSM_NAND_FLASH_STATUS;
  870. cmd->dst = msm_virt_to_dma(chip,
  871. &dma_buffer->data.result[n]);
  872. /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
  873. cmd->len = 8;
  874. cmd++;
  875. /* read data block
  876. * (only valid if status says success)
  877. */
  878. if (ops->datbuf) {
  879. if (ops->mode != MTD_OPS_RAW)
  880. sectordatasize = (n < (cwperpage - 1))
  881. ? 516 : (512 - ((cwperpage - 1) << 2));
  882. else
  883. sectordatasize = chip->cw_size;
  884. cmd->cmd = 0;
  885. cmd->src = MSM_NAND_FLASH_BUFFER;
  886. cmd->dst = data_dma_addr_curr;
  887. data_dma_addr_curr += sectordatasize;
  888. cmd->len = sectordatasize;
  889. cmd++;
  890. }
  891. if (ops->oobbuf && (n == (cwperpage - 1)
  892. || ops->mode != MTD_OPS_AUTO_OOB)) {
  893. cmd->cmd = 0;
  894. if (n == (cwperpage - 1)) {
  895. cmd->src = MSM_NAND_FLASH_BUFFER +
  896. (512 - ((cwperpage - 1) << 2));
  897. sectoroobsize = (cwperpage << 2);
  898. if (ops->mode != MTD_OPS_AUTO_OOB)
  899. sectoroobsize +=
  900. chip->ecc_parity_bytes;
  901. } else {
  902. cmd->src = MSM_NAND_FLASH_BUFFER + 516;
  903. sectoroobsize = chip->ecc_parity_bytes;
  904. }
  905. cmd->dst = oob_dma_addr_curr;
  906. if (sectoroobsize < oob_len)
  907. cmd->len = sectoroobsize;
  908. else
  909. cmd->len = oob_len;
  910. oob_dma_addr_curr += cmd->len;
  911. oob_len -= cmd->len;
  912. if (cmd->len > 0)
  913. cmd++;
  914. }
  915. }
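/* Terminate the command list (first/last command flags), build the
 * command pointer and run the list on the NAND data-mover channel.
 * The memory barriers keep the descriptor writes ahead of the
 * transfer and the status reads behind its completion.
 */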
  916. BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
  917. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  918. dma_buffer->cmd[0].cmd |= CMD_OCB;
  919. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  920. dma_buffer->cmdptr =
  921. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
  922. | CMD_PTR_LP;
  923. mb();
  924. msm_dmov_exec_cmd(chip->dma_channel,
  925. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  926. &dma_buffer->cmdptr)));
  927. mb();
928. /* if the operation failed (0x10), or there
  929. * was a protection violation (0x100), we lose
  930. */
  931. pageerr = rawerr = 0;
  932. for (n = start_sector; n < cwperpage; n++) {
  933. if (dma_buffer->data.result[n].flash_status & 0x110) {
  934. rawerr = -EIO;
  935. break;
  936. }
  937. }
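/* A raw error may only mean the page is erased: erased pages fail
 * ECC but should read back as all 0xff (apart from the 0x54 bytes
 * the controller reports at fixed offsets). Only treat the error as
 * real if some byte differs from 0xff.
 */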
  938. if (rawerr) {
  939. if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
  940. uint8_t *datbuf = ops->datbuf +
  941. pages_read * mtd->writesize;
  942. dma_sync_single_for_cpu(chip->dev,
  943. data_dma_addr_curr-mtd->writesize,
  944. mtd->writesize, DMA_BIDIRECTIONAL);
  945. for (n = 0; n < mtd->writesize; n++) {
  946. /* empty blocks read 0x54 at
  947. * these offsets
  948. */
  949. if ((n % 516 == 3 || n % 516 == 175)
  950. && datbuf[n] == 0x54)
  951. datbuf[n] = 0xff;
  952. if (datbuf[n] != 0xff) {
  953. pageerr = rawerr;
  954. break;
  955. }
  956. }
  957. dma_sync_single_for_device(chip->dev,
  958. data_dma_addr_curr-mtd->writesize,
  959. mtd->writesize, DMA_BIDIRECTIONAL);
  960. }
  961. if (ops->oobbuf) {
  962. dma_sync_single_for_cpu(chip->dev,
  963. oob_dma_addr_curr - (ops->ooblen - oob_len),
  964. ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
  965. for (n = 0; n < ops->ooblen; n++) {
  966. if (ops->oobbuf[n] != 0xff) {
  967. pageerr = rawerr;
  968. break;
  969. }
  970. }
  971. dma_sync_single_for_device(chip->dev,
  972. oob_dma_addr_curr - (ops->ooblen - oob_len),
  973. ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
  974. }
  975. }
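/* The failed page was not simply erased: check each codeword's
 * buffer status for the uncorrectable-error bit and, if set, count
 * an ECC failure and report -EBADMSG.
 */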
  976. if (pageerr) {
  977. for (n = start_sector; n < cwperpage; n++) {
  978. if (dma_buffer->data.result[n].buffer_status &
  979. chip->uncorrectable_bit_mask) {
  980. /* not thread safe */
  981. mtd->ecc_stats.failed++;
  982. pageerr = -EBADMSG;
  983. break;
  984. }
  985. }
  986. }
987. if (!rawerr) { /* check for correctable errors */
  988. for (n = start_sector; n < cwperpage; n++) {
  989. ecc_errors =
  990. (dma_buffer->data.result[n].buffer_status
  991. & chip->num_err_mask);
  992. if (ecc_errors) {
  993. total_ecc_errors += ecc_errors;
  994. /* not thread safe */
  995. mtd->ecc_stats.corrected += ecc_errors;
  996. if (ecc_errors > 1)
  997. pageerr = -EUCLEAN;
  998. }
  999. }
  1000. }
  1001. if (pageerr && (pageerr != -EUCLEAN || err == 0))
  1002. err = pageerr;
  1003. #if VERBOSE
  1004. if (rawerr && !pageerr) {
  1005. pr_err("msm_nand_read_oob %llx %x %x empty page\n",
  1006. (loff_t)page * mtd->writesize, ops->len,
  1007. ops->ooblen);
  1008. } else {
  1009. for (n = start_sector; n < cwperpage; n++)
1010. pr_info("flash_status[%d] = %x, "
1011. "buffr_status[%d] = %x\n",
  1012. n, dma_buffer->data.result[n].flash_status,
  1013. n, dma_buffer->data.result[n].buffer_status);
  1014. }
  1015. #endif
  1016. if (err && err != -EUCLEAN && err != -EBADMSG)
  1017. break;
  1018. pages_read++;
  1019. page++;
  1020. }
  1021. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  1022. if (ops->oobbuf) {
  1023. dma_unmap_page(chip->dev, oob_dma_addr,
  1024. ops->ooblen, DMA_FROM_DEVICE);
  1025. }
  1026. err_dma_map_oobbuf_failed:
  1027. if (ops->datbuf) {
  1028. dma_unmap_page(chip->dev, data_dma_addr,
  1029. ops->len, DMA_BIDIRECTIONAL);
  1030. }
  1031. if (ops->mode != MTD_OPS_RAW)
  1032. ops->retlen = mtd->writesize * pages_read;
  1033. else
  1034. ops->retlen = (mtd->writesize + mtd->oobsize) *
  1035. pages_read;
  1036. ops->oobretlen = ops->ooblen - oob_len;
  1037. if (err)
  1038. pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
  1039. from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
  1040. total_ecc_errors);
  1041. #if VERBOSE
  1042. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  1043. __func__, err, ops->retlen, ops->oobretlen);
  1044. pr_info("==================================================="
  1045. "==============\n");
  1046. #endif
  1047. return err;
  1048. }
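/* Page/OOB read path for targets with two NAND controllers (NC01 and
 * NC10). Codewords are issued to the two controllers alternately and
 * the EBI2 ADM mux is switched around each command and data phase;
 * the error handling afterwards mirrors msm_nand_read_oob().
 */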
  1049. static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
  1050. struct mtd_oob_ops *ops)
  1051. {
  1052. struct msm_nand_chip *chip = mtd->priv;
  1053. struct {
  1054. dmov_s cmd[16 * 6 + 20];
  1055. unsigned cmdptr;
  1056. struct {
  1057. uint32_t cmd;
  1058. uint32_t nandc01_addr0;
  1059. uint32_t nandc10_addr0;
  1060. uint32_t nandc11_addr1;
  1061. uint32_t chipsel_cs0;
  1062. uint32_t chipsel_cs1;
  1063. uint32_t cfg0;
  1064. uint32_t cfg1;
  1065. uint32_t eccbchcfg;
  1066. uint32_t exec;
  1067. uint32_t ecccfg;
  1068. uint32_t ebi2_chip_select_cfg0;
  1069. uint32_t adm_mux_data_ack_req_nc01;
  1070. uint32_t adm_mux_cmd_ack_req_nc01;
  1071. uint32_t adm_mux_data_ack_req_nc10;
  1072. uint32_t adm_mux_cmd_ack_req_nc10;
  1073. uint32_t adm_default_mux;
  1074. uint32_t default_ebi2_chip_select_cfg0;
  1075. uint32_t nc10_flash_dev_cmd_vld;
  1076. uint32_t nc10_flash_dev_cmd1;
  1077. uint32_t nc10_flash_dev_cmd_vld_default;
  1078. uint32_t nc10_flash_dev_cmd1_default;
  1079. struct {
  1080. uint32_t flash_status;
  1081. uint32_t buffer_status;
  1082. } result[16];
  1083. } data;
  1084. } *dma_buffer;
  1085. dmov_s *cmd;
  1086. unsigned n;
  1087. unsigned page = 0;
  1088. uint32_t oob_len;
  1089. uint32_t sectordatasize;
  1090. uint32_t sectoroobsize;
  1091. int err, pageerr, rawerr;
  1092. dma_addr_t data_dma_addr = 0;
  1093. dma_addr_t oob_dma_addr = 0;
  1094. dma_addr_t data_dma_addr_curr = 0;
  1095. dma_addr_t oob_dma_addr_curr = 0;
  1096. uint32_t oob_col = 0;
  1097. unsigned page_count;
  1098. unsigned pages_read = 0;
  1099. unsigned start_sector = 0;
  1100. uint32_t ecc_errors;
  1101. uint32_t total_ecc_errors = 0;
  1102. unsigned cwperpage;
  1103. unsigned cw_offset = chip->cw_size;
  1104. #if VERBOSE
  1105. pr_info("================================================="
  1106. "============\n");
  1107. pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
  1108. "\noobbuf 0x%p ooblen 0x%x\n\n",
  1109. __func__, from, ops->mode, ops->datbuf,
  1110. ops->len, ops->oobbuf, ops->ooblen);
  1111. #endif
  1112. if (mtd->writesize == 2048)
  1113. page = from >> 11;
  1114. if (mtd->writesize == 4096)
  1115. page = from >> 12;
  1116. if (interleave_enable)
  1117. page = (from >> 1) >> 12;
  1118. oob_len = ops->ooblen;
  1119. cwperpage = (mtd->writesize >> 9);
  1120. if (from & (mtd->writesize - 1)) {
  1121. pr_err("%s: unsupported from, 0x%llx\n",
  1122. __func__, from);
  1123. return -EINVAL;
  1124. }
  1125. if (ops->mode != MTD_OPS_RAW) {
  1126. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  1127. pr_err("%s: unsupported ops->len, %d\n",
  1128. __func__, ops->len);
  1129. return -EINVAL;
  1130. }
  1131. } else {
  1132. if (ops->datbuf != NULL &&
  1133. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  1134. pr_err("%s: unsupported ops->len,"
  1135. " %d for MTD_OPS_RAW\n", __func__, ops->len);
  1136. return -EINVAL;
  1137. }
  1138. }
  1139. if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  1140. pr_err("%s: unsupported ops->ooboffs, %d\n",
  1141. __func__, ops->ooboffs);
  1142. return -EINVAL;
  1143. }
  1144. if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
  1145. start_sector = cwperpage - 1;
  1146. if (ops->oobbuf && !ops->datbuf) {
  1147. page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
  1148. mtd->oobavail : mtd->oobsize);
  1149. if ((page_count == 0) && (ops->ooblen))
  1150. page_count = 1;
  1151. } else if (ops->mode != MTD_OPS_RAW)
  1152. page_count = ops->len / mtd->writesize;
  1153. else
  1154. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  1155. if (ops->datbuf) {
  1156. data_dma_addr_curr = data_dma_addr =
  1157. msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
  1158. DMA_FROM_DEVICE);
  1159. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  1160. pr_err("msm_nand_read_oob_dualnandc: "
  1161. "failed to get dma addr for %p\n",
  1162. ops->datbuf);
  1163. return -EIO;
  1164. }
  1165. }
  1166. if (ops->oobbuf) {
  1167. memset(ops->oobbuf, 0xff, ops->ooblen);
  1168. oob_dma_addr_curr = oob_dma_addr =
  1169. msm_nand_dma_map(chip->dev, ops->oobbuf,
  1170. ops->ooblen, DMA_BIDIRECTIONAL);
  1171. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  1172. pr_err("msm_nand_read_oob_dualnandc: "
  1173. "failed to get dma addr for %p\n",
  1174. ops->oobbuf);
  1175. err = -EIO;
  1176. goto err_dma_map_oobbuf_failed;
  1177. }
  1178. }
  1179. wait_event(chip->wait_queue,
  1180. (dma_buffer = msm_nand_get_dma_buffer(
  1181. chip, sizeof(*dma_buffer))));
  1182. oob_col = start_sector * chip->cw_size;
  1183. if (chip->CFG1 & CFG1_WIDE_FLASH) {
  1184. oob_col >>= 1;
  1185. cw_offset >>= 1;
  1186. }
  1187. err = 0;
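/* Build the per-page command list: one-time controller setup on the
 * first codeword, then alternate codewords between NC01 and NC10,
 * switching the ADM mux before each command/data phase and collecting
 * a flash/buffer status pair per codeword.
 */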
  1188. while (page_count-- > 0) {
  1189. cmd = dma_buffer->cmd;
  1190. if (ops->mode != MTD_OPS_RAW) {
  1191. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
  1192. if (start_sector == (cwperpage - 1)) {
  1193. dma_buffer->data.cfg0 = (chip->CFG0 &
  1194. ~(7U << 6));
  1195. } else {
  1196. dma_buffer->data.cfg0 = (chip->CFG0 &
  1197. ~(7U << 6))
  1198. | (((cwperpage >> 1)-1) << 6);
  1199. }
  1200. dma_buffer->data.cfg1 = chip->CFG1;
  1201. if (enable_bch_ecc)
  1202. dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
  1203. } else {
  1204. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  1205. dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
  1206. ~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
  1207. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  1208. (chip->CFG1 & CFG1_WIDE_FLASH);
  1209. }
  1210. if (!interleave_enable) {
  1211. if (start_sector == (cwperpage - 1)) {
  1212. dma_buffer->data.nandc10_addr0 =
  1213. (page << 16) | oob_col;
  1214. dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
  1215. dma_buffer->data.nc10_flash_dev_cmd1 =
  1216. 0xF00F3000;
  1217. } else {
  1218. dma_buffer->data.nandc01_addr0 = page << 16;
  1219. /* NC10 ADDR0 points to the next code word */
  1220. dma_buffer->data.nandc10_addr0 = (page << 16) |
  1221. cw_offset;
  1222. dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
  1223. dma_buffer->data.nc10_flash_dev_cmd1 =
  1224. 0xF00FE005;
  1225. }
  1226. } else {
  1227. dma_buffer->data.nandc01_addr0 =
  1228. dma_buffer->data.nandc10_addr0 =
  1229. (page << 16) | oob_col;
  1230. }
  1231. /* ADDR1 */
  1232. dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
  1233. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  1234. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  1235. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  1236. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  1237. dma_buffer->data.adm_default_mux = 0x00000FC0;
  1238. dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
  1239. dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
  1240. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  1241. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  1242. /* chipsel_0 + enable DM interface */
  1243. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  1244. /* chipsel_1 + enable DM interface */
  1245. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  1246. /* GO bit for the EXEC register */
  1247. dma_buffer->data.exec = 1;
  1248. BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
  1249. for (n = start_sector; n < cwperpage; n++) {
  1250. /* flash + buffer status return words */
  1251. dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
  1252. dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
  1253. if (n == start_sector) {
  1254. if (!interleave_enable) {
  1255. cmd->cmd = 0;
  1256. cmd->src = msm_virt_to_dma(chip,
  1257. &dma_buffer->
  1258. data.nc10_flash_dev_cmd_vld);
  1259. cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
  1260. cmd->len = 4;
  1261. cmd++;
  1262. cmd->cmd = 0;
  1263. cmd->src = msm_virt_to_dma(chip,
  1264. &dma_buffer->data.nc10_flash_dev_cmd1);
  1265. cmd->dst = NC10(MSM_NAND_DEV_CMD1);
  1266. cmd->len = 4;
  1267. cmd++;
  1268. /* NC01, NC10 --> ADDR1 */
  1269. cmd->cmd = 0;
  1270. cmd->src = msm_virt_to_dma(chip,
  1271. &dma_buffer->data.nandc11_addr1);
  1272. cmd->dst = NC11(MSM_NAND_ADDR1);
  1273. cmd->len = 8;
  1274. cmd++;
  1275. cmd->cmd = 0;
  1276. cmd->src = msm_virt_to_dma(chip,
  1277. &dma_buffer->data.cfg0);
  1278. cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
  1279. if (enable_bch_ecc)
  1280. cmd->len = 12;
  1281. else
  1282. cmd->len = 8;
  1283. cmd++;
  1284. } else {
  1285. /* enable CS0 & CS1 */
  1286. cmd->cmd = 0;
  1287. cmd->src = msm_virt_to_dma(chip,
  1288. &dma_buffer->
  1289. data.ebi2_chip_select_cfg0);
  1290. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  1291. cmd->len = 4;
  1292. cmd++;
  1293. /* NC01, NC10 --> ADDR1 */
  1294. cmd->cmd = 0;
  1295. cmd->src = msm_virt_to_dma(chip,
  1296. &dma_buffer->data.nandc11_addr1);
  1297. cmd->dst = NC11(MSM_NAND_ADDR1);
  1298. cmd->len = 4;
  1299. cmd++;
  1300. /* Enable CS0 for NC01 */
  1301. cmd->cmd = 0;
  1302. cmd->src = msm_virt_to_dma(chip,
  1303. &dma_buffer->data.chipsel_cs0);
  1304. cmd->dst =
  1305. NC01(MSM_NAND_FLASH_CHIP_SELECT);
  1306. cmd->len = 4;
  1307. cmd++;
  1308. /* Enable CS1 for NC10 */
  1309. cmd->cmd = 0;
  1310. cmd->src = msm_virt_to_dma(chip,
  1311. &dma_buffer->data.chipsel_cs1);
  1312. cmd->dst =
  1313. NC10(MSM_NAND_FLASH_CHIP_SELECT);
  1314. cmd->len = 4;
  1315. cmd++;
  1316. /* config DEV0_CFG0 & CFG1 for CS0 */
  1317. cmd->cmd = 0;
  1318. cmd->src = msm_virt_to_dma(chip,
  1319. &dma_buffer->data.cfg0);
  1320. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  1321. cmd->len = 8;
  1322. cmd++;
  1323. /* config DEV1_CFG0 & CFG1 for CS1 */
  1324. cmd->cmd = 0;
  1325. cmd->src = msm_virt_to_dma(chip,
  1326. &dma_buffer->data.cfg0);
  1327. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  1328. cmd->len = 8;
  1329. cmd++;
  1330. }
  1331. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  1332. cmd->cmd = 0;
  1333. cmd->src = msm_virt_to_dma(chip,
  1334. &dma_buffer->data.ecccfg);
  1335. cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
  1336. cmd->len = 4;
  1337. cmd++;
1338. /* only the last code word is being read */
  1339. if (n == cwperpage - 1) {
  1340. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  1341. cmd->cmd = 0;
  1342. cmd->src = msm_virt_to_dma(chip,
  1343. &dma_buffer->
  1344. data.adm_mux_cmd_ack_req_nc01);
  1345. cmd->dst = EBI2_NAND_ADM_MUX;
  1346. cmd->len = 4;
  1347. cmd++;
  1348. /* CMD */
  1349. cmd->cmd = DST_CRCI_NAND_CMD;
  1350. cmd->src = msm_virt_to_dma(chip,
  1351. &dma_buffer->data.cmd);
  1352. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  1353. cmd->len = 4;
  1354. cmd++;
  1355. /* NC10 --> ADDR0 ( 0x0 ) */
  1356. cmd->cmd = 0;
  1357. cmd->src = msm_virt_to_dma(chip,
  1358. &dma_buffer->data.nandc10_addr0);
  1359. cmd->dst = NC10(MSM_NAND_ADDR0);
  1360. cmd->len = 4;
  1361. cmd++;
  1362. /* kick the execute reg for NC10 */
  1363. cmd->cmd = 0;
  1364. cmd->src = msm_virt_to_dma(chip,
  1365. &dma_buffer->data.exec);
  1366. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  1367. cmd->len = 4;
  1368. cmd++;
  1369. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  1370. cmd->cmd = 0;
  1371. cmd->src = msm_virt_to_dma(chip,
  1372. &dma_buffer->
  1373. data.adm_mux_data_ack_req_nc01);
  1374. cmd->dst = EBI2_NAND_ADM_MUX;
  1375. cmd->len = 4;
  1376. cmd++;
  1377. /* block on data ready from NC10, then
  1378. * read the status register
  1379. */
  1380. cmd->cmd = SRC_CRCI_NAND_DATA;
  1381. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  1382. cmd->dst = msm_virt_to_dma(chip,
  1383. &dma_buffer->data.result[n]);
  1384. /* MSM_NAND_FLASH_STATUS +
  1385. * MSM_NAND_BUFFER_STATUS
  1386. */
  1387. cmd->len = 8;
  1388. cmd++;
  1389. } else {
  1390. /* NC01 --> ADDR0 */
  1391. cmd->cmd = 0;
  1392. cmd->src = msm_virt_to_dma(chip,
  1393. &dma_buffer->data.nandc01_addr0);
  1394. cmd->dst = NC01(MSM_NAND_ADDR0);
  1395. cmd->len = 4;
  1396. cmd++;
1397. /* NC10 --> ADDR0 */
  1398. cmd->cmd = 0;
  1399. cmd->src = msm_virt_to_dma(chip,
  1400. &dma_buffer->data.nandc10_addr0);
  1401. cmd->dst = NC10(MSM_NAND_ADDR0);
  1402. cmd->len = 4;
  1403. cmd++;
  1404. /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
  1405. cmd->cmd = 0;
  1406. cmd->src = msm_virt_to_dma(chip,
  1407. &dma_buffer->
  1408. data.adm_mux_cmd_ack_req_nc10);
  1409. cmd->dst = EBI2_NAND_ADM_MUX;
  1410. cmd->len = 4;
  1411. cmd++;
  1412. /* CMD */
  1413. cmd->cmd = DST_CRCI_NAND_CMD;
  1414. cmd->src = msm_virt_to_dma(chip,
  1415. &dma_buffer->data.cmd);
  1416. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  1417. cmd->len = 4;
  1418. cmd++;
  1419. /* kick the execute register for NC01*/
  1420. cmd->cmd = 0;
  1421. cmd->src = msm_virt_to_dma(chip,
  1422. &dma_buffer->data.exec);
  1423. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  1424. cmd->len = 4;
  1425. cmd++;
  1426. }
  1427. }
  1428. /* read data block
  1429. * (only valid if status says success)
  1430. */
  1431. if (ops->datbuf || (ops->oobbuf &&
  1432. ops->mode != MTD_OPS_AUTO_OOB)) {
  1433. if (ops->mode != MTD_OPS_RAW)
  1434. sectordatasize = (n < (cwperpage - 1))
  1435. ? 516 : (512 - ((cwperpage - 1) << 2));
  1436. else
  1437. sectordatasize = chip->cw_size;
  1438. if (n % 2 == 0) {
  1439. /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
  1440. cmd->cmd = 0;
  1441. cmd->src = msm_virt_to_dma(chip,
  1442. &dma_buffer->
  1443. data.adm_mux_data_ack_req_nc10);
  1444. cmd->dst = EBI2_NAND_ADM_MUX;
  1445. cmd->len = 4;
  1446. cmd++;
  1447. /* block on data ready from NC01, then
  1448. * read the status register
  1449. */
  1450. cmd->cmd = SRC_CRCI_NAND_DATA;
  1451. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  1452. cmd->dst = msm_virt_to_dma(chip,
  1453. &dma_buffer->data.result[n]);
  1454. /* MSM_NAND_FLASH_STATUS +
  1455. * MSM_NAND_BUFFER_STATUS
  1456. */
  1457. cmd->len = 8;
  1458. cmd++;
  1459. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  1460. cmd->cmd = 0;
  1461. cmd->src = msm_virt_to_dma(chip,
  1462. &dma_buffer->
  1463. data.adm_mux_cmd_ack_req_nc01);
  1464. cmd->dst = EBI2_NAND_ADM_MUX;
  1465. cmd->len = 4;
  1466. cmd++;
  1467. /* CMD */
  1468. cmd->cmd = DST_CRCI_NAND_CMD;
  1469. cmd->src = msm_virt_to_dma(chip,
  1470. &dma_buffer->data.cmd);
  1471. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  1472. cmd->len = 4;
  1473. cmd++;
  1474. /* kick the execute register for NC10 */
  1475. cmd->cmd = 0;
  1476. cmd->src = msm_virt_to_dma(chip,
  1477. &dma_buffer->data.exec);
  1478. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  1479. cmd->len = 4;
  1480. cmd++;
  1481. /* Read only when there is data
  1482. * buffer
  1483. */
  1484. if (ops->datbuf) {
  1485. cmd->cmd = 0;
  1486. cmd->src =
  1487. NC01(MSM_NAND_FLASH_BUFFER);
  1488. cmd->dst = data_dma_addr_curr;
  1489. data_dma_addr_curr +=
  1490. sectordatasize;
  1491. cmd->len = sectordatasize;
  1492. cmd++;
  1493. }
  1494. } else {
  1495. /* MASK DATA ACK/REQ -->
  1496. * NC01 (0xA3C)
  1497. */
  1498. cmd->cmd = 0;
  1499. cmd->src = msm_virt_to_dma(chip,
  1500. &dma_buffer->
  1501. data.adm_mux_data_ack_req_nc01);
  1502. cmd->dst = EBI2_NAND_ADM_MUX;
  1503. cmd->len = 4;
  1504. cmd++;
  1505. /* block on data ready from NC10
  1506. * then read the status register
  1507. */
  1508. cmd->cmd = SRC_CRCI_NAND_DATA;
  1509. cmd->src =
  1510. NC10(MSM_NAND_FLASH_STATUS);
  1511. cmd->dst = msm_virt_to_dma(chip,
  1512. &dma_buffer->data.result[n]);
  1513. /* MSM_NAND_FLASH_STATUS +
  1514. * MSM_NAND_BUFFER_STATUS
  1515. */
  1516. cmd->len = 8;
  1517. cmd++;
  1518. if (n != cwperpage - 1) {
  1519. /* MASK CMD ACK/REQ -->
  1520. * NC10 (0xF14)
  1521. */
  1522. cmd->cmd = 0;
  1523. cmd->src =
  1524. msm_virt_to_dma(chip,
  1525. &dma_buffer->
  1526. data.adm_mux_cmd_ack_req_nc10);
  1527. cmd->dst = EBI2_NAND_ADM_MUX;
  1528. cmd->len = 4;
  1529. cmd++;
  1530. /* CMD */
  1531. cmd->cmd = DST_CRCI_NAND_CMD;
  1532. cmd->src = msm_virt_to_dma(chip,
  1533. &dma_buffer->data.cmd);
  1534. cmd->dst =
  1535. NC01(MSM_NAND_FLASH_CMD);
  1536. cmd->len = 4;
  1537. cmd++;
  1538. /* EXEC */
  1539. cmd->cmd = 0;
  1540. cmd->src = msm_virt_to_dma(chip,
  1541. &dma_buffer->data.exec);
  1542. cmd->dst =
  1543. NC01(MSM_NAND_EXEC_CMD);
  1544. cmd->len = 4;
  1545. cmd++;
  1546. }
  1547. /* Read only when there is data
  1548. * buffer
  1549. */
  1550. if (ops->datbuf) {
  1551. cmd->cmd = 0;
  1552. cmd->src =
  1553. NC10(MSM_NAND_FLASH_BUFFER);
  1554. cmd->dst = data_dma_addr_curr;
  1555. data_dma_addr_curr +=
  1556. sectordatasize;
  1557. cmd->len = sectordatasize;
  1558. cmd++;
  1559. }
  1560. }
  1561. }
  1562. if (ops->oobbuf && (n == (cwperpage - 1)
  1563. || ops->mode != MTD_OPS_AUTO_OOB)) {
  1564. cmd->cmd = 0;
  1565. if (n == (cwperpage - 1)) {
  1566. /* Use NC10 for reading the
1567. * last codeword.
  1568. */
  1569. cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
  1570. (512 - ((cwperpage - 1) << 2));
  1571. sectoroobsize = (cwperpage << 2);
  1572. if (ops->mode != MTD_OPS_AUTO_OOB)
  1573. sectoroobsize +=
  1574. chip->ecc_parity_bytes;
  1575. } else {
  1576. if (n % 2 == 0)
  1577. cmd->src =
  1578. NC01(MSM_NAND_FLASH_BUFFER)
  1579. + 516;
  1580. else
  1581. cmd->src =
  1582. NC10(MSM_NAND_FLASH_BUFFER)
  1583. + 516;
  1584. sectoroobsize = chip->ecc_parity_bytes;
  1585. }
  1586. cmd->dst = oob_dma_addr_curr;
  1587. if (sectoroobsize < oob_len)
  1588. cmd->len = sectoroobsize;
  1589. else
  1590. cmd->len = oob_len;
  1591. oob_dma_addr_curr += cmd->len;
  1592. oob_len -= cmd->len;
  1593. if (cmd->len > 0)
  1594. cmd++;
  1595. }
  1596. }
  1597. /* ADM --> Default mux state (0xFC0) */
  1598. cmd->cmd = 0;
  1599. cmd->src = msm_virt_to_dma(chip,
  1600. &dma_buffer->data.adm_default_mux);
  1601. cmd->dst = EBI2_NAND_ADM_MUX;
  1602. cmd->len = 4;
  1603. cmd++;
  1604. if (!interleave_enable) {
  1605. cmd->cmd = 0;
  1606. cmd->src = msm_virt_to_dma(chip,
  1607. &dma_buffer->data.nc10_flash_dev_cmd_vld_default);
  1608. cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
  1609. cmd->len = 4;
  1610. cmd++;
  1611. cmd->cmd = 0;
  1612. cmd->src = msm_virt_to_dma(chip,
  1613. &dma_buffer->data.nc10_flash_dev_cmd1_default);
  1614. cmd->dst = NC10(MSM_NAND_DEV_CMD1);
  1615. cmd->len = 4;
  1616. cmd++;
  1617. } else {
  1618. /* disable CS1 */
  1619. cmd->cmd = 0;
  1620. cmd->src = msm_virt_to_dma(chip,
  1621. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  1622. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  1623. cmd->len = 4;
  1624. cmd++;
  1625. }
  1626. BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
  1627. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  1628. dma_buffer->cmd[0].cmd |= CMD_OCB;
  1629. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  1630. dma_buffer->cmdptr =
  1631. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
  1632. | CMD_PTR_LP;
  1633. mb();
  1634. msm_dmov_exec_cmd(chip->dma_channel,
  1635. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  1636. &dma_buffer->cmdptr)));
  1637. mb();
1638. /* if the operation failed (0x10), or there
  1639. * was a protection violation (0x100), we lose
  1640. */
  1641. pageerr = rawerr = 0;
  1642. for (n = start_sector; n < cwperpage; n++) {
  1643. if (dma_buffer->data.result[n].flash_status & 0x110) {
  1644. rawerr = -EIO;
  1645. break;
  1646. }
  1647. }
  1648. if (rawerr) {
  1649. if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
  1650. uint8_t *datbuf = ops->datbuf +
  1651. pages_read * mtd->writesize;
  1652. dma_sync_single_for_cpu(chip->dev,
  1653. data_dma_addr_curr-mtd->writesize,
  1654. mtd->writesize, DMA_BIDIRECTIONAL);
  1655. for (n = 0; n < mtd->writesize; n++) {
  1656. /* empty blocks read 0x54 at
  1657. * these offsets
  1658. */
  1659. if ((n % 516 == 3 || n % 516 == 175)
  1660. && datbuf[n] == 0x54)
  1661. datbuf[n] = 0xff;
  1662. if (datbuf[n] != 0xff) {
  1663. pageerr = rawerr;
  1664. break;
  1665. }
  1666. }
  1667. dma_sync_single_for_device(chip->dev,
  1668. data_dma_addr_curr-mtd->writesize,
  1669. mtd->writesize, DMA_BIDIRECTIONAL);
  1670. }
  1671. if (ops->oobbuf) {
  1672. dma_sync_single_for_cpu(chip->dev,
  1673. oob_dma_addr_curr - (ops->ooblen - oob_len),
  1674. ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
  1675. for (n = 0; n < ops->ooblen; n++) {
  1676. if (ops->oobbuf[n] != 0xff) {
  1677. pageerr = rawerr;
  1678. break;
  1679. }
  1680. }
  1681. dma_sync_single_for_device(chip->dev,
  1682. oob_dma_addr_curr - (ops->ooblen - oob_len),
  1683. ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
  1684. }
  1685. }
  1686. if (pageerr) {
  1687. for (n = start_sector; n < cwperpage; n++) {
  1688. if (dma_buffer->data.result[n].buffer_status
  1689. & chip->uncorrectable_bit_mask) {
  1690. /* not thread safe */
  1691. mtd->ecc_stats.failed++;
  1692. pageerr = -EBADMSG;
  1693. break;
  1694. }
  1695. }
  1696. }
1697. if (!rawerr) { /* check for correctable errors */
  1698. for (n = start_sector; n < cwperpage; n++) {
  1699. ecc_errors = dma_buffer->data.
  1700. result[n].buffer_status
  1701. & chip->num_err_mask;
  1702. if (ecc_errors) {
  1703. total_ecc_errors += ecc_errors;
  1704. /* not thread safe */
  1705. mtd->ecc_stats.corrected += ecc_errors;
  1706. if (ecc_errors > 1)
  1707. pageerr = -EUCLEAN;
  1708. }
  1709. }
  1710. }
  1711. if (pageerr && (pageerr != -EUCLEAN || err == 0))
  1712. err = pageerr;
  1713. #if VERBOSE
  1714. if (rawerr && !pageerr) {
  1715. pr_err("msm_nand_read_oob_dualnandc "
  1716. "%llx %x %x empty page\n",
  1717. (loff_t)page * mtd->writesize, ops->len,
  1718. ops->ooblen);
  1719. } else {
  1720. for (n = start_sector; n < cwperpage; n++) {
  1721. if (n%2) {
  1722. pr_info("NC10: flash_status[%d] = %x, "
  1723. "buffr_status[%d] = %x\n",
  1724. n, dma_buffer->
  1725. data.result[n].flash_status,
  1726. n, dma_buffer->
  1727. data.result[n].buffer_status);
  1728. } else {
  1729. pr_info("NC01: flash_status[%d] = %x, "
  1730. "buffr_status[%d] = %x\n",
  1731. n, dma_buffer->
  1732. data.result[n].flash_status,
  1733. n, dma_buffer->
  1734. data.result[n].buffer_status);
  1735. }
  1736. }
  1737. }
  1738. #endif
  1739. if (err && err != -EUCLEAN && err != -EBADMSG)
  1740. break;
  1741. pages_read++;
  1742. page++;
  1743. }
  1744. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  1745. if (ops->oobbuf) {
  1746. dma_unmap_page(chip->dev, oob_dma_addr,
  1747. ops->ooblen, DMA_FROM_DEVICE);
  1748. }
  1749. err_dma_map_oobbuf_failed:
  1750. if (ops->datbuf) {
  1751. dma_unmap_page(chip->dev, data_dma_addr,
  1752. ops->len, DMA_BIDIRECTIONAL);
  1753. }
  1754. if (ops->mode != MTD_OPS_RAW)
  1755. ops->retlen = mtd->writesize * pages_read;
  1756. else
  1757. ops->retlen = (mtd->writesize + mtd->oobsize) *
  1758. pages_read;
  1759. ops->oobretlen = ops->ooblen - oob_len;
  1760. if (err)
  1761. pr_err("msm_nand_read_oob_dualnandc "
  1762. "%llx %x %x failed %d, corrected %d\n",
  1763. from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
  1764. total_ecc_errors);
  1765. #if VERBOSE
  1766. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  1767. __func__, err, ops->retlen, ops->oobretlen);
  1768. pr_info("==================================================="
  1769. "==========\n");
  1770. #endif
  1771. return err;
  1772. }
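/* mtd read() entry point. Page-aligned, page-sized reads go straight
 * to the page read routine; any other offset or length is handled by
 * reading whole pages into a bounce buffer and copying out only the
 * requested region.
 */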
  1773. static int
  1774. msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
  1775. size_t *retlen, u_char *buf)
  1776. {
  1777. int ret;
  1778. struct mtd_oob_ops ops;
  1779. int (*read_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
  1780. if (!dual_nand_ctlr_present)
  1781. read_oob = msm_nand_read_oob;
  1782. else
  1783. read_oob = msm_nand_read_oob_dualnandc;
  1784. ops.mode = MTD_OPS_PLACE_OOB;
  1785. ops.retlen = 0;
  1786. ops.ooblen = 0;
  1787. ops.oobbuf = NULL;
  1788. ret = 0;
  1789. *retlen = 0;
  1790. if ((from & (mtd->writesize - 1)) == 0 && len == mtd->writesize) {
  1791. /* reading a page on page boundary */
  1792. ops.len = len;
  1793. ops.datbuf = buf;
  1794. ret = read_oob(mtd, from, &ops);
  1795. *retlen = ops.retlen;
  1796. } else if (len > 0) {
  1797. /* reading any size on any offset. partial page is supported */
  1798. u8 *bounce_buf;
  1799. loff_t aligned_from;
  1800. loff_t offset;
  1801. size_t actual_len;
  1802. bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
  1803. if (!bounce_buf) {
  1804. pr_err("%s: could not allocate memory\n", __func__);
  1805. ret = -ENOMEM;
  1806. goto out;
  1807. }
  1808. ops.len = mtd->writesize;
  1809. offset = from & (mtd->writesize - 1);
  1810. aligned_from = from - offset;
  1811. for (;;) {
  1812. int no_copy;
  1813. actual_len = mtd->writesize - offset;
  1814. if (actual_len > len)
  1815. actual_len = len;
  1816. no_copy = (offset == 0 && actual_len == mtd->writesize);
  1817. ops.datbuf = (no_copy) ? buf : bounce_buf;
  1818. ret = read_oob(mtd, aligned_from, &ops);
  1819. if (ret < 0)
  1820. break;
  1821. if (!no_copy)
  1822. memcpy(buf, bounce_buf + offset, actual_len);
  1823. len -= actual_len;
  1824. *retlen += actual_len;
  1825. if (len == 0)
  1826. break;
  1827. buf += actual_len;
  1828. offset = 0;
  1829. aligned_from += mtd->writesize;
  1830. }
  1831. kfree(bounce_buf);
  1832. }
  1833. out:
  1834. return ret;
  1835. }
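/* Program one or more full pages. For each page a command list is
 * built that writes the command/address/chip-select and config
 * registers, copies each codeword (and, for the last codeword, any
 * OOB bytes) into the controller buffer, kicks EXEC, and reads back
 * the per-codeword flash status so failed or protected writes can be
 * detected.
 */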
  1836. static int
  1837. msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
  1838. {
  1839. struct msm_nand_chip *chip = mtd->priv;
  1840. struct {
  1841. dmov_s cmd[8 * 7 + 2];
  1842. unsigned cmdptr;
  1843. struct {
  1844. uint32_t cmd;
  1845. uint32_t addr0;
  1846. uint32_t addr1;
  1847. uint32_t chipsel;
  1848. uint32_t cfg0;
  1849. uint32_t cfg1;
  1850. uint32_t eccbchcfg;
  1851. uint32_t exec;
  1852. uint32_t ecccfg;
  1853. uint32_t clrfstatus;
  1854. uint32_t clrrstatus;
  1855. uint32_t flash_status[8];
  1856. } data;
  1857. } *dma_buffer;
  1858. dmov_s *cmd;
  1859. unsigned n;
  1860. unsigned page = 0;
  1861. uint32_t oob_len;
  1862. uint32_t sectordatawritesize;
  1863. int err = 0;
  1864. dma_addr_t data_dma_addr = 0;
  1865. dma_addr_t oob_dma_addr = 0;
  1866. dma_addr_t data_dma_addr_curr = 0;
  1867. dma_addr_t oob_dma_addr_curr = 0;
  1868. unsigned page_count;
  1869. unsigned pages_written = 0;
  1870. unsigned cwperpage;
  1871. #if VERBOSE
  1872. pr_info("================================================="
  1873. "================\n");
  1874. pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
  1875. "\noobbuf 0x%p ooblen 0x%x\n",
  1876. __func__, to, ops->mode, ops->datbuf, ops->len,
  1877. ops->oobbuf, ops->ooblen);
  1878. #endif
  1879. if (mtd->writesize == 2048)
  1880. page = to >> 11;
  1881. if (mtd->writesize == 4096)
  1882. page = to >> 12;
  1883. oob_len = ops->ooblen;
  1884. cwperpage = (mtd->writesize >> 9);
  1885. if (to & (mtd->writesize - 1)) {
  1886. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  1887. return -EINVAL;
  1888. }
  1889. if (ops->mode != MTD_OPS_RAW) {
  1890. if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
  1891. pr_err("%s: unsupported ops->mode,%d\n",
  1892. __func__, ops->mode);
  1893. return -EINVAL;
  1894. }
  1895. if ((ops->len % mtd->writesize) != 0) {
  1896. pr_err("%s: unsupported ops->len, %d\n",
  1897. __func__, ops->len);
  1898. return -EINVAL;
  1899. }
  1900. } else {
  1901. if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  1902. pr_err("%s: unsupported ops->len, "
  1903. "%d for MTD_OPS_RAW mode\n",
  1904. __func__, ops->len);
  1905. return -EINVAL;
  1906. }
  1907. }
  1908. if (ops->datbuf == NULL) {
  1909. pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
  1910. return -EINVAL;
  1911. }
  1912. if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  1913. pr_err("%s: unsupported ops->ooboffs, %d\n",
  1914. __func__, ops->ooboffs);
  1915. return -EINVAL;
  1916. }
  1917. if (ops->datbuf) {
  1918. data_dma_addr_curr = data_dma_addr =
  1919. msm_nand_dma_map(chip->dev, ops->datbuf,
  1920. ops->len, DMA_TO_DEVICE);
  1921. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  1922. pr_err("msm_nand_write_oob: failed to get dma addr "
  1923. "for %p\n", ops->datbuf);
  1924. return -EIO;
  1925. }
  1926. }
  1927. if (ops->oobbuf) {
  1928. oob_dma_addr_curr = oob_dma_addr =
  1929. msm_nand_dma_map(chip->dev, ops->oobbuf,
  1930. ops->ooblen, DMA_TO_DEVICE);
  1931. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  1932. pr_err("msm_nand_write_oob: failed to get dma addr "
  1933. "for %p\n", ops->oobbuf);
  1934. err = -EIO;
  1935. goto err_dma_map_oobbuf_failed;
  1936. }
  1937. }
  1938. if (ops->mode != MTD_OPS_RAW)
  1939. page_count = ops->len / mtd->writesize;
  1940. else
  1941. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  1942. wait_event(chip->wait_queue, (dma_buffer =
  1943. msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
  1944. while (page_count-- > 0) {
  1945. cmd = dma_buffer->cmd;
  1946. if (ops->mode != MTD_OPS_RAW) {
  1947. dma_buffer->data.cfg0 = chip->CFG0;
  1948. dma_buffer->data.cfg1 = chip->CFG1;
  1949. if (enable_bch_ecc)
  1950. dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
  1951. } else {
  1952. dma_buffer->data.cfg0 = (chip->CFG0_RAW &
  1953. ~(7U << 6)) | ((cwperpage-1) << 6);
  1954. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  1955. (chip->CFG1 & CFG1_WIDE_FLASH);
  1956. }
  1957. /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
  1958. dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
  1959. dma_buffer->data.addr0 = page << 16;
  1960. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  1961. /* chipsel_0 + enable DM interface */
  1962. dma_buffer->data.chipsel = 0 | 4;
  1963. /* GO bit for the EXEC register */
  1964. dma_buffer->data.exec = 1;
  1965. dma_buffer->data.clrfstatus = 0x00000020;
  1966. dma_buffer->data.clrrstatus = 0x000000C0;
  1967. BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
  1968. for (n = 0; n < cwperpage ; n++) {
  1969. /* status return words */
  1970. dma_buffer->data.flash_status[n] = 0xeeeeeeee;
  1971. /* block on cmd ready, then
  1972. * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
  1973. */
  1974. cmd->cmd = DST_CRCI_NAND_CMD;
  1975. cmd->src =
  1976. msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  1977. cmd->dst = MSM_NAND_FLASH_CMD;
  1978. if (n == 0)
  1979. cmd->len = 16;
  1980. else
  1981. cmd->len = 4;
  1982. cmd++;
  1983. if (n == 0) {
  1984. cmd->cmd = 0;
  1985. cmd->src = msm_virt_to_dma(chip,
  1986. &dma_buffer->data.cfg0);
  1987. cmd->dst = MSM_NAND_DEV0_CFG0;
  1988. if (enable_bch_ecc)
  1989. cmd->len = 12;
  1990. else
  1991. cmd->len = 8;
  1992. cmd++;
  1993. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  1994. cmd->cmd = 0;
  1995. cmd->src = msm_virt_to_dma(chip,
  1996. &dma_buffer->data.ecccfg);
  1997. cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
  1998. cmd->len = 4;
  1999. cmd++;
  2000. }
  2001. /* write data block */
  2002. if (ops->mode != MTD_OPS_RAW)
  2003. sectordatawritesize = (n < (cwperpage - 1)) ?
  2004. 516 : (512 - ((cwperpage - 1) << 2));
  2005. else
  2006. sectordatawritesize = chip->cw_size;
  2007. cmd->cmd = 0;
  2008. cmd->src = data_dma_addr_curr;
  2009. data_dma_addr_curr += sectordatawritesize;
  2010. cmd->dst = MSM_NAND_FLASH_BUFFER;
  2011. cmd->len = sectordatawritesize;
  2012. cmd++;
  2013. if (ops->oobbuf) {
  2014. if (n == (cwperpage - 1)) {
  2015. cmd->cmd = 0;
  2016. cmd->src = oob_dma_addr_curr;
  2017. cmd->dst = MSM_NAND_FLASH_BUFFER +
  2018. (512 - ((cwperpage - 1) << 2));
  2019. if ((cwperpage << 2) < oob_len)
  2020. cmd->len = (cwperpage << 2);
  2021. else
  2022. cmd->len = oob_len;
  2023. oob_dma_addr_curr += cmd->len;
  2024. oob_len -= cmd->len;
  2025. if (cmd->len > 0)
  2026. cmd++;
  2027. }
  2028. if (ops->mode != MTD_OPS_AUTO_OOB) {
  2029. /* skip ecc bytes in oobbuf */
  2030. if (oob_len < chip->ecc_parity_bytes) {
  2031. oob_dma_addr_curr +=
  2032. chip->ecc_parity_bytes;
  2033. oob_len -=
  2034. chip->ecc_parity_bytes;
  2035. } else {
  2036. oob_dma_addr_curr += oob_len;
  2037. oob_len = 0;
  2038. }
  2039. }
  2040. }
  2041. /* kick the execute register */
  2042. cmd->cmd = 0;
  2043. cmd->src =
  2044. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2045. cmd->dst = MSM_NAND_EXEC_CMD;
  2046. cmd->len = 4;
  2047. cmd++;
  2048. /* block on data ready, then
  2049. * read the status register
  2050. */
  2051. cmd->cmd = SRC_CRCI_NAND_DATA;
  2052. cmd->src = MSM_NAND_FLASH_STATUS;
  2053. cmd->dst = msm_virt_to_dma(chip,
  2054. &dma_buffer->data.flash_status[n]);
  2055. cmd->len = 4;
  2056. cmd++;
  2057. cmd->cmd = 0;
  2058. cmd->src = msm_virt_to_dma(chip,
  2059. &dma_buffer->data.clrfstatus);
  2060. cmd->dst = MSM_NAND_FLASH_STATUS;
  2061. cmd->len = 4;
  2062. cmd++;
  2063. cmd->cmd = 0;
  2064. cmd->src = msm_virt_to_dma(chip,
  2065. &dma_buffer->data.clrrstatus);
  2066. cmd->dst = MSM_NAND_READ_STATUS;
  2067. cmd->len = 4;
  2068. cmd++;
  2069. }
  2070. dma_buffer->cmd[0].cmd |= CMD_OCB;
  2071. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  2072. BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
  2073. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  2074. dma_buffer->cmdptr =
  2075. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
  2076. CMD_PTR_LP;
  2077. mb();
  2078. msm_dmov_exec_cmd(chip->dma_channel,
  2079. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
  2080. msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  2081. mb();
  2082. /* if any of the writes failed (0x10), or there was a
  2083. * protection violation (0x100), or the program success
  2084. * bit (0x80) is unset, we lose
  2085. */
  2086. err = 0;
  2087. for (n = 0; n < cwperpage; n++) {
  2088. if (dma_buffer->data.flash_status[n] & 0x110) {
  2089. err = -EIO;
  2090. break;
  2091. }
  2092. if (!(dma_buffer->data.flash_status[n] & 0x80)) {
  2093. err = -EIO;
  2094. break;
  2095. }
  2096. }
  2097. #if VERBOSE
  2098. for (n = 0; n < cwperpage; n++)
  2099. pr_info("write pg %d: flash_status[%d] = %x\n", page,
  2100. n, dma_buffer->data.flash_status[n]);
  2101. #endif
  2102. if (err)
  2103. break;
  2104. pages_written++;
  2105. page++;
  2106. }
  2107. if (ops->mode != MTD_OPS_RAW)
  2108. ops->retlen = mtd->writesize * pages_written;
  2109. else
  2110. ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
  2111. ops->oobretlen = ops->ooblen - oob_len;
  2112. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  2113. if (ops->oobbuf)
  2114. dma_unmap_page(chip->dev, oob_dma_addr,
  2115. ops->ooblen, DMA_TO_DEVICE);
  2116. err_dma_map_oobbuf_failed:
  2117. if (ops->datbuf)
  2118. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  2119. DMA_TO_DEVICE);
  2120. if (err)
  2121. pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
  2122. to, ops->len, ops->ooblen, err);
  2123. #if VERBOSE
  2124. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  2125. __func__, err, ops->retlen, ops->oobretlen);
  2126. pr_info("==================================================="
  2127. "==============\n");
  2128. #endif
  2129. return err;
  2130. }
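/* Page program path for targets with two NAND controllers. Even
 * codewords are loaded through NC01 and odd codewords through NC10,
 * with the ADM mux switched around each phase; per-codeword flash
 * status words are collected for the error checks that follow.
 */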
  2131. static int
  2132. msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
  2133. struct mtd_oob_ops *ops)
  2134. {
  2135. struct msm_nand_chip *chip = mtd->priv;
  2136. struct {
  2137. dmov_s cmd[16 * 6 + 18];
  2138. unsigned cmdptr;
  2139. struct {
  2140. uint32_t cmd;
  2141. uint32_t nandc01_addr0;
  2142. uint32_t nandc10_addr0;
  2143. uint32_t nandc11_addr1;
  2144. uint32_t chipsel_cs0;
  2145. uint32_t chipsel_cs1;
  2146. uint32_t cfg0;
  2147. uint32_t cfg1;
  2148. uint32_t eccbchcfg;
  2149. uint32_t exec;
  2150. uint32_t ecccfg;
  2151. uint32_t cfg0_nc01;
  2152. uint32_t ebi2_chip_select_cfg0;
  2153. uint32_t adm_mux_data_ack_req_nc01;
  2154. uint32_t adm_mux_cmd_ack_req_nc01;
  2155. uint32_t adm_mux_data_ack_req_nc10;
  2156. uint32_t adm_mux_cmd_ack_req_nc10;
  2157. uint32_t adm_default_mux;
  2158. uint32_t default_ebi2_chip_select_cfg0;
  2159. uint32_t nc01_flash_dev_cmd_vld;
  2160. uint32_t nc10_flash_dev_cmd0;
  2161. uint32_t nc01_flash_dev_cmd_vld_default;
  2162. uint32_t nc10_flash_dev_cmd0_default;
  2163. uint32_t flash_status[16];
  2164. uint32_t clrfstatus;
  2165. uint32_t clrrstatus;
  2166. } data;
  2167. } *dma_buffer;
  2168. dmov_s *cmd;
  2169. unsigned n;
  2170. unsigned page = 0;
  2171. uint32_t oob_len;
  2172. uint32_t sectordatawritesize;
  2173. int err = 0;
  2174. dma_addr_t data_dma_addr = 0;
  2175. dma_addr_t oob_dma_addr = 0;
  2176. dma_addr_t data_dma_addr_curr = 0;
  2177. dma_addr_t oob_dma_addr_curr = 0;
  2178. unsigned page_count;
  2179. unsigned pages_written = 0;
  2180. unsigned cwperpage;
  2181. unsigned cw_offset = chip->cw_size;
  2182. #if VERBOSE
  2183. pr_info("================================================="
  2184. "============\n");
  2185. pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
  2186. "\noobbuf 0x%p ooblen 0x%x\n\n",
  2187. __func__, to, ops->mode, ops->datbuf, ops->len,
  2188. ops->oobbuf, ops->ooblen);
  2189. #endif
  2190. if (mtd->writesize == 2048)
  2191. page = to >> 11;
  2192. if (mtd->writesize == 4096)
  2193. page = to >> 12;
  2194. if (interleave_enable)
  2195. page = (to >> 1) >> 12;
  2196. oob_len = ops->ooblen;
  2197. cwperpage = (mtd->writesize >> 9);
  2198. if (to & (mtd->writesize - 1)) {
  2199. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  2200. return -EINVAL;
  2201. }
  2202. if (ops->mode != MTD_OPS_RAW) {
  2203. if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
  2204. pr_err("%s: unsupported ops->mode,%d\n",
  2205. __func__, ops->mode);
  2206. return -EINVAL;
  2207. }
  2208. if ((ops->len % mtd->writesize) != 0) {
  2209. pr_err("%s: unsupported ops->len, %d\n",
  2210. __func__, ops->len);
  2211. return -EINVAL;
  2212. }
  2213. } else {
  2214. if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  2215. pr_err("%s: unsupported ops->len, "
  2216. "%d for MTD_OPS_RAW mode\n",
  2217. __func__, ops->len);
  2218. return -EINVAL;
  2219. }
  2220. }
  2221. if (ops->datbuf == NULL) {
  2222. pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
  2223. return -EINVAL;
  2224. }
  2225. if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
  2226. pr_err("%s: unsupported ops->ooboffs, %d\n",
  2227. __func__, ops->ooboffs);
  2228. return -EINVAL;
  2229. }
  2230. if (ops->datbuf) {
  2231. data_dma_addr_curr = data_dma_addr =
  2232. msm_nand_dma_map(chip->dev, ops->datbuf,
  2233. ops->len, DMA_TO_DEVICE);
  2234. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  2235. pr_err("msm_nand_write_oob_dualnandc:"
  2236. "failed to get dma addr "
  2237. "for %p\n", ops->datbuf);
  2238. return -EIO;
  2239. }
  2240. }
  2241. if (ops->oobbuf) {
  2242. oob_dma_addr_curr = oob_dma_addr =
  2243. msm_nand_dma_map(chip->dev, ops->oobbuf,
  2244. ops->ooblen, DMA_TO_DEVICE);
  2245. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  2246. pr_err("msm_nand_write_oob_dualnandc:"
  2247. "failed to get dma addr "
  2248. "for %p\n", ops->oobbuf);
  2249. err = -EIO;
  2250. goto err_dma_map_oobbuf_failed;
  2251. }
  2252. }
  2253. if (ops->mode != MTD_OPS_RAW)
  2254. page_count = ops->len / mtd->writesize;
  2255. else
  2256. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  2257. wait_event(chip->wait_queue, (dma_buffer =
  2258. msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
  2259. if (chip->CFG1 & CFG1_WIDE_FLASH)
  2260. cw_offset >>= 1;
  2261. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  2262. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  2263. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  2264. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  2265. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  2266. dma_buffer->data.adm_default_mux = 0x00000FC0;
  2267. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  2268. dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
  2269. dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
  2270. dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
  2271. dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
  2272. dma_buffer->data.clrfstatus = 0x00000020;
  2273. dma_buffer->data.clrrstatus = 0x000000C0;
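/* Build the per-page program list: one-time controller setup on the
 * first codeword, then alternate codeword loads between NC01 and
 * NC10, collecting the previous codeword's status before kicking off
 * the next transfer.
 */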
  2274. while (page_count-- > 0) {
  2275. cmd = dma_buffer->cmd;
  2276. if (ops->mode != MTD_OPS_RAW) {
  2277. dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
  2278. & ~(1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
  2279. dma_buffer->data.cfg1 = chip->CFG1;
  2280. if (enable_bch_ecc)
  2281. dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
  2282. } else {
  2283. dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
  2284. ~(7U << 6)) & ~(1 << 4)) | (((cwperpage >> 1)-1) << 6);
  2285. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  2286. (chip->CFG1 & CFG1_WIDE_FLASH);
  2287. }
  2288. /* Disables the automatic issuing of the read
2289. * status command on the first NAND controller.
  2290. */
  2291. if (!interleave_enable)
  2292. dma_buffer->data.cfg0_nc01 = dma_buffer->data.cfg0
  2293. | (1 << 4);
  2294. else
  2295. dma_buffer->data.cfg0 |= (1 << 4);
  2296. dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
  2297. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  2298. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  2299. /* GO bit for the EXEC register */
  2300. dma_buffer->data.exec = 1;
  2301. if (!interleave_enable) {
  2302. dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
  2303. /* NC10 ADDR0 points to the next code word */
  2304. dma_buffer->data.nandc10_addr0 =
  2305. (page << 16) | cw_offset;
  2306. } else {
  2307. dma_buffer->data.nandc01_addr0 =
  2308. dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
  2309. }
  2310. /* ADDR1 */
  2311. dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
  2312. BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
  2313. for (n = 0; n < cwperpage; n++) {
  2314. /* status return words */
  2315. dma_buffer->data.flash_status[n] = 0xeeeeeeee;
  2316. if (n == 0) {
  2317. if (!interleave_enable) {
  2318. cmd->cmd = 0;
  2319. cmd->src = msm_virt_to_dma(chip,
  2320. &dma_buffer->
  2321. data.nc01_flash_dev_cmd_vld);
  2322. cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
  2323. cmd->len = 4;
  2324. cmd++;
  2325. cmd->cmd = 0;
  2326. cmd->src = msm_virt_to_dma(chip,
  2327. &dma_buffer->data.nc10_flash_dev_cmd0);
  2328. cmd->dst = NC10(MSM_NAND_DEV_CMD0);
  2329. cmd->len = 4;
  2330. cmd++;
  2331. /* common settings for both NC01 & NC10
  2332. * NC01, NC10 --> ADDR1 / CHIPSEL
  2333. */
  2334. cmd->cmd = 0;
  2335. cmd->src = msm_virt_to_dma(chip,
  2336. &dma_buffer->data.nandc11_addr1);
  2337. cmd->dst = NC11(MSM_NAND_ADDR1);
  2338. cmd->len = 8;
  2339. cmd++;
  2340. /* Disables the automatic issue of the
  2341. * read status command after the write
  2342. * operation.
  2343. */
  2344. cmd->cmd = 0;
  2345. cmd->src = msm_virt_to_dma(chip,
  2346. &dma_buffer->data.cfg0_nc01);
  2347. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  2348. cmd->len = 4;
  2349. cmd++;
  2350. cmd->cmd = 0;
  2351. cmd->src = msm_virt_to_dma(chip,
  2352. &dma_buffer->data.cfg0);
  2353. cmd->dst = NC10(MSM_NAND_DEV0_CFG0);
  2354. cmd->len = 4;
  2355. cmd++;
  2356. cmd->cmd = 0;
  2357. cmd->src = msm_virt_to_dma(chip,
  2358. &dma_buffer->data.cfg1);
  2359. cmd->dst = NC11(MSM_NAND_DEV0_CFG1);
  2360. if (enable_bch_ecc)
  2361. cmd->len = 8;
  2362. else
  2363. cmd->len = 4;
  2364. cmd++;
  2365. } else {
  2366. /* enable CS1 */
  2367. cmd->cmd = 0;
  2368. cmd->src = msm_virt_to_dma(chip,
  2369. &dma_buffer->
  2370. data.ebi2_chip_select_cfg0);
  2371. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  2372. cmd->len = 4;
  2373. cmd++;
  2374. /* NC11 --> ADDR1 */
  2375. cmd->cmd = 0;
  2376. cmd->src = msm_virt_to_dma(chip,
  2377. &dma_buffer->data.nandc11_addr1);
  2378. cmd->dst = NC11(MSM_NAND_ADDR1);
  2379. cmd->len = 4;
  2380. cmd++;
  2381. /* Enable CS0 for NC01 */
  2382. cmd->cmd = 0;
  2383. cmd->src = msm_virt_to_dma(chip,
  2384. &dma_buffer->data.chipsel_cs0);
  2385. cmd->dst =
  2386. NC01(MSM_NAND_FLASH_CHIP_SELECT);
  2387. cmd->len = 4;
  2388. cmd++;
  2389. /* Enable CS1 for NC10 */
  2390. cmd->cmd = 0;
  2391. cmd->src = msm_virt_to_dma(chip,
  2392. &dma_buffer->data.chipsel_cs1);
  2393. cmd->dst =
  2394. NC10(MSM_NAND_FLASH_CHIP_SELECT);
  2395. cmd->len = 4;
  2396. cmd++;
  2397. /* config DEV0_CFG0 & CFG1 for CS0 */
  2398. cmd->cmd = 0;
  2399. cmd->src = msm_virt_to_dma(chip,
  2400. &dma_buffer->data.cfg0);
  2401. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  2402. cmd->len = 8;
  2403. cmd++;
  2404. /* config DEV1_CFG0 & CFG1 for CS1 */
  2405. cmd->cmd = 0;
  2406. cmd->src = msm_virt_to_dma(chip,
  2407. &dma_buffer->data.cfg0);
  2408. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  2409. cmd->len = 8;
  2410. cmd++;
  2411. }
  2412. dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
  2413. cmd->cmd = 0;
  2414. cmd->src = msm_virt_to_dma(chip,
  2415. &dma_buffer->data.ecccfg);
  2416. cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
  2417. cmd->len = 4;
  2418. cmd++;
  2419. /* NC01 --> ADDR0 */
  2420. cmd->cmd = 0;
  2421. cmd->src = msm_virt_to_dma(chip,
  2422. &dma_buffer->data.nandc01_addr0);
  2423. cmd->dst = NC01(MSM_NAND_ADDR0);
  2424. cmd->len = 4;
  2425. cmd++;
  2426. /* NC10 --> ADDR0 */
  2427. cmd->cmd = 0;
  2428. cmd->src = msm_virt_to_dma(chip,
  2429. &dma_buffer->data.nandc10_addr0);
  2430. cmd->dst = NC10(MSM_NAND_ADDR0);
  2431. cmd->len = 4;
  2432. cmd++;
  2433. }
  2434. if (n % 2 == 0) {
  2435. /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
  2436. cmd->cmd = 0;
  2437. cmd->src = msm_virt_to_dma(chip,
  2438. &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
  2439. cmd->dst = EBI2_NAND_ADM_MUX;
  2440. cmd->len = 4;
  2441. cmd++;
  2442. /* CMD */
  2443. cmd->cmd = DST_CRCI_NAND_CMD;
  2444. cmd->src = msm_virt_to_dma(chip,
  2445. &dma_buffer->data.cmd);
  2446. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  2447. cmd->len = 4;
  2448. cmd++;
  2449. } else {
  2450. /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
  2451. cmd->cmd = 0;
  2452. cmd->src = msm_virt_to_dma(chip,
  2453. &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
  2454. cmd->dst = EBI2_NAND_ADM_MUX;
  2455. cmd->len = 4;
  2456. cmd++;
  2457. /* CMD */
  2458. cmd->cmd = DST_CRCI_NAND_CMD;
  2459. cmd->src = msm_virt_to_dma(chip,
  2460. &dma_buffer->data.cmd);
  2461. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  2462. cmd->len = 4;
  2463. cmd++;
  2464. }
  2465. if (ops->mode != MTD_OPS_RAW)
  2466. sectordatawritesize = (n < (cwperpage - 1)) ?
  2467. 516 : (512 - ((cwperpage - 1) << 2));
  2468. else
  2469. sectordatawritesize = chip->cw_size;
  2470. cmd->cmd = 0;
  2471. cmd->src = data_dma_addr_curr;
  2472. data_dma_addr_curr += sectordatawritesize;
  2473. if (n % 2 == 0)
  2474. cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
  2475. else
  2476. cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
  2477. cmd->len = sectordatawritesize;
  2478. cmd++;
  2479. if (ops->oobbuf) {
  2480. if (n == (cwperpage - 1)) {
  2481. cmd->cmd = 0;
  2482. cmd->src = oob_dma_addr_curr;
  2483. cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
  2484. (512 - ((cwperpage - 1) << 2));
  2485. if ((cwperpage << 2) < oob_len)
  2486. cmd->len = (cwperpage << 2);
  2487. else
  2488. cmd->len = oob_len;
  2489. oob_dma_addr_curr += cmd->len;
  2490. oob_len -= cmd->len;
  2491. if (cmd->len > 0)
  2492. cmd++;
  2493. }
  2494. if (ops->mode != MTD_OPS_AUTO_OOB) {
  2495. /* skip ecc bytes in oobbuf */
  2496. if (oob_len < chip->ecc_parity_bytes) {
  2497. oob_dma_addr_curr +=
  2498. chip->ecc_parity_bytes;
  2499. oob_len -=
  2500. chip->ecc_parity_bytes;
  2501. } else {
  2502. oob_dma_addr_curr += oob_len;
  2503. oob_len = 0;
  2504. }
  2505. }
  2506. }
  2507. if (n % 2 == 0) {
  2508. if (n != 0) {
  2509. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  2510. cmd->cmd = 0;
  2511. cmd->src = msm_virt_to_dma(chip,
  2512. &dma_buffer->
  2513. data.adm_mux_data_ack_req_nc01);
  2514. cmd->dst = EBI2_NAND_ADM_MUX;
  2515. cmd->len = 4;
  2516. cmd++;
  2517. /* block on data ready from NC10, then
  2518. * read the status register
  2519. */
  2520. cmd->cmd = SRC_CRCI_NAND_DATA;
  2521. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  2522. cmd->dst = msm_virt_to_dma(chip,
  2523. &dma_buffer->data.flash_status[n-1]);
  2524. cmd->len = 4;
  2525. cmd++;
  2526. }
  2527. /* kick the NC01 execute register */
  2528. cmd->cmd = 0;
  2529. cmd->src = msm_virt_to_dma(chip,
  2530. &dma_buffer->data.exec);
  2531. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  2532. cmd->len = 4;
  2533. cmd++;
  2534. } else {
  2535. /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
  2536. cmd->cmd = 0;
  2537. cmd->src = msm_virt_to_dma(chip,
  2538. &dma_buffer->data.adm_mux_data_ack_req_nc10);
  2539. cmd->dst = EBI2_NAND_ADM_MUX;
  2540. cmd->len = 4;
  2541. cmd++;
  2542. /* block on data ready from NC01, then
  2543. * read the status register
  2544. */
  2545. cmd->cmd = SRC_CRCI_NAND_DATA;
  2546. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  2547. cmd->dst = msm_virt_to_dma(chip,
  2548. &dma_buffer->data.flash_status[n-1]);
  2549. cmd->len = 4;
  2550. cmd++;
  2551. /* kick the execute register */
  2552. cmd->cmd = 0;
  2553. cmd->src =
  2554. msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2555. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  2556. cmd->len = 4;
  2557. cmd++;
  2558. }
  2559. }
  2560. /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
  2561. cmd->cmd = 0;
  2562. cmd->src = msm_virt_to_dma(chip,
  2563. &dma_buffer->data.adm_mux_data_ack_req_nc01);
  2564. cmd->dst = EBI2_NAND_ADM_MUX;
  2565. cmd->len = 4;
  2566. cmd++;
2567. /* process the last outstanding codeword request */
  2568. /* block on data ready, then
  2569. * read the status register
  2570. */
  2571. cmd->cmd = SRC_CRCI_NAND_DATA;
  2572. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  2573. cmd->dst = msm_virt_to_dma(chip,
  2574. &dma_buffer->data.flash_status[n-1]);
  2575. cmd->len = 4;
  2576. cmd++;
  2577. cmd->cmd = 0;
  2578. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
  2579. cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
  2580. cmd->len = 4;
  2581. cmd++;
  2582. cmd->cmd = 0;
  2583. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
  2584. cmd->dst = NC11(MSM_NAND_READ_STATUS);
  2585. cmd->len = 4;
  2586. cmd++;
2587. /* restore the default ADM mux (0xFC0) */
  2588. cmd->cmd = 0;
  2589. cmd->src = msm_virt_to_dma(chip,
  2590. &dma_buffer->data.adm_default_mux);
  2591. cmd->dst = EBI2_NAND_ADM_MUX;
  2592. cmd->len = 4;
  2593. cmd++;
  2594. if (!interleave_enable) {
2595. /* restore the default register values */
  2596. cmd->cmd = 0;
  2597. cmd->src = msm_virt_to_dma(chip,
  2598. &dma_buffer->data.nc01_flash_dev_cmd_vld_default);
  2599. cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
  2600. cmd->len = 4;
  2601. cmd++;
  2602. cmd->cmd = 0;
  2603. cmd->src = msm_virt_to_dma(chip,
  2604. &dma_buffer->data.nc10_flash_dev_cmd0_default);
  2605. cmd->dst = NC10(MSM_NAND_DEV_CMD0);
  2606. cmd->len = 4;
  2607. cmd++;
  2608. } else {
  2609. /* disable CS1 */
  2610. cmd->cmd = 0;
  2611. cmd->src = msm_virt_to_dma(chip,
  2612. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  2613. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  2614. cmd->len = 4;
  2615. cmd++;
  2616. }
  2617. dma_buffer->cmd[0].cmd |= CMD_OCB;
  2618. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  2619. BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
  2620. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  2621. dma_buffer->cmdptr =
  2622. ((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
  2623. mb();
  2624. msm_dmov_exec_cmd(chip->dma_channel,
  2625. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
  2626. msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  2627. mb();
  2628. /* if any of the writes failed (0x10), or there was a
  2629. * protection violation (0x100), or the program success
  2630. * bit (0x80) is unset, we lose
  2631. */
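/*
 * The same check with named bits, for readability (a sketch only; the
 * macro names below are illustrative and not defined by this driver):
 *
 *   #define FS_OP_ERR   0x010   status: operation failed
 *   #define FS_MPU_ERR  0x100   status: protection violation
 *   #define FS_OP_DONE  0x080   status: program success
 *
 *   if ((status & (FS_OP_ERR | FS_MPU_ERR)) || !(status & FS_OP_DONE))
 *           err = -EIO;
 */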
  2632. err = 0;
  2633. for (n = 0; n < cwperpage; n++) {
  2634. if (dma_buffer->data.flash_status[n] & 0x110) {
  2635. err = -EIO;
  2636. break;
  2637. }
  2638. if (!(dma_buffer->data.flash_status[n] & 0x80)) {
  2639. err = -EIO;
  2640. break;
  2641. }
  2642. }
2643. /* check that the device reported ready (0x20) for the last codeword */
  2644. if (!interleave_enable)
  2645. if (!(dma_buffer->data.flash_status[cwperpage - 1]
  2646. & 0x20)) {
  2647. err = -EIO;
  2648. break;
  2649. }
  2650. #if VERBOSE
  2651. for (n = 0; n < cwperpage; n++) {
  2652. if (n%2) {
  2653. pr_info("NC10: write pg %d: flash_status[%d] = %x\n",
  2654. page, n, dma_buffer->data.flash_status[n]);
  2655. } else {
  2656. pr_info("NC01: write pg %d: flash_status[%d] = %x\n",
  2657. page, n, dma_buffer->data.flash_status[n]);
  2658. }
  2659. }
  2660. #endif
  2661. if (err)
  2662. break;
  2663. pages_written++;
  2664. page++;
  2665. }
  2666. if (ops->mode != MTD_OPS_RAW)
  2667. ops->retlen = mtd->writesize * pages_written;
  2668. else
  2669. ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
  2670. ops->oobretlen = ops->ooblen - oob_len;
  2671. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  2672. if (ops->oobbuf)
  2673. dma_unmap_page(chip->dev, oob_dma_addr,
  2674. ops->ooblen, DMA_TO_DEVICE);
  2675. err_dma_map_oobbuf_failed:
  2676. if (ops->datbuf)
  2677. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  2678. DMA_TO_DEVICE);
  2679. if (err)
  2680. pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
  2681. to, ops->len, ops->ooblen, err);
  2682. #if VERBOSE
  2683. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  2684. __func__, err, ops->retlen, ops->oobretlen);
  2685. pr_info("==================================================="
  2686. "==========\n");
  2687. #endif
  2688. return err;
  2689. }
  2690. static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  2691. size_t *retlen, const u_char *buf)
  2692. {
  2693. int ret;
  2694. struct mtd_oob_ops ops;
  2695. int (*write_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
  2696. if (!dual_nand_ctlr_present)
  2697. write_oob = msm_nand_write_oob;
  2698. else
  2699. write_oob = msm_nand_write_oob_dualnandc;
  2700. ops.mode = MTD_OPS_PLACE_OOB;
  2701. ops.retlen = 0;
  2702. ops.ooblen = 0;
  2703. ops.oobbuf = NULL;
  2704. ret = 0;
  2705. *retlen = 0;
  2706. if (!virt_addr_valid(buf) &&
  2707. ((to | len) & (mtd->writesize - 1)) == 0 &&
  2708. ((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE) {
  2709. /*
  2710. * Handle writing of large size write buffer in vmalloc
  2711. * address space that does not fit in an MMU page.
  2712. * The destination address must be on page boundary,
  2713. * and the size must be multiple of NAND page size.
  2714. * Writing partial page is not supported.
  2715. */
  2716. ops.len = mtd->writesize;
  2717. for (;;) {
  2718. ops.datbuf = (uint8_t *) buf;
  2719. ret = write_oob(mtd, to, &ops);
  2720. if (ret < 0)
  2721. break;
  2722. len -= mtd->writesize;
  2723. *retlen += mtd->writesize;
  2724. if (len == 0)
  2725. break;
  2726. buf += mtd->writesize;
  2727. to += mtd->writesize;
  2728. }
  2729. } else {
  2730. ops.len = len;
  2731. ops.datbuf = (uint8_t *) buf;
  2732. ret = write_oob(mtd, to, &ops);
  2733. *retlen = ops.retlen;
  2734. }
  2735. return ret;
  2736. }
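/*
 * Caller's view (illustrative only, not part of this driver): the MTD core
 * dispatches page writes to msm_nand_write(), e.g.
 *
 *   size_t written = 0;
 *   int rc = mtd_write(mtd, offset, mtd->writesize, &written, page_buf);
 *
 * where mtd_write() (or mtd->write on older kernels) lands here and
 * 'written' reports how many bytes were actually programmed.
 */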
  2737. static int
  2738. msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
  2739. {
  2740. int err;
  2741. struct msm_nand_chip *chip = mtd->priv;
  2742. struct {
  2743. dmov_s cmd[6];
  2744. unsigned cmdptr;
  2745. struct {
  2746. uint32_t cmd;
  2747. uint32_t addr0;
  2748. uint32_t addr1;
  2749. uint32_t chipsel;
  2750. uint32_t cfg0;
  2751. uint32_t cfg1;
  2752. uint32_t exec;
  2753. uint32_t flash_status;
  2754. uint32_t clrfstatus;
  2755. uint32_t clrrstatus;
  2756. } data;
  2757. } *dma_buffer;
  2758. dmov_s *cmd;
  2759. unsigned page = 0;
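/*
 * Page index is the byte offset divided by the page size
 * (>>11 for 2 KB pages, >>12 for 4 KB pages).
 */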
  2760. if (mtd->writesize == 2048)
  2761. page = instr->addr >> 11;
  2762. if (mtd->writesize == 4096)
  2763. page = instr->addr >> 12;
  2764. if (instr->addr & (mtd->erasesize - 1)) {
  2765. pr_err("%s: unsupported erase address, 0x%llx\n",
  2766. __func__, instr->addr);
  2767. return -EINVAL;
  2768. }
  2769. if (instr->len != mtd->erasesize) {
  2770. pr_err("%s: unsupported erase len, %lld\n",
  2771. __func__, instr->len);
  2772. return -EINVAL;
  2773. }
  2774. wait_event(chip->wait_queue,
  2775. (dma_buffer = msm_nand_get_dma_buffer(
  2776. chip, sizeof(*dma_buffer))));
  2777. cmd = dma_buffer->cmd;
  2778. dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
  2779. dma_buffer->data.addr0 = page;
  2780. dma_buffer->data.addr1 = 0;
  2781. dma_buffer->data.chipsel = 0 | 4;
  2782. dma_buffer->data.exec = 1;
  2783. dma_buffer->data.flash_status = 0xeeeeeeee;
  2784. dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
  2785. dma_buffer->data.cfg1 = chip->CFG1;
  2786. dma_buffer->data.clrfstatus = 0x00000020;
  2787. dma_buffer->data.clrrstatus = 0x000000C0;
  2788. cmd->cmd = DST_CRCI_NAND_CMD | CMD_OCB;
  2789. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  2790. cmd->dst = MSM_NAND_FLASH_CMD;
  2791. cmd->len = 16;
  2792. cmd++;
  2793. cmd->cmd = 0;
  2794. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  2795. cmd->dst = MSM_NAND_DEV0_CFG0;
  2796. cmd->len = 8;
  2797. cmd++;
  2798. cmd->cmd = 0;
  2799. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2800. cmd->dst = MSM_NAND_EXEC_CMD;
  2801. cmd->len = 4;
  2802. cmd++;
  2803. cmd->cmd = SRC_CRCI_NAND_DATA;
  2804. cmd->src = MSM_NAND_FLASH_STATUS;
  2805. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
  2806. cmd->len = 4;
  2807. cmd++;
  2808. cmd->cmd = 0;
  2809. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
  2810. cmd->dst = MSM_NAND_FLASH_STATUS;
  2811. cmd->len = 4;
  2812. cmd++;
  2813. cmd->cmd = CMD_OCU | CMD_LC;
  2814. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
  2815. cmd->dst = MSM_NAND_READ_STATUS;
  2816. cmd->len = 4;
  2817. cmd++;
  2818. BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);
  2819. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
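/*
 * The command pointer word carries the physical address of the first
 * dmov_s descriptor; it is 8-byte aligned, hence the shift right by 3,
 * and CMD_PTR_LP marks it as the last pointer in the list.
 */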
  2820. dma_buffer->cmdptr =
  2821. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  2822. mb();
  2823. msm_dmov_exec_cmd(
  2824. chip->dma_channel, DMOV_CMD_PTR_LIST |
  2825. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  2826. mb();
2827. /* we fail if there was an operation error, an MPU error, or the
  2828. * erase success bit was not set.
  2829. */
  2830. if (dma_buffer->data.flash_status & 0x110 ||
  2831. !(dma_buffer->data.flash_status & 0x80))
  2832. err = -EIO;
  2833. else
  2834. err = 0;
  2835. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  2836. if (err) {
  2837. pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
  2838. instr->fail_addr = instr->addr;
  2839. instr->state = MTD_ERASE_FAILED;
  2840. } else {
  2841. instr->state = MTD_ERASE_DONE;
  2842. instr->fail_addr = 0xffffffff;
  2843. mtd_erase_callback(instr);
  2844. }
  2845. return err;
  2846. }
  2847. static int
  2848. msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
  2849. {
  2850. int err;
  2851. struct msm_nand_chip *chip = mtd->priv;
  2852. struct {
  2853. dmov_s cmd[18];
  2854. unsigned cmdptr;
  2855. struct {
  2856. uint32_t cmd;
  2857. uint32_t addr0;
  2858. uint32_t addr1;
  2859. uint32_t chipsel_cs0;
  2860. uint32_t chipsel_cs1;
  2861. uint32_t cfg0;
  2862. uint32_t cfg1;
  2863. uint32_t exec;
  2864. uint32_t ecccfg;
  2865. uint32_t ebi2_chip_select_cfg0;
  2866. uint32_t adm_mux_data_ack_req_nc01;
  2867. uint32_t adm_mux_cmd_ack_req_nc01;
  2868. uint32_t adm_mux_data_ack_req_nc10;
  2869. uint32_t adm_mux_cmd_ack_req_nc10;
  2870. uint32_t adm_default_mux;
  2871. uint32_t default_ebi2_chip_select_cfg0;
  2872. uint32_t nc01_flash_dev_cmd0;
  2873. uint32_t nc01_flash_dev_cmd0_default;
  2874. uint32_t flash_status[2];
  2875. uint32_t clrfstatus;
  2876. uint32_t clrrstatus;
  2877. } data;
  2878. } *dma_buffer;
  2879. dmov_s *cmd;
  2880. unsigned page = 0;
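/*
 * Page index is the byte offset divided by the page size; in the 8 KB
 * case each of the two controllers sees half the page, so the offset is
 * halved before applying the 4 KB shift.
 */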
  2881. if (mtd->writesize == 2048)
  2882. page = instr->addr >> 11;
  2883. if (mtd->writesize == 4096)
  2884. page = instr->addr >> 12;
  2885. if (mtd->writesize == 8192)
  2886. page = (instr->addr >> 1) >> 12;
  2887. if (instr->addr & (mtd->erasesize - 1)) {
  2888. pr_err("%s: unsupported erase address, 0x%llx\n",
  2889. __func__, instr->addr);
  2890. return -EINVAL;
  2891. }
  2892. if (instr->len != mtd->erasesize) {
  2893. pr_err("%s: unsupported erase len, %lld\n",
  2894. __func__, instr->len);
  2895. return -EINVAL;
  2896. }
  2897. wait_event(chip->wait_queue,
  2898. (dma_buffer = msm_nand_get_dma_buffer(
  2899. chip, sizeof(*dma_buffer))));
  2900. cmd = dma_buffer->cmd;
  2901. dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
  2902. dma_buffer->data.addr0 = page;
  2903. dma_buffer->data.addr1 = 0;
  2904. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  2905. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  2906. dma_buffer->data.exec = 1;
  2907. dma_buffer->data.flash_status[0] = 0xeeeeeeee;
  2908. dma_buffer->data.flash_status[1] = 0xeeeeeeee;
  2909. dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
  2910. dma_buffer->data.cfg1 = chip->CFG1;
  2911. dma_buffer->data.clrfstatus = 0x00000020;
  2912. dma_buffer->data.clrrstatus = 0x000000C0;
  2913. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  2914. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  2915. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  2916. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  2917. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  2918. dma_buffer->data.adm_default_mux = 0x00000FC0;
  2919. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
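/*
 * The values written to EBI2_NAND_ADM_MUX below steer the data mover's
 * command/data handshakes to one controller or the other:
 * 0x53C/0xA3C select NC01, 0xF14/0xF28 select NC10, and 0xFC0 restores
 * the default routing.
 */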
  2920. /* enable CS1 */
  2921. cmd->cmd = 0 | CMD_OCB;
  2922. cmd->src = msm_virt_to_dma(chip,
  2923. &dma_buffer->data.ebi2_chip_select_cfg0);
  2924. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  2925. cmd->len = 4;
  2926. cmd++;
  2927. /* erase CS0 block now !!! */
  2928. /* 0xF14 */
  2929. cmd->cmd = 0;
  2930. cmd->src = msm_virt_to_dma(chip,
  2931. &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
  2932. cmd->dst = EBI2_NAND_ADM_MUX;
  2933. cmd->len = 4;
  2934. cmd++;
  2935. cmd->cmd = DST_CRCI_NAND_CMD;
  2936. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  2937. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  2938. cmd->len = 16;
  2939. cmd++;
  2940. cmd->cmd = 0;
  2941. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  2942. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  2943. cmd->len = 8;
  2944. cmd++;
  2945. cmd->cmd = 0;
  2946. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2947. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  2948. cmd->len = 4;
  2949. cmd++;
  2950. /* 0xF28 */
  2951. cmd->cmd = 0;
  2952. cmd->src = msm_virt_to_dma(chip,
  2953. &dma_buffer->data.adm_mux_data_ack_req_nc10);
  2954. cmd->dst = EBI2_NAND_ADM_MUX;
  2955. cmd->len = 4;
  2956. cmd++;
  2957. cmd->cmd = SRC_CRCI_NAND_DATA;
  2958. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  2959. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[0]);
  2960. cmd->len = 4;
  2961. cmd++;
  2962. /* erase CS1 block now !!! */
  2963. /* 0x53C */
  2964. cmd->cmd = 0;
  2965. cmd->src = msm_virt_to_dma(chip,
  2966. &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
  2967. cmd->dst = EBI2_NAND_ADM_MUX;
  2968. cmd->len = 4;
  2969. cmd++;
  2970. cmd->cmd = DST_CRCI_NAND_CMD;
  2971. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  2972. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  2973. cmd->len = 12;
  2974. cmd++;
  2975. cmd->cmd = 0;
  2976. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
  2977. cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
  2978. cmd->len = 4;
  2979. cmd++;
  2980. cmd->cmd = 0;
  2981. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  2982. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
2983. cmd->len = 8;
cmd++;
2984. cmd->cmd = 0;
  2985. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  2986. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  2987. cmd->len = 4;
  2988. cmd++;
  2989. /* 0xA3C */
  2990. cmd->cmd = 0;
  2991. cmd->src = msm_virt_to_dma(chip,
  2992. &dma_buffer->data.adm_mux_data_ack_req_nc01);
  2993. cmd->dst = EBI2_NAND_ADM_MUX;
  2994. cmd->len = 4;
  2995. cmd++;
  2996. cmd->cmd = SRC_CRCI_NAND_DATA;
  2997. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  2998. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[1]);
  2999. cmd->len = 4;
  3000. cmd++;
  3001. cmd->cmd = 0;
  3002. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
  3003. cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
  3004. cmd->len = 4;
  3005. cmd++;
  3006. cmd->cmd = 0;
  3007. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
  3008. cmd->dst = NC11(MSM_NAND_READ_STATUS);
  3009. cmd->len = 4;
  3010. cmd++;
  3011. cmd->cmd = 0;
  3012. cmd->src = msm_virt_to_dma(chip,
  3013. &dma_buffer->data.adm_default_mux);
  3014. cmd->dst = EBI2_NAND_ADM_MUX;
  3015. cmd->len = 4;
  3016. cmd++;
  3017. /* disable CS1 */
  3018. cmd->cmd = CMD_OCU | CMD_LC;
  3019. cmd->src = msm_virt_to_dma(chip,
  3020. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  3021. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  3022. cmd->len = 4;
  3023. cmd++;
  3024. BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
  3025. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  3026. dma_buffer->cmdptr =
  3027. (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  3028. mb();
  3029. msm_dmov_exec_cmd(
  3030. chip->dma_channel, DMOV_CMD_PTR_LIST |
  3031. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  3032. mb();
3033. /* we fail if there was an operation error, an MPU error, or the
  3034. * erase success bit was not set.
  3035. */
  3036. if (dma_buffer->data.flash_status[0] & 0x110 ||
  3037. !(dma_buffer->data.flash_status[0] & 0x80) ||
  3038. dma_buffer->data.flash_status[1] & 0x110 ||
  3039. !(dma_buffer->data.flash_status[1] & 0x80))
  3040. err = -EIO;
  3041. else
  3042. err = 0;
  3043. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  3044. if (err) {
  3045. pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
  3046. instr->fail_addr = instr->addr;
  3047. instr->state = MTD_ERASE_FAILED;
  3048. } else {
  3049. instr->state = MTD_ERASE_DONE;
  3050. instr->fail_addr = 0xffffffff;
  3051. mtd_erase_callback(instr);
  3052. }
  3053. return err;
  3054. }
  3055. static int
  3056. msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
  3057. {
  3058. struct msm_nand_chip *chip = mtd->priv;
  3059. int ret;
  3060. struct {
  3061. dmov_s cmd[5];
  3062. unsigned cmdptr;
  3063. struct {
  3064. uint32_t cmd;
  3065. uint32_t addr0;
  3066. uint32_t addr1;
  3067. uint32_t chipsel;
  3068. uint32_t cfg0;
  3069. uint32_t cfg1;
  3070. uint32_t eccbchcfg;
  3071. uint32_t exec;
  3072. uint32_t ecccfg;
  3073. struct {
  3074. uint32_t flash_status;
  3075. uint32_t buffer_status;
  3076. } result;
  3077. } data;
  3078. } *dma_buffer;
  3079. dmov_s *cmd;
  3080. uint8_t *buf;
  3081. unsigned page = 0;
  3082. unsigned cwperpage;
  3083. if (mtd->writesize == 2048)
  3084. page = ofs >> 11;
  3085. if (mtd->writesize == 4096)
  3086. page = ofs >> 12;
  3087. cwperpage = (mtd->writesize >> 9);
  3088. /* Check for invalid offset */
  3089. if (ofs > mtd->size)
  3090. return -EINVAL;
  3091. if (ofs & (mtd->erasesize - 1)) {
  3092. pr_err("%s: unsupported block address, 0x%x\n",
  3093. __func__, (uint32_t)ofs);
  3094. return -EINVAL;
  3095. }
  3096. wait_event(chip->wait_queue,
  3097. (dma_buffer = msm_nand_get_dma_buffer(chip ,
  3098. sizeof(*dma_buffer) + 4)));
  3099. buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
  3100. /* Read 4 bytes starting from the bad block marker location
  3101. * in the last code word of the page
  3102. */
  3103. cmd = dma_buffer->cmd;
  3104. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  3105. dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
  3106. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  3107. (chip->CFG1 & CFG1_WIDE_FLASH);
  3108. if (enable_bch_ecc)
  3109. dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
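/*
 * The low half of addr0 is the column of the last codeword
 * (cw_size * (cwperpage - 1)); on a 16-bit device the column address is
 * in words, hence the extra shift right by one.
 */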
  3110. if (chip->CFG1 & CFG1_WIDE_FLASH)
  3111. dma_buffer->data.addr0 = (page << 16) |
  3112. ((chip->cw_size * (cwperpage-1)) >> 1);
  3113. else
  3114. dma_buffer->data.addr0 = (page << 16) |
  3115. (chip->cw_size * (cwperpage-1));
  3116. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  3117. dma_buffer->data.chipsel = 0 | 4;
  3118. dma_buffer->data.exec = 1;
  3119. dma_buffer->data.result.flash_status = 0xeeeeeeee;
  3120. dma_buffer->data.result.buffer_status = 0xeeeeeeee;
  3121. cmd->cmd = DST_CRCI_NAND_CMD;
  3122. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3123. cmd->dst = MSM_NAND_FLASH_CMD;
  3124. cmd->len = 16;
  3125. cmd++;
  3126. cmd->cmd = 0;
  3127. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  3128. cmd->dst = MSM_NAND_DEV0_CFG0;
  3129. if (enable_bch_ecc)
  3130. cmd->len = 12;
  3131. else
  3132. cmd->len = 8;
  3133. cmd++;
  3134. cmd->cmd = 0;
  3135. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3136. cmd->dst = MSM_NAND_EXEC_CMD;
  3137. cmd->len = 4;
  3138. cmd++;
  3139. cmd->cmd = SRC_CRCI_NAND_DATA;
  3140. cmd->src = MSM_NAND_FLASH_STATUS;
  3141. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
  3142. cmd->len = 8;
  3143. cmd++;
  3144. cmd->cmd = 0;
  3145. cmd->src = MSM_NAND_FLASH_BUFFER +
  3146. (mtd->writesize - (chip->cw_size * (cwperpage-1)));
  3147. cmd->dst = msm_virt_to_dma(chip, buf);
  3148. cmd->len = 4;
  3149. cmd++;
  3150. BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
  3151. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  3152. dma_buffer->cmd[0].cmd |= CMD_OCB;
  3153. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  3154. dma_buffer->cmdptr = (msm_virt_to_dma(chip,
  3155. dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  3156. mb();
  3157. msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
  3158. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  3159. mb();
  3160. ret = 0;
  3161. if (dma_buffer->data.result.flash_status & 0x110)
  3162. ret = -EIO;
  3163. if (!ret) {
  3164. /* Check for bad block marker byte */
  3165. if (chip->CFG1 & CFG1_WIDE_FLASH) {
  3166. if (buf[0] != 0xFF || buf[1] != 0xFF)
  3167. ret = 1;
  3168. } else {
  3169. if (buf[0] != 0xFF)
  3170. ret = 1;
  3171. }
  3172. }
  3173. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
  3174. return ret;
  3175. }
  3176. static int
  3177. msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
  3178. {
  3179. struct msm_nand_chip *chip = mtd->priv;
  3180. int ret;
  3181. struct {
  3182. dmov_s cmd[18];
  3183. unsigned cmdptr;
  3184. struct {
  3185. uint32_t cmd;
  3186. uint32_t addr0;
  3187. uint32_t addr1;
  3188. uint32_t chipsel_cs0;
  3189. uint32_t chipsel_cs1;
  3190. uint32_t cfg0;
  3191. uint32_t cfg1;
  3192. uint32_t exec;
  3193. uint32_t ecccfg;
  3194. uint32_t ebi2_chip_select_cfg0;
  3195. uint32_t adm_mux_data_ack_req_nc01;
  3196. uint32_t adm_mux_cmd_ack_req_nc01;
  3197. uint32_t adm_mux_data_ack_req_nc10;
  3198. uint32_t adm_mux_cmd_ack_req_nc10;
  3199. uint32_t adm_default_mux;
  3200. uint32_t default_ebi2_chip_select_cfg0;
  3201. struct {
  3202. uint32_t flash_status;
  3203. uint32_t buffer_status;
  3204. } result[2];
  3205. } data;
  3206. } *dma_buffer;
  3207. dmov_s *cmd;
  3208. uint8_t *buf01;
  3209. uint8_t *buf10;
  3210. unsigned page = 0;
  3211. unsigned cwperpage;
  3212. if (mtd->writesize == 2048)
  3213. page = ofs >> 11;
  3214. if (mtd->writesize == 4096)
  3215. page = ofs >> 12;
  3216. if (mtd->writesize == 8192)
  3217. page = (ofs >> 1) >> 12;
  3218. cwperpage = ((mtd->writesize >> 1) >> 9);
  3219. /* Check for invalid offset */
  3220. if (ofs > mtd->size)
  3221. return -EINVAL;
  3222. if (ofs & (mtd->erasesize - 1)) {
  3223. pr_err("%s: unsupported block address, 0x%x\n",
  3224. __func__, (uint32_t)ofs);
  3225. return -EINVAL;
  3226. }
  3227. wait_event(chip->wait_queue,
  3228. (dma_buffer = msm_nand_get_dma_buffer(chip ,
  3229. sizeof(*dma_buffer) + 8)));
  3230. buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
  3231. buf10 = buf01 + 4;
  3232. /* Read 4 bytes starting from the bad block marker location
  3233. * in the last code word of the page
  3234. */
  3235. cmd = dma_buffer->cmd;
  3236. dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
  3237. dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
  3238. dma_buffer->data.cfg1 = chip->CFG1_RAW |
  3239. (chip->CFG1 & CFG1_WIDE_FLASH);
  3240. if (chip->CFG1 & CFG1_WIDE_FLASH)
  3241. dma_buffer->data.addr0 = (page << 16) |
  3242. ((528*(cwperpage-1)) >> 1);
  3243. else
  3244. dma_buffer->data.addr0 = (page << 16) |
  3245. (528*(cwperpage-1));
  3246. dma_buffer->data.addr1 = (page >> 16) & 0xff;
  3247. dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
  3248. dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
  3249. dma_buffer->data.exec = 1;
  3250. dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
  3251. dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
  3252. dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
  3253. dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
  3254. dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
  3255. dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
  3256. dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
  3257. dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
  3258. dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
  3259. dma_buffer->data.adm_default_mux = 0x00000FC0;
  3260. dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
  3261. /* Reading last code word from NC01 */
  3262. /* enable CS1 */
  3263. cmd->cmd = 0;
  3264. cmd->src = msm_virt_to_dma(chip,
  3265. &dma_buffer->data.ebi2_chip_select_cfg0);
  3266. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  3267. cmd->len = 4;
  3268. cmd++;
  3269. /* 0xF14 */
  3270. cmd->cmd = 0;
  3271. cmd->src = msm_virt_to_dma(chip,
  3272. &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
  3273. cmd->dst = EBI2_NAND_ADM_MUX;
  3274. cmd->len = 4;
  3275. cmd++;
  3276. cmd->cmd = DST_CRCI_NAND_CMD;
  3277. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3278. cmd->dst = NC01(MSM_NAND_FLASH_CMD);
  3279. cmd->len = 16;
  3280. cmd++;
  3281. cmd->cmd = 0;
  3282. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  3283. cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
  3284. cmd->len = 8;
  3285. cmd++;
  3286. cmd->cmd = 0;
  3287. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3288. cmd->dst = NC01(MSM_NAND_EXEC_CMD);
  3289. cmd->len = 4;
  3290. cmd++;
  3291. /* 0xF28 */
  3292. cmd->cmd = 0;
  3293. cmd->src = msm_virt_to_dma(chip,
  3294. &dma_buffer->data.adm_mux_data_ack_req_nc10);
  3295. cmd->dst = EBI2_NAND_ADM_MUX;
  3296. cmd->len = 4;
  3297. cmd++;
  3298. cmd->cmd = SRC_CRCI_NAND_DATA;
  3299. cmd->src = NC01(MSM_NAND_FLASH_STATUS);
  3300. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
  3301. cmd->len = 8;
  3302. cmd++;
  3303. cmd->cmd = 0;
  3304. cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
  3305. (528*(cwperpage-1)));
  3306. cmd->dst = msm_virt_to_dma(chip, buf01);
  3307. cmd->len = 4;
  3308. cmd++;
  3309. /* Reading last code word from NC10 */
  3310. /* 0x53C */
  3311. cmd->cmd = 0;
  3312. cmd->src = msm_virt_to_dma(chip,
  3313. &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
  3314. cmd->dst = EBI2_NAND_ADM_MUX;
  3315. cmd->len = 4;
  3316. cmd++;
  3317. cmd->cmd = DST_CRCI_NAND_CMD;
  3318. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3319. cmd->dst = NC10(MSM_NAND_FLASH_CMD);
  3320. cmd->len = 12;
  3321. cmd++;
  3322. cmd->cmd = 0;
  3323. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
  3324. cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
  3325. cmd->len = 4;
  3326. cmd++;
  3327. cmd->cmd = 0;
  3328. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
  3329. cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
  3330. cmd->len = 8;
  3331. cmd++;
  3332. cmd->cmd = 0;
  3333. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3334. cmd->dst = NC10(MSM_NAND_EXEC_CMD);
  3335. cmd->len = 4;
  3336. cmd++;
3337. /* 0xA3C */
  3338. cmd->cmd = 0;
  3339. cmd->src = msm_virt_to_dma(chip,
  3340. &dma_buffer->data.adm_mux_data_ack_req_nc01);
  3341. cmd->dst = EBI2_NAND_ADM_MUX;
  3342. cmd->len = 4;
  3343. cmd++;
  3344. cmd->cmd = SRC_CRCI_NAND_DATA;
  3345. cmd->src = NC10(MSM_NAND_FLASH_STATUS);
  3346. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
  3347. cmd->len = 8;
  3348. cmd++;
  3349. cmd->cmd = 0;
  3350. cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
  3351. (528*(cwperpage-1)));
  3352. cmd->dst = msm_virt_to_dma(chip, buf10);
  3353. cmd->len = 4;
  3354. cmd++;
3355. /* 0xFC0 */
  3356. cmd->cmd = 0;
  3357. cmd->src = msm_virt_to_dma(chip,
  3358. &dma_buffer->data.adm_default_mux);
  3359. cmd->dst = EBI2_NAND_ADM_MUX;
  3360. cmd->len = 4;
  3361. cmd++;
3362. /* disable CS1 */
3363. cmd->cmd = 0;
3364. cmd->src = msm_virt_to_dma(chip,
3365. &dma_buffer->data.default_ebi2_chip_select_cfg0);
  3366. cmd->dst = EBI2_CHIP_SELECT_CFG0;
  3367. cmd->len = 4;
  3368. cmd++;
  3369. BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
  3370. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  3371. dma_buffer->cmd[0].cmd |= CMD_OCB;
  3372. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  3373. dma_buffer->cmdptr = (msm_virt_to_dma(chip,
  3374. dma_buffer->cmd) >> 3) | CMD_PTR_LP;
  3375. mb();
  3376. msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
  3377. DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
  3378. mb();
  3379. ret = 0;
  3380. if ((dma_buffer->data.result[0].flash_status & 0x110) ||
  3381. (dma_buffer->data.result[1].flash_status & 0x110))
  3382. ret = -EIO;
  3383. if (!ret) {
  3384. /* Check for bad block marker byte for NC01 & NC10 */
  3385. if (chip->CFG1 & CFG1_WIDE_FLASH) {
  3386. if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
  3387. (buf10[0] != 0xFF || buf10[1] != 0xFF))
  3388. ret = 1;
  3389. } else {
  3390. if (buf01[0] != 0xFF || buf10[0] != 0xFF)
  3391. ret = 1;
  3392. }
  3393. }
  3394. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
  3395. return ret;
  3396. }
  3397. static int
  3398. msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  3399. {
  3400. struct mtd_oob_ops ops;
  3401. int ret;
  3402. uint8_t *buf;
  3403. /* Check for invalid offset */
  3404. if (ofs > mtd->size)
  3405. return -EINVAL;
  3406. if (ofs & (mtd->erasesize - 1)) {
  3407. pr_err("%s: unsupported block address, 0x%x\n",
  3408. __func__, (uint32_t)ofs);
  3409. return -EINVAL;
  3410. }
3411. /*
3412. * Write all 0s to the first page.
3413. * This sets the bad block marker to 0.
3414. */
  3415. buf = page_address(ZERO_PAGE());
  3416. ops.mode = MTD_OPS_RAW;
  3417. ops.len = mtd->writesize + mtd->oobsize;
  3418. ops.retlen = 0;
  3419. ops.ooblen = 0;
  3420. ops.datbuf = buf;
  3421. ops.oobbuf = NULL;
  3422. if (!interleave_enable)
  3423. ret = msm_nand_write_oob(mtd, ofs, &ops);
  3424. else
  3425. ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
  3426. return ret;
  3427. }
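/*
 * Note: marking a block bad simply programs the whole first page
 * (main + spare) with zeroes in raw mode, so the bad block marker bytes
 * read back as 0x00 and msm_nand_block_isbad() above reports the block
 * as bad.
 */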
  3428. /**
  3429. * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
  3430. * @param mtd MTD device structure
  3431. */
  3432. static int msm_nand_suspend(struct mtd_info *mtd)
  3433. {
  3434. return 0;
  3435. }
  3436. /**
  3437. * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
  3438. * @param mtd MTD device structure
  3439. */
  3440. static void msm_nand_resume(struct mtd_info *mtd)
  3441. {
  3442. }
  3443. struct onenand_information {
  3444. uint16_t manufacturer_id;
  3445. uint16_t device_id;
  3446. uint16_t version_id;
  3447. uint16_t data_buf_size;
  3448. uint16_t boot_buf_size;
  3449. uint16_t num_of_buffers;
  3450. uint16_t technology;
  3451. };
  3452. static struct onenand_information onenand_info;
  3453. static uint32_t nand_sfcmd_mode;
  3454. uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
  3455. {
  3456. struct {
  3457. dmov_s cmd[7];
  3458. unsigned cmdptr;
  3459. struct {
  3460. uint32_t bcfg;
  3461. uint32_t cmd;
  3462. uint32_t exec;
  3463. uint32_t status;
  3464. uint32_t addr0;
  3465. uint32_t addr1;
  3466. uint32_t addr2;
  3467. uint32_t addr3;
  3468. uint32_t addr4;
  3469. uint32_t addr5;
  3470. uint32_t addr6;
  3471. uint32_t data0;
  3472. uint32_t data1;
  3473. uint32_t data2;
  3474. uint32_t data3;
  3475. uint32_t data4;
  3476. uint32_t data5;
  3477. uint32_t data6;
  3478. } data;
  3479. } *dma_buffer;
  3480. dmov_s *cmd;
  3481. int err = 0;
  3482. uint32_t initialsflashcmd = 0;
  3483. initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);
  3484. if ((initialsflashcmd & 0x10) == 0x10)
  3485. nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
  3486. else
  3487. nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;
3488. printk(KERN_INFO "SFLASHC Async Mode bit: %x\n", nand_sfcmd_mode);
  3489. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  3490. (chip, sizeof(*dma_buffer))));
  3491. cmd = dma_buffer->cmd;
  3492. dma_buffer->data.bcfg = SFLASH_BCFG |
  3493. (nand_sfcmd_mode ? 0 : (1 << 24));
  3494. dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
  3495. MSM_NAND_SFCMD_DATXS,
  3496. nand_sfcmd_mode,
  3497. MSM_NAND_SFCMD_REGRD);
  3498. dma_buffer->data.exec = 1;
  3499. dma_buffer->data.status = CLEAN_DATA_32;
  3500. dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
  3501. (ONENAND_MANUFACTURER_ID);
  3502. dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
  3503. (ONENAND_VERSION_ID);
  3504. dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
  3505. (ONENAND_BOOT_BUFFER_SIZE);
  3506. dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
  3507. (ONENAND_TECHNOLOGY << 0);
  3508. dma_buffer->data.data0 = CLEAN_DATA_32;
  3509. dma_buffer->data.data1 = CLEAN_DATA_32;
  3510. dma_buffer->data.data2 = CLEAN_DATA_32;
  3511. dma_buffer->data.data3 = CLEAN_DATA_32;
  3512. /* Enable and configure the SFlash controller */
  3513. cmd->cmd = 0;
  3514. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
  3515. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  3516. cmd->len = 4;
  3517. cmd++;
  3518. /* Block on cmd ready and write CMD register */
  3519. cmd->cmd = DST_CRCI_NAND_CMD;
  3520. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
  3521. cmd->dst = MSM_NAND_SFLASHC_CMD;
  3522. cmd->len = 4;
  3523. cmd++;
  3524. /* Configure the ADDR0 and ADDR1 registers */
  3525. cmd->cmd = 0;
  3526. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  3527. cmd->dst = MSM_NAND_ADDR0;
  3528. cmd->len = 8;
  3529. cmd++;
  3530. /* Configure the ADDR2 and ADDR3 registers */
  3531. cmd->cmd = 0;
  3532. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  3533. cmd->dst = MSM_NAND_ADDR2;
  3534. cmd->len = 8;
  3535. cmd++;
  3536. /* Kick the execute command */
  3537. cmd->cmd = 0;
  3538. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
  3539. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  3540. cmd->len = 4;
  3541. cmd++;
  3542. /* Block on data ready, and read the two status registers */
  3543. cmd->cmd = SRC_CRCI_NAND_DATA;
  3544. cmd->src = MSM_NAND_SFLASHC_STATUS;
  3545. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
  3546. cmd->len = 4;
  3547. cmd++;
  3548. /* Read data registers - valid only if status says success */
  3549. cmd->cmd = 0;
  3550. cmd->src = MSM_NAND_GENP_REG0;
  3551. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  3552. cmd->len = 16;
  3553. cmd++;
  3554. BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
  3555. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  3556. dma_buffer->cmd[0].cmd |= CMD_OCB;
  3557. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  3558. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  3559. >> 3) | CMD_PTR_LP;
  3560. mb();
  3561. msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
  3562. | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  3563. &dma_buffer->cmdptr)));
  3564. mb();
  3565. /* Check for errors, protection violations etc */
  3566. if (dma_buffer->data.status & 0x110) {
3567. pr_info("%s: MPU/OP error "
3568. "(0x%x) during OneNAND probe\n",
  3569. __func__, dma_buffer->data.status);
  3570. err = -EIO;
  3571. } else {
  3572. onenand_info.manufacturer_id =
  3573. (dma_buffer->data.data0 >> 0) & 0x0000FFFF;
  3574. onenand_info.device_id =
  3575. (dma_buffer->data.data0 >> 16) & 0x0000FFFF;
  3576. onenand_info.version_id =
  3577. (dma_buffer->data.data1 >> 0) & 0x0000FFFF;
  3578. onenand_info.data_buf_size =
  3579. (dma_buffer->data.data1 >> 16) & 0x0000FFFF;
  3580. onenand_info.boot_buf_size =
  3581. (dma_buffer->data.data2 >> 0) & 0x0000FFFF;
  3582. onenand_info.num_of_buffers =
  3583. (dma_buffer->data.data2 >> 16) & 0x0000FFFF;
  3584. onenand_info.technology =
  3585. (dma_buffer->data.data3 >> 0) & 0x0000FFFF;
  3586. pr_info("======================================="
  3587. "==========================\n");
  3588. pr_info("%s: manufacturer_id = 0x%x\n"
  3589. , __func__, onenand_info.manufacturer_id);
  3590. pr_info("%s: device_id = 0x%x\n"
  3591. , __func__, onenand_info.device_id);
  3592. pr_info("%s: version_id = 0x%x\n"
  3593. , __func__, onenand_info.version_id);
  3594. pr_info("%s: data_buf_size = 0x%x\n"
  3595. , __func__, onenand_info.data_buf_size);
  3596. pr_info("%s: boot_buf_size = 0x%x\n"
  3597. , __func__, onenand_info.boot_buf_size);
  3598. pr_info("%s: num_of_buffers = 0x%x\n"
  3599. , __func__, onenand_info.num_of_buffers);
  3600. pr_info("%s: technology = 0x%x\n"
  3601. , __func__, onenand_info.technology);
  3602. pr_info("======================================="
  3603. "==========================\n");
  3604. if ((onenand_info.manufacturer_id != 0x00EC)
  3605. || ((onenand_info.device_id & 0x0040) != 0x0040)
  3606. || (onenand_info.data_buf_size != 0x0800)
  3607. || (onenand_info.boot_buf_size != 0x0200)
  3608. || (onenand_info.num_of_buffers != 0x0201)
  3609. || (onenand_info.technology != 0)) {
  3610. pr_info("%s: Detected an unsupported device\n"
  3611. , __func__);
  3612. err = -EIO;
  3613. }
  3614. }
  3615. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  3616. return err;
  3617. }
  3618. int msm_onenand_read_oob(struct mtd_info *mtd,
  3619. loff_t from, struct mtd_oob_ops *ops)
  3620. {
  3621. struct msm_nand_chip *chip = mtd->priv;
  3622. struct {
  3623. dmov_s cmd[53];
  3624. unsigned cmdptr;
  3625. struct {
  3626. uint32_t sfbcfg;
  3627. uint32_t sfcmd[9];
  3628. uint32_t sfexec;
  3629. uint32_t sfstat[9];
  3630. uint32_t addr0;
  3631. uint32_t addr1;
  3632. uint32_t addr2;
  3633. uint32_t addr3;
  3634. uint32_t addr4;
  3635. uint32_t addr5;
  3636. uint32_t addr6;
  3637. uint32_t data0;
  3638. uint32_t data1;
  3639. uint32_t data2;
  3640. uint32_t data3;
  3641. uint32_t data4;
  3642. uint32_t data5;
  3643. uint32_t data6;
  3644. uint32_t macro[5];
  3645. } data;
  3646. } *dma_buffer;
  3647. dmov_s *cmd;
  3648. int err = 0;
  3649. int i;
  3650. dma_addr_t data_dma_addr = 0;
  3651. dma_addr_t oob_dma_addr = 0;
  3652. dma_addr_t data_dma_addr_curr = 0;
  3653. dma_addr_t oob_dma_addr_curr = 0;
  3654. loff_t from_curr = 0;
  3655. unsigned page_count;
  3656. unsigned pages_read = 0;
  3657. uint16_t onenand_startaddr1;
  3658. uint16_t onenand_startaddr8;
  3659. uint16_t onenand_startaddr2;
  3660. uint16_t onenand_startbuffer;
  3661. uint16_t onenand_sysconfig1;
  3662. uint16_t controller_status;
  3663. uint16_t interrupt_status;
  3664. uint16_t ecc_status;
  3665. #if VERBOSE
  3666. pr_info("================================================="
  3667. "================\n");
  3668. pr_info("%s: from 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
  3669. "\noobbuf 0x%p ooblen 0x%x\n",
  3670. __func__, from, ops->mode, ops->datbuf, ops->len,
  3671. ops->oobbuf, ops->ooblen);
  3672. #endif
  3673. if (!mtd) {
  3674. pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
  3675. (uint32_t)mtd);
  3676. return -EINVAL;
  3677. }
  3678. if (from & (mtd->writesize - 1)) {
  3679. pr_err("%s: unsupported from, 0x%llx\n", __func__,
  3680. from);
  3681. return -EINVAL;
  3682. }
  3683. if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
  3684. (ops->mode != MTD_OPS_RAW)) {
  3685. pr_err("%s: unsupported ops->mode, %d\n", __func__,
  3686. ops->mode);
  3687. return -EINVAL;
  3688. }
  3689. if (((ops->datbuf == NULL) || (ops->len == 0)) &&
  3690. ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
  3691. pr_err("%s: incorrect ops fields - nothing to do\n",
  3692. __func__);
  3693. return -EINVAL;
  3694. }
  3695. if ((ops->datbuf != NULL) && (ops->len == 0)) {
  3696. pr_err("%s: data buffer passed but length 0\n",
  3697. __func__);
  3698. return -EINVAL;
  3699. }
  3700. if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
  3701. pr_err("%s: oob buffer passed but length 0\n",
  3702. __func__);
  3703. return -EINVAL;
  3704. }
  3705. if (ops->mode != MTD_OPS_RAW) {
  3706. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  3707. /* when ops->datbuf is NULL, ops->len can be ooblen */
  3708. pr_err("%s: unsupported ops->len, %d\n", __func__,
  3709. ops->len);
  3710. return -EINVAL;
  3711. }
  3712. } else {
  3713. if (ops->datbuf != NULL &&
  3714. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  3715. pr_err("%s: unsupported ops->len,"
  3716. " %d for MTD_OPS_RAW\n", __func__, ops->len);
  3717. return -EINVAL;
  3718. }
  3719. }
  3720. if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
  3721. pr_err("%s: unsupported operation, oobbuf pointer "
  3722. "passed in for RAW mode, %x\n", __func__,
  3723. (uint32_t)ops->oobbuf);
  3724. return -EINVAL;
  3725. }
  3726. if (ops->oobbuf && !ops->datbuf) {
  3727. page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
  3728. mtd->oobavail : mtd->oobsize);
  3729. if ((page_count == 0) && (ops->ooblen))
  3730. page_count = 1;
  3731. } else if (ops->mode != MTD_OPS_RAW)
  3732. page_count = ops->len / mtd->writesize;
  3733. else
  3734. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  3735. if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
  3736. if (page_count * mtd->oobsize > ops->ooblen) {
  3737. pr_err("%s: unsupported ops->ooblen for "
  3738. "PLACE, %d\n", __func__, ops->ooblen);
  3739. return -EINVAL;
  3740. }
  3741. }
  3742. if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
  3743. (ops->ooboffs != 0)) {
  3744. pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
  3745. ops->ooboffs);
  3746. return -EINVAL;
  3747. }
  3748. if (ops->datbuf) {
  3749. memset(ops->datbuf, 0x55, ops->len);
  3750. data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
  3751. ops->datbuf, ops->len, DMA_FROM_DEVICE);
  3752. if (dma_mapping_error(chip->dev, data_dma_addr)) {
  3753. pr_err("%s: failed to get dma addr for %p\n",
  3754. __func__, ops->datbuf);
  3755. return -EIO;
  3756. }
  3757. }
  3758. if (ops->oobbuf) {
  3759. memset(ops->oobbuf, 0x55, ops->ooblen);
  3760. oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
  3761. ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE);
  3762. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  3763. pr_err("%s: failed to get dma addr for %p\n",
  3764. __func__, ops->oobbuf);
  3765. err = -EIO;
  3766. goto err_dma_map_oobbuf_failed;
  3767. }
  3768. }
  3769. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  3770. (chip, sizeof(*dma_buffer))));
  3771. from_curr = from;
  3772. while (page_count-- > 0) {
  3773. cmd = dma_buffer->cmd;
  3774. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  3775. && (from_curr >= (mtd->size>>1))) { /* DDP Device */
  3776. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  3777. (((uint32_t)(from_curr-(mtd->size>>1))
  3778. / mtd->erasesize));
  3779. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  3780. } else {
  3781. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  3782. ((uint32_t)from_curr / mtd->erasesize) ;
  3783. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  3784. }
  3785. onenand_startaddr8 = (((uint32_t)from_curr &
  3786. (mtd->erasesize - 1)) / mtd->writesize) << 2;
  3787. onenand_startbuffer = DATARAM0_0 << 8;
  3788. onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
  3789. ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
  3790. ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
  3791. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  3792. (nand_sfcmd_mode ? 0 : (1 << 24));
  3793. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  3794. MSM_NAND_SFCMD_CMDXS,
  3795. nand_sfcmd_mode,
  3796. MSM_NAND_SFCMD_REGWR);
  3797. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  3798. MSM_NAND_SFCMD_CMDXS,
  3799. nand_sfcmd_mode,
  3800. MSM_NAND_SFCMD_INTHI);
  3801. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  3802. MSM_NAND_SFCMD_DATXS,
  3803. nand_sfcmd_mode,
  3804. MSM_NAND_SFCMD_REGRD);
  3805. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
  3806. MSM_NAND_SFCMD_DATXS,
  3807. nand_sfcmd_mode,
  3808. MSM_NAND_SFCMD_DATRD);
  3809. dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
  3810. MSM_NAND_SFCMD_DATXS,
  3811. nand_sfcmd_mode,
  3812. MSM_NAND_SFCMD_DATRD);
  3813. dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
  3814. MSM_NAND_SFCMD_DATXS,
  3815. nand_sfcmd_mode,
  3816. MSM_NAND_SFCMD_DATRD);
  3817. dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
  3818. MSM_NAND_SFCMD_DATXS,
  3819. nand_sfcmd_mode,
  3820. MSM_NAND_SFCMD_DATRD);
  3821. dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(32, 0, 0,
  3822. MSM_NAND_SFCMD_DATXS,
  3823. nand_sfcmd_mode,
  3824. MSM_NAND_SFCMD_DATRD);
  3825. dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(4, 10, 0,
  3826. MSM_NAND_SFCMD_CMDXS,
  3827. nand_sfcmd_mode,
  3828. MSM_NAND_SFCMD_REGWR);
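/*
 * Summary of the nine SFLASHC command phases built above (counts are the
 * SFLASH_PREPCMD() word counts):
 *   sfcmd[0]   REGWR - program the OneNAND address/config registers
 *   sfcmd[1]   INTHI - wait for the OneNAND interrupt
 *   sfcmd[2]   REGRD - read back ECC, interrupt and controller status
 *   sfcmd[3-6] DATRD - four 512-byte BufferRAM reads, one full 2 KB page
 *   sfcmd[7]   DATRD - spare-area read (mtd->oobsize bytes)
 *   sfcmd[8]   REGWR - restore the controller registers to defaults
 */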
  3829. dma_buffer->data.sfexec = 1;
  3830. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  3831. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  3832. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  3833. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  3834. dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
  3835. dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
  3836. dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
  3837. dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
  3838. dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
  3839. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  3840. (ONENAND_SYSTEM_CONFIG_1);
  3841. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  3842. (ONENAND_START_ADDRESS_1);
  3843. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  3844. (ONENAND_START_ADDRESS_2);
  3845. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  3846. (ONENAND_COMMAND);
  3847. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  3848. (ONENAND_INTERRUPT_STATUS);
  3849. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  3850. (ONENAND_SYSTEM_CONFIG_1);
  3851. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  3852. (ONENAND_START_ADDRESS_1);
  3853. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  3854. (onenand_sysconfig1);
  3855. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  3856. (onenand_startaddr1);
  3857. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  3858. (onenand_startaddr2);
  3859. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  3860. (ONENAND_CMDLOADSPARE);
  3861. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  3862. (CLEAN_DATA_16);
  3863. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  3864. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  3865. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  3866. (ONENAND_STARTADDR1_RES);
  3867. dma_buffer->data.macro[0] = 0x0200;
  3868. dma_buffer->data.macro[1] = 0x0300;
  3869. dma_buffer->data.macro[2] = 0x0400;
  3870. dma_buffer->data.macro[3] = 0x0500;
  3871. dma_buffer->data.macro[4] = 0x8010;
  3872. /*************************************************************/
  3873. /* Write necessary address registers in the onenand device */
  3874. /*************************************************************/
  3875. /* Enable and configure the SFlash controller */
  3876. cmd->cmd = 0;
  3877. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  3878. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  3879. cmd->len = 4;
  3880. cmd++;
  3881. /* Block on cmd ready and write CMD register */
  3882. cmd->cmd = DST_CRCI_NAND_CMD;
  3883. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  3884. cmd->dst = MSM_NAND_SFLASHC_CMD;
  3885. cmd->len = 4;
  3886. cmd++;
  3887. /* Write the ADDR0 and ADDR1 registers */
  3888. cmd->cmd = 0;
  3889. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  3890. cmd->dst = MSM_NAND_ADDR0;
  3891. cmd->len = 8;
  3892. cmd++;
  3893. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  3894. cmd->cmd = 0;
  3895. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  3896. cmd->dst = MSM_NAND_ADDR2;
  3897. cmd->len = 16;
  3898. cmd++;
  3899. /* Write the ADDR6 registers */
  3900. cmd->cmd = 0;
  3901. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  3902. cmd->dst = MSM_NAND_ADDR6;
  3903. cmd->len = 4;
  3904. cmd++;
  3905. /* Write the GENP0, GENP1, GENP2, GENP3 registers */
  3906. cmd->cmd = 0;
  3907. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  3908. cmd->dst = MSM_NAND_GENP_REG0;
  3909. cmd->len = 16;
  3910. cmd++;
  3911. /* Write the FLASH_DEV_CMD4,5,6 registers */
  3912. cmd->cmd = 0;
  3913. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  3914. cmd->dst = MSM_NAND_DEV_CMD4;
  3915. cmd->len = 12;
  3916. cmd++;
  3917. /* Kick the execute command */
  3918. cmd->cmd = 0;
  3919. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  3920. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  3921. cmd->len = 4;
  3922. cmd++;
  3923. /* Block on data ready, and read the status register */
  3924. cmd->cmd = SRC_CRCI_NAND_DATA;
  3925. cmd->src = MSM_NAND_SFLASHC_STATUS;
  3926. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  3927. cmd->len = 4;
  3928. cmd++;
  3929. /*************************************************************/
  3930. /* Wait for the interrupt from the Onenand device controller */
  3931. /*************************************************************/
  3932. /* Block on cmd ready and write CMD register */
  3933. cmd->cmd = DST_CRCI_NAND_CMD;
  3934. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  3935. cmd->dst = MSM_NAND_SFLASHC_CMD;
  3936. cmd->len = 4;
  3937. cmd++;
  3938. /* Kick the execute command */
  3939. cmd->cmd = 0;
  3940. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  3941. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  3942. cmd->len = 4;
  3943. cmd++;
  3944. /* Block on data ready, and read the status register */
  3945. cmd->cmd = SRC_CRCI_NAND_DATA;
  3946. cmd->src = MSM_NAND_SFLASHC_STATUS;
  3947. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  3948. cmd->len = 4;
  3949. cmd++;
  3950. /*************************************************************/
  3951. /* Read necessary status registers from the onenand device */
  3952. /*************************************************************/
  3953. /* Block on cmd ready and write CMD register */
  3954. cmd->cmd = DST_CRCI_NAND_CMD;
  3955. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  3956. cmd->dst = MSM_NAND_SFLASHC_CMD;
  3957. cmd->len = 4;
  3958. cmd++;
  3959. /* Kick the execute command */
  3960. cmd->cmd = 0;
  3961. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  3962. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  3963. cmd->len = 4;
  3964. cmd++;
  3965. /* Block on data ready, and read the status register */
  3966. cmd->cmd = SRC_CRCI_NAND_DATA;
  3967. cmd->src = MSM_NAND_SFLASHC_STATUS;
  3968. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  3969. cmd->len = 4;
  3970. cmd++;
  3971. /* Read the GENP3 register */
  3972. cmd->cmd = 0;
  3973. cmd->src = MSM_NAND_GENP_REG3;
  3974. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  3975. cmd->len = 4;
  3976. cmd++;
  3977. /* Read the DEVCMD4 register */
  3978. cmd->cmd = 0;
  3979. cmd->src = MSM_NAND_DEV_CMD4;
  3980. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  3981. cmd->len = 4;
  3982. cmd++;
  3983. /*************************************************************/
  3984. /* Read the data ram area from the onenand buffer ram */
  3985. /*************************************************************/
  3986. if (ops->datbuf) {
  3987. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  3988. (ONENAND_CMDLOAD);
  3989. for (i = 0; i < 4; i++) {
  3990. /* Block on cmd ready and write CMD register */
  3991. cmd->cmd = DST_CRCI_NAND_CMD;
  3992. cmd->src = msm_virt_to_dma(chip,
  3993. &dma_buffer->data.sfcmd[3+i]);
  3994. cmd->dst = MSM_NAND_SFLASHC_CMD;
  3995. cmd->len = 4;
  3996. cmd++;
  3997. /* Write the MACRO1 register */
  3998. cmd->cmd = 0;
  3999. cmd->src = msm_virt_to_dma(chip,
  4000. &dma_buffer->data.macro[i]);
  4001. cmd->dst = MSM_NAND_MACRO1_REG;
  4002. cmd->len = 4;
  4003. cmd++;
  4004. /* Kick the execute command */
  4005. cmd->cmd = 0;
  4006. cmd->src = msm_virt_to_dma(chip,
  4007. &dma_buffer->data.sfexec);
  4008. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4009. cmd->len = 4;
  4010. cmd++;
  4011. /* Block on data rdy, & read status register */
  4012. cmd->cmd = SRC_CRCI_NAND_DATA;
  4013. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4014. cmd->dst = msm_virt_to_dma(chip,
  4015. &dma_buffer->data.sfstat[3+i]);
  4016. cmd->len = 4;
  4017. cmd++;
  4018. /* Transfer nand ctlr buf contents to usr buf */
  4019. cmd->cmd = 0;
  4020. cmd->src = MSM_NAND_FLASH_BUFFER;
  4021. cmd->dst = data_dma_addr_curr;
  4022. cmd->len = 512;
  4023. data_dma_addr_curr += 512;
  4024. cmd++;
  4025. }
  4026. }
  4027. if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
  4028. /* Block on cmd ready and write CMD register */
  4029. cmd->cmd = DST_CRCI_NAND_CMD;
  4030. cmd->src = msm_virt_to_dma(chip,
  4031. &dma_buffer->data.sfcmd[7]);
  4032. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4033. cmd->len = 4;
  4034. cmd++;
  4035. /* Write the MACRO1 register */
  4036. cmd->cmd = 0;
  4037. cmd->src = msm_virt_to_dma(chip,
  4038. &dma_buffer->data.macro[4]);
  4039. cmd->dst = MSM_NAND_MACRO1_REG;
  4040. cmd->len = 4;
  4041. cmd++;
  4042. /* Kick the execute command */
  4043. cmd->cmd = 0;
  4044. cmd->src = msm_virt_to_dma(chip,
  4045. &dma_buffer->data.sfexec);
  4046. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4047. cmd->len = 4;
  4048. cmd++;
  4049. /* Block on data ready, and read status register */
  4050. cmd->cmd = SRC_CRCI_NAND_DATA;
  4051. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4052. cmd->dst = msm_virt_to_dma(chip,
  4053. &dma_buffer->data.sfstat[7]);
  4054. cmd->len = 4;
  4055. cmd++;
  4056. /* Transfer nand ctlr buffer contents into usr buf */
  4057. if (ops->mode == MTD_OPS_AUTO_OOB) {
  4058. for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
  4059. cmd->cmd = 0;
  4060. cmd->src = MSM_NAND_FLASH_BUFFER +
  4061. mtd->ecclayout->oobfree[i].offset;
  4062. cmd->dst = oob_dma_addr_curr;
  4063. cmd->len =
  4064. mtd->ecclayout->oobfree[i].length;
  4065. oob_dma_addr_curr +=
  4066. mtd->ecclayout->oobfree[i].length;
  4067. cmd++;
  4068. }
  4069. }
  4070. if (ops->mode == MTD_OPS_PLACE_OOB) {
  4071. cmd->cmd = 0;
  4072. cmd->src = MSM_NAND_FLASH_BUFFER;
  4073. cmd->dst = oob_dma_addr_curr;
  4074. cmd->len = mtd->oobsize;
  4075. oob_dma_addr_curr += mtd->oobsize;
  4076. cmd++;
  4077. }
  4078. if (ops->mode == MTD_OPS_RAW) {
  4079. cmd->cmd = 0;
  4080. cmd->src = MSM_NAND_FLASH_BUFFER;
  4081. cmd->dst = data_dma_addr_curr;
  4082. cmd->len = mtd->oobsize;
  4083. data_dma_addr_curr += mtd->oobsize;
  4084. cmd++;
  4085. }
  4086. }
  4087. /*************************************************************/
  4088. /* Restore the necessary registers to proper values */
  4089. /*************************************************************/
  4090. /* Block on cmd ready and write CMD register */
  4091. cmd->cmd = DST_CRCI_NAND_CMD;
  4092. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
  4093. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4094. cmd->len = 4;
  4095. cmd++;
  4096. /* Kick the execute command */
  4097. cmd->cmd = 0;
  4098. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4099. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4100. cmd->len = 4;
  4101. cmd++;
  4102. /* Block on data ready, and read the status register */
  4103. cmd->cmd = SRC_CRCI_NAND_DATA;
  4104. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4105. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
  4106. cmd->len = 4;
  4107. cmd++;
  4108. BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
  4109. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  4110. dma_buffer->cmd[0].cmd |= CMD_OCB;
  4111. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  4112. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  4113. >> 3) | CMD_PTR_LP;
  4114. mb();
  4115. msm_dmov_exec_cmd(chip->dma_channel,
  4116. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  4117. &dma_buffer->cmdptr)));
  4118. mb();
  4119. ecc_status = (dma_buffer->data.data3 >> 16) &
  4120. 0x0000FFFF;
  4121. interrupt_status = (dma_buffer->data.data4 >> 0) &
  4122. 0x0000FFFF;
  4123. controller_status = (dma_buffer->data.data4 >> 16) &
  4124. 0x0000FFFF;
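/*
 * Each 32-bit word read back from the controller packs two 16-bit OneNAND
 * registers: data3 carries the ECC status in its upper half, while data4
 * holds the interrupt status (low half) and controller status (high half).
 */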
  4125. #if VERBOSE
  4126. pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
  4127. "%x %x\n", __func__,
  4128. dma_buffer->data.sfstat[0],
  4129. dma_buffer->data.sfstat[1],
  4130. dma_buffer->data.sfstat[2],
  4131. dma_buffer->data.sfstat[3],
  4132. dma_buffer->data.sfstat[4],
  4133. dma_buffer->data.sfstat[5],
  4134. dma_buffer->data.sfstat[6],
  4135. dma_buffer->data.sfstat[7],
  4136. dma_buffer->data.sfstat[8]);
  4137. pr_info("%s: controller_status = %x\n", __func__,
  4138. controller_status);
  4139. pr_info("%s: interrupt_status = %x\n", __func__,
  4140. interrupt_status);
  4141. pr_info("%s: ecc_status = %x\n", __func__,
  4142. ecc_status);
  4143. #endif
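/*
 * The 0x110 mask below appears to select the operation-error and
 * MPU/protection-violation bits of the SFLASHC status word (inferred from
 * usage rather than the register specification); any transfer whose saved
 * status has either bit set is treated as a failure.
 */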
  4144. /* Check for errors, protection violations etc */
  4145. if ((controller_status != 0)
  4146. || (dma_buffer->data.sfstat[0] & 0x110)
  4147. || (dma_buffer->data.sfstat[1] & 0x110)
  4148. || (dma_buffer->data.sfstat[2] & 0x110)
  4149. || (dma_buffer->data.sfstat[8] & 0x110)
  4150. || ((dma_buffer->data.sfstat[3] & 0x110) &&
  4151. (ops->datbuf))
  4152. || ((dma_buffer->data.sfstat[4] & 0x110) &&
  4153. (ops->datbuf))
  4154. || ((dma_buffer->data.sfstat[5] & 0x110) &&
  4155. (ops->datbuf))
  4156. || ((dma_buffer->data.sfstat[6] & 0x110) &&
  4157. (ops->datbuf))
  4158. || ((dma_buffer->data.sfstat[7] & 0x110) &&
  4159. ((ops->oobbuf)
  4160. || (ops->mode == MTD_OPS_RAW)))) {
  4161. pr_info("%s: ECC/MPU/OP error\n", __func__);
  4162. err = -EIO;
  4163. }
  4164. if (err)
  4165. break;
  4166. pages_read++;
  4167. from_curr += mtd->writesize;
  4168. }
  4169. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  4170. if (ops->oobbuf) {
  4171. dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
  4172. DMA_FROM_DEVICE);
  4173. }
  4174. err_dma_map_oobbuf_failed:
  4175. if (ops->datbuf) {
  4176. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  4177. DMA_FROM_DEVICE);
  4178. }
  4179. if (err) {
  4180. pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
  4181. ops->datbuf ? ops->len : 0, ops->ooblen);
  4182. } else {
  4183. ops->retlen = ops->oobretlen = 0;
  4184. if (ops->datbuf != NULL) {
  4185. if (ops->mode != MTD_OPS_RAW)
  4186. ops->retlen = mtd->writesize * pages_read;
  4187. else
  4188. ops->retlen = (mtd->writesize + mtd->oobsize)
  4189. * pages_read;
  4190. }
  4191. if (ops->oobbuf != NULL) {
  4192. if (ops->mode == MTD_OPS_AUTO_OOB)
  4193. ops->oobretlen = mtd->oobavail * pages_read;
  4194. else
  4195. ops->oobretlen = mtd->oobsize * pages_read;
  4196. }
  4197. }
  4198. #if VERBOSE
  4199. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  4200. __func__, err, ops->retlen, ops->oobretlen);
  4201. pr_info("==================================================="
  4202. "==============\n");
  4203. #endif
  4204. return err;
  4205. }
  4206. int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
  4207. size_t *retlen, u_char *buf)
  4208. {
  4209. int ret;
  4210. struct mtd_oob_ops ops;
  4211. ops.mode = MTD_OPS_PLACE_OOB;
  4212. ops.datbuf = buf;
  4213. ops.len = len;
  4214. ops.retlen = 0;
  4215. ops.oobbuf = NULL;
  4216. ops.ooblen = 0;
  4217. ops.oobretlen = 0;
  4218. ret = msm_onenand_read_oob(mtd, from, &ops);
  4219. *retlen = ops.retlen;
  4220. return ret;
  4221. }
  4222. static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
  4223. struct mtd_oob_ops *ops)
  4224. {
  4225. struct msm_nand_chip *chip = mtd->priv;
  4226. struct {
  4227. dmov_s cmd[53];
  4228. unsigned cmdptr;
  4229. struct {
  4230. uint32_t sfbcfg;
  4231. uint32_t sfcmd[10];
  4232. uint32_t sfexec;
  4233. uint32_t sfstat[10];
  4234. uint32_t addr0;
  4235. uint32_t addr1;
  4236. uint32_t addr2;
  4237. uint32_t addr3;
  4238. uint32_t addr4;
  4239. uint32_t addr5;
  4240. uint32_t addr6;
  4241. uint32_t data0;
  4242. uint32_t data1;
  4243. uint32_t data2;
  4244. uint32_t data3;
  4245. uint32_t data4;
  4246. uint32_t data5;
  4247. uint32_t data6;
  4248. uint32_t macro[5];
  4249. } data;
  4250. } *dma_buffer;
  4251. dmov_s *cmd;
  4252. int err = 0;
  4253. int i, j, k;
  4254. dma_addr_t data_dma_addr = 0;
  4255. dma_addr_t oob_dma_addr = 0;
  4256. dma_addr_t init_dma_addr = 0;
  4257. dma_addr_t data_dma_addr_curr = 0;
  4258. dma_addr_t oob_dma_addr_curr = 0;
  4259. uint8_t *init_spare_bytes;
  4260. loff_t to_curr = 0;
  4261. unsigned page_count;
  4262. unsigned pages_written = 0;
  4263. uint16_t onenand_startaddr1;
  4264. uint16_t onenand_startaddr8;
  4265. uint16_t onenand_startaddr2;
  4266. uint16_t onenand_startbuffer;
  4267. uint16_t onenand_sysconfig1;
  4268. uint16_t controller_status;
  4269. uint16_t interrupt_status;
  4270. uint16_t ecc_status;
  4271. #if VERBOSE
  4272. pr_info("================================================="
  4273. "================\n");
  4274. pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
  4275. "\noobbuf 0x%p ooblen 0x%x\n",
  4276. __func__, to, ops->mode, ops->datbuf, ops->len,
  4277. ops->oobbuf, ops->ooblen);
  4278. #endif
  4279. if (!mtd) {
  4280. pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
  4281. (uint32_t)mtd);
  4282. return -EINVAL;
  4283. }
  4284. if (to & (mtd->writesize - 1)) {
  4285. pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
  4286. return -EINVAL;
  4287. }
4288. if ((ops->mode != MTD_OPS_PLACE_OOB) &&
4289. (ops->mode != MTD_OPS_AUTO_OOB) && (ops->mode != MTD_OPS_RAW)) {
  4290. pr_err("%s: unsupported ops->mode, %d\n", __func__,
  4291. ops->mode);
  4292. return -EINVAL;
  4293. }
  4294. if (((ops->datbuf == NULL) || (ops->len == 0)) &&
  4295. ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
  4296. pr_err("%s: incorrect ops fields - nothing to do\n",
  4297. __func__);
  4298. return -EINVAL;
  4299. }
  4300. if ((ops->datbuf != NULL) && (ops->len == 0)) {
  4301. pr_err("%s: data buffer passed but length 0\n",
  4302. __func__);
  4303. return -EINVAL;
  4304. }
  4305. if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
  4306. pr_err("%s: oob buffer passed but length 0\n",
  4307. __func__);
  4308. return -EINVAL;
  4309. }
  4310. if (ops->mode != MTD_OPS_RAW) {
  4311. if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
  4312. /* when ops->datbuf is NULL, ops->len can be ooblen */
  4313. pr_err("%s: unsupported ops->len, %d\n", __func__,
  4314. ops->len);
  4315. return -EINVAL;
  4316. }
  4317. } else {
  4318. if (ops->datbuf != NULL &&
  4319. (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
  4320. pr_err("%s: unsupported ops->len,"
  4321. " %d for MTD_OPS_RAW\n", __func__, ops->len);
  4322. return -EINVAL;
  4323. }
  4324. }
  4325. if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
  4326. pr_err("%s: unsupported operation, oobbuf pointer "
  4327. "passed in for RAW mode, %x\n", __func__,
  4328. (uint32_t)ops->oobbuf);
  4329. return -EINVAL;
  4330. }
  4331. if (ops->oobbuf && !ops->datbuf) {
  4332. page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
  4333. mtd->oobavail : mtd->oobsize);
  4334. if ((page_count == 0) && (ops->ooblen))
  4335. page_count = 1;
  4336. } else if (ops->mode != MTD_OPS_RAW)
  4337. page_count = ops->len / mtd->writesize;
  4338. else
  4339. page_count = ops->len / (mtd->writesize + mtd->oobsize);
  4340. if ((ops->mode == MTD_OPS_AUTO_OOB) && (ops->oobbuf != NULL)) {
  4341. if (page_count > 1) {
  4342. pr_err("%s: unsupported ops->ooblen for"
  4343. "AUTO, %d\n", __func__, ops->ooblen);
  4344. return -EINVAL;
  4345. }
  4346. }
  4347. if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
  4348. if (page_count * mtd->oobsize > ops->ooblen) {
  4349. pr_err("%s: unsupported ops->ooblen for"
  4350. "PLACE, %d\n", __func__, ops->ooblen);
  4351. return -EINVAL;
  4352. }
  4353. }
  4354. if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
  4355. (ops->ooboffs != 0)) {
  4356. pr_err("%s: unsupported ops->ooboffs, %d\n",
  4357. __func__, ops->ooboffs);
  4358. return -EINVAL;
  4359. }
  4360. init_spare_bytes = kmalloc(64, GFP_KERNEL);
  4361. if (!init_spare_bytes) {
  4362. pr_err("%s: failed to alloc init_spare_bytes buffer\n",
  4363. __func__);
  4364. return -ENOMEM;
  4365. }
  4366. for (i = 0; i < 64; i++)
  4367. init_spare_bytes[i] = 0xFF;
  4368. if ((ops->oobbuf) && (ops->mode == MTD_OPS_AUTO_OOB)) {
  4369. for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
  4370. for (j = 0; j < mtd->ecclayout->oobfree[i].length;
  4371. j++) {
  4372. init_spare_bytes[j +
  4373. mtd->ecclayout->oobfree[i].offset]
  4374. = (ops->oobbuf)[k];
  4375. k++;
  4376. }
  4377. }
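/*
 * init_spare_bytes is a 64-byte spare-area image preset to 0xFF. For
 * MTD_OPS_AUTO_OOB writes the caller's OOB bytes are scattered into it at
 * the ecclayout oobfree offsets above, and the assembled image is later
 * DMA'd into the controller buffer in place of a raw spare buffer.
 */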
  4378. if (ops->datbuf) {
  4379. data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
  4380. ops->datbuf, ops->len, DMA_TO_DEVICE);
4381. if (dma_mapping_error(chip->dev, data_dma_addr)) {
4382. pr_err("%s: failed to get dma addr for %p\n",
4383. __func__, ops->datbuf);
kfree(init_spare_bytes); /* avoid leaking the spare scratch buffer */
4384. return -EIO;
4385. }
  4386. }
  4387. if (ops->oobbuf) {
  4388. oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
  4389. ops->oobbuf, ops->ooblen, DMA_TO_DEVICE);
  4390. if (dma_mapping_error(chip->dev, oob_dma_addr)) {
  4391. pr_err("%s: failed to get dma addr for %p\n",
  4392. __func__, ops->oobbuf);
  4393. err = -EIO;
  4394. goto err_dma_map_oobbuf_failed;
  4395. }
  4396. }
  4397. init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, 64,
  4398. DMA_TO_DEVICE);
  4399. if (dma_mapping_error(chip->dev, init_dma_addr)) {
  4400. pr_err("%s: failed to get dma addr for %p\n",
  4401. __func__, init_spare_bytes);
  4402. err = -EIO;
  4403. goto err_dma_map_initbuf_failed;
  4404. }
  4405. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  4406. (chip, sizeof(*dma_buffer))));
  4407. to_curr = to;
  4408. while (page_count-- > 0) {
  4409. cmd = dma_buffer->cmd;
  4410. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  4411. && (to_curr >= (mtd->size>>1))) { /* DDP Device */
  4412. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  4413. (((uint32_t)(to_curr-(mtd->size>>1))
  4414. / mtd->erasesize));
  4415. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  4416. } else {
  4417. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  4418. ((uint32_t)to_curr / mtd->erasesize) ;
  4419. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  4420. }
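/*
 * Dual-die (DDP) parts expose their upper half on flash core 1 with block
 * numbering restarting at zero, so addresses at or beyond mtd->size/2 are
 * rebased before forming START_ADDRESS_1 and the BufferRAM select placed
 * in START_ADDRESS_2.
 */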
  4421. onenand_startaddr8 = (((uint32_t)to_curr &
  4422. (mtd->erasesize - 1)) / mtd->writesize) << 2;
  4423. onenand_startbuffer = DATARAM0_0 << 8;
  4424. onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
  4425. ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
  4426. ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
  4427. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  4428. (nand_sfcmd_mode ? 0 : (1 << 24));
  4429. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(6, 0, 0,
  4430. MSM_NAND_SFCMD_CMDXS,
  4431. nand_sfcmd_mode,
  4432. MSM_NAND_SFCMD_REGWR);
  4433. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(256, 0, 0,
  4434. MSM_NAND_SFCMD_CMDXS,
  4435. nand_sfcmd_mode,
  4436. MSM_NAND_SFCMD_DATWR);
  4437. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(256, 0, 0,
  4438. MSM_NAND_SFCMD_CMDXS,
  4439. nand_sfcmd_mode,
  4440. MSM_NAND_SFCMD_DATWR);
  4441. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
  4442. MSM_NAND_SFCMD_CMDXS,
  4443. nand_sfcmd_mode,
  4444. MSM_NAND_SFCMD_DATWR);
  4445. dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
  4446. MSM_NAND_SFCMD_CMDXS,
  4447. nand_sfcmd_mode,
  4448. MSM_NAND_SFCMD_DATWR);
  4449. dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(32, 0, 0,
  4450. MSM_NAND_SFCMD_CMDXS,
  4451. nand_sfcmd_mode,
  4452. MSM_NAND_SFCMD_DATWR);
  4453. dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(1, 6, 0,
  4454. MSM_NAND_SFCMD_CMDXS,
  4455. nand_sfcmd_mode,
  4456. MSM_NAND_SFCMD_REGWR);
  4457. dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(0, 0, 32,
  4458. MSM_NAND_SFCMD_CMDXS,
  4459. nand_sfcmd_mode,
  4460. MSM_NAND_SFCMD_INTHI);
  4461. dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(3, 7, 0,
  4462. MSM_NAND_SFCMD_DATXS,
  4463. nand_sfcmd_mode,
  4464. MSM_NAND_SFCMD_REGRD);
  4465. dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(4, 10, 0,
  4466. MSM_NAND_SFCMD_CMDXS,
  4467. nand_sfcmd_mode,
  4468. MSM_NAND_SFCMD_REGWR);
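/*
 * Command sequence for one page program: sfcmd[0] writes the OneNAND
 * address/command registers, sfcmd[1]-[4] stream the four 512-byte
 * main-area chunks into the DataRAM, sfcmd[5] loads the spare area,
 * sfcmd[6] issues the program command, sfcmd[7] waits for the device
 * interrupt, sfcmd[8] reads back the status registers, and sfcmd[9]
 * restores the controller registers.
 */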
  4469. dma_buffer->data.sfexec = 1;
  4470. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  4471. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  4472. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  4473. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  4474. dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
  4475. dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
  4476. dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
  4477. dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
  4478. dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
  4479. dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
  4480. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  4481. (ONENAND_SYSTEM_CONFIG_1);
  4482. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  4483. (ONENAND_START_ADDRESS_1);
  4484. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  4485. (ONENAND_START_ADDRESS_2);
  4486. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  4487. (ONENAND_COMMAND);
  4488. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  4489. (ONENAND_INTERRUPT_STATUS);
  4490. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  4491. (ONENAND_SYSTEM_CONFIG_1);
  4492. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  4493. (ONENAND_START_ADDRESS_1);
  4494. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  4495. (onenand_sysconfig1);
  4496. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  4497. (onenand_startaddr1);
  4498. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  4499. (onenand_startaddr2);
  4500. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  4501. (ONENAND_CMDPROGSPARE);
  4502. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  4503. (CLEAN_DATA_16);
  4504. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  4505. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  4506. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  4507. (ONENAND_STARTADDR1_RES);
  4508. dma_buffer->data.macro[0] = 0x0200;
  4509. dma_buffer->data.macro[1] = 0x0300;
  4510. dma_buffer->data.macro[2] = 0x0400;
  4511. dma_buffer->data.macro[3] = 0x0500;
  4512. dma_buffer->data.macro[4] = 0x8010;
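/*
 * The MACRO1 values are word offsets into the OneNAND BufferRAM:
 * 0x0200-0x0500 select the four 512-byte sectors of the DataRAM main area
 * and 0x8010 the start of its spare area (offsets taken from the standard
 * OneNAND BufferRAM map; not restated in this driver).
 */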
  4513. /*************************************************************/
  4514. /* Write necessary address registers in the onenand device */
  4515. /*************************************************************/
  4516. /* Enable and configure the SFlash controller */
  4517. cmd->cmd = 0;
  4518. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  4519. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  4520. cmd->len = 4;
  4521. cmd++;
  4522. /* Block on cmd ready and write CMD register */
  4523. cmd->cmd = DST_CRCI_NAND_CMD;
  4524. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  4525. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4526. cmd->len = 4;
  4527. cmd++;
  4528. /* Write the ADDR0 and ADDR1 registers */
  4529. cmd->cmd = 0;
  4530. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  4531. cmd->dst = MSM_NAND_ADDR0;
  4532. cmd->len = 8;
  4533. cmd++;
  4534. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  4535. cmd->cmd = 0;
  4536. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  4537. cmd->dst = MSM_NAND_ADDR2;
  4538. cmd->len = 16;
  4539. cmd++;
  4540. /* Write the ADDR6 registers */
  4541. cmd->cmd = 0;
  4542. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  4543. cmd->dst = MSM_NAND_ADDR6;
  4544. cmd->len = 4;
  4545. cmd++;
  4546. /* Write the GENP0, GENP1, GENP2, GENP3 registers */
  4547. cmd->cmd = 0;
  4548. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  4549. cmd->dst = MSM_NAND_GENP_REG0;
  4550. cmd->len = 16;
  4551. cmd++;
  4552. /* Write the FLASH_DEV_CMD4,5,6 registers */
  4553. cmd->cmd = 0;
  4554. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  4555. cmd->dst = MSM_NAND_DEV_CMD4;
  4556. cmd->len = 12;
  4557. cmd++;
  4558. /* Kick the execute command */
  4559. cmd->cmd = 0;
  4560. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4561. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4562. cmd->len = 4;
  4563. cmd++;
  4564. /* Block on data ready, and read the status register */
  4565. cmd->cmd = SRC_CRCI_NAND_DATA;
  4566. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4567. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  4568. cmd->len = 4;
  4569. cmd++;
  4570. /*************************************************************/
  4571. /* Write the data ram area in the onenand buffer ram */
  4572. /*************************************************************/
  4573. if (ops->datbuf) {
  4574. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  4575. (ONENAND_CMDPROG);
  4576. for (i = 0; i < 4; i++) {
  4577. /* Block on cmd ready and write CMD register */
  4578. cmd->cmd = DST_CRCI_NAND_CMD;
  4579. cmd->src = msm_virt_to_dma(chip,
  4580. &dma_buffer->data.sfcmd[1+i]);
  4581. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4582. cmd->len = 4;
  4583. cmd++;
  4584. /* Trnsfr usr buf contents to nand ctlr buf */
  4585. cmd->cmd = 0;
  4586. cmd->src = data_dma_addr_curr;
  4587. cmd->dst = MSM_NAND_FLASH_BUFFER;
  4588. cmd->len = 512;
  4589. data_dma_addr_curr += 512;
  4590. cmd++;
  4591. /* Write the MACRO1 register */
  4592. cmd->cmd = 0;
  4593. cmd->src = msm_virt_to_dma(chip,
  4594. &dma_buffer->data.macro[i]);
  4595. cmd->dst = MSM_NAND_MACRO1_REG;
  4596. cmd->len = 4;
  4597. cmd++;
  4598. /* Kick the execute command */
  4599. cmd->cmd = 0;
  4600. cmd->src = msm_virt_to_dma(chip,
  4601. &dma_buffer->data.sfexec);
  4602. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4603. cmd->len = 4;
  4604. cmd++;
  4605. /* Block on data rdy, & read status register */
  4606. cmd->cmd = SRC_CRCI_NAND_DATA;
  4607. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4608. cmd->dst = msm_virt_to_dma(chip,
  4609. &dma_buffer->data.sfstat[1+i]);
  4610. cmd->len = 4;
  4611. cmd++;
  4612. }
  4613. }
  4614. /* Block on cmd ready and write CMD register */
  4615. cmd->cmd = DST_CRCI_NAND_CMD;
  4616. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[5]);
  4617. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4618. cmd->len = 4;
  4619. cmd++;
  4620. if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
  4621. /* Transfer user buf contents into nand ctlr buffer */
  4622. if (ops->mode == MTD_OPS_AUTO_OOB) {
  4623. cmd->cmd = 0;
  4624. cmd->src = init_dma_addr;
  4625. cmd->dst = MSM_NAND_FLASH_BUFFER;
  4626. cmd->len = mtd->oobsize;
  4627. cmd++;
  4628. }
  4629. if (ops->mode == MTD_OPS_PLACE_OOB) {
  4630. cmd->cmd = 0;
  4631. cmd->src = oob_dma_addr_curr;
  4632. cmd->dst = MSM_NAND_FLASH_BUFFER;
  4633. cmd->len = mtd->oobsize;
  4634. oob_dma_addr_curr += mtd->oobsize;
  4635. cmd++;
  4636. }
  4637. if (ops->mode == MTD_OPS_RAW) {
  4638. cmd->cmd = 0;
  4639. cmd->src = data_dma_addr_curr;
  4640. cmd->dst = MSM_NAND_FLASH_BUFFER;
  4641. cmd->len = mtd->oobsize;
  4642. data_dma_addr_curr += mtd->oobsize;
  4643. cmd++;
  4644. }
  4645. } else {
  4646. cmd->cmd = 0;
  4647. cmd->src = init_dma_addr;
  4648. cmd->dst = MSM_NAND_FLASH_BUFFER;
  4649. cmd->len = mtd->oobsize;
  4650. cmd++;
  4651. }
  4652. /* Write the MACRO1 register */
  4653. cmd->cmd = 0;
  4654. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[4]);
  4655. cmd->dst = MSM_NAND_MACRO1_REG;
  4656. cmd->len = 4;
  4657. cmd++;
  4658. /* Kick the execute command */
  4659. cmd->cmd = 0;
  4660. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4661. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4662. cmd->len = 4;
  4663. cmd++;
  4664. /* Block on data ready, and read the status register */
  4665. cmd->cmd = SRC_CRCI_NAND_DATA;
  4666. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4667. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[5]);
  4668. cmd->len = 4;
  4669. cmd++;
  4670. /*********************************************************/
  4671. /* Issuing write command */
  4672. /*********************************************************/
  4673. /* Block on cmd ready and write CMD register */
  4674. cmd->cmd = DST_CRCI_NAND_CMD;
  4675. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[6]);
  4676. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4677. cmd->len = 4;
  4678. cmd++;
  4679. /* Kick the execute command */
  4680. cmd->cmd = 0;
  4681. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4682. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4683. cmd->len = 4;
  4684. cmd++;
  4685. /* Block on data ready, and read the status register */
  4686. cmd->cmd = SRC_CRCI_NAND_DATA;
  4687. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4688. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[6]);
  4689. cmd->len = 4;
  4690. cmd++;
  4691. /*************************************************************/
  4692. /* Wait for the interrupt from the Onenand device controller */
  4693. /*************************************************************/
  4694. /* Block on cmd ready and write CMD register */
  4695. cmd->cmd = DST_CRCI_NAND_CMD;
  4696. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[7]);
  4697. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4698. cmd->len = 4;
  4699. cmd++;
  4700. /* Kick the execute command */
  4701. cmd->cmd = 0;
  4702. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4703. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4704. cmd->len = 4;
  4705. cmd++;
  4706. /* Block on data ready, and read the status register */
  4707. cmd->cmd = SRC_CRCI_NAND_DATA;
  4708. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4709. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[7]);
  4710. cmd->len = 4;
  4711. cmd++;
  4712. /*************************************************************/
  4713. /* Read necessary status registers from the onenand device */
  4714. /*************************************************************/
  4715. /* Block on cmd ready and write CMD register */
  4716. cmd->cmd = DST_CRCI_NAND_CMD;
  4717. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
  4718. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4719. cmd->len = 4;
  4720. cmd++;
  4721. /* Kick the execute command */
  4722. cmd->cmd = 0;
  4723. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4724. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4725. cmd->len = 4;
  4726. cmd++;
  4727. /* Block on data ready, and read the status register */
  4728. cmd->cmd = SRC_CRCI_NAND_DATA;
  4729. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4730. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
  4731. cmd->len = 4;
  4732. cmd++;
  4733. /* Read the GENP3 register */
  4734. cmd->cmd = 0;
  4735. cmd->src = MSM_NAND_GENP_REG3;
  4736. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  4737. cmd->len = 4;
  4738. cmd++;
  4739. /* Read the DEVCMD4 register */
  4740. cmd->cmd = 0;
  4741. cmd->src = MSM_NAND_DEV_CMD4;
  4742. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  4743. cmd->len = 4;
  4744. cmd++;
  4745. /*************************************************************/
  4746. /* Restore the necessary registers to proper values */
  4747. /*************************************************************/
  4748. /* Block on cmd ready and write CMD register */
  4749. cmd->cmd = DST_CRCI_NAND_CMD;
  4750. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
  4751. cmd->dst = MSM_NAND_SFLASHC_CMD;
  4752. cmd->len = 4;
  4753. cmd++;
  4754. /* Kick the execute command */
  4755. cmd->cmd = 0;
  4756. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  4757. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  4758. cmd->len = 4;
  4759. cmd++;
  4760. /* Block on data ready, and read the status register */
  4761. cmd->cmd = SRC_CRCI_NAND_DATA;
  4762. cmd->src = MSM_NAND_SFLASHC_STATUS;
  4763. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
  4764. cmd->len = 4;
  4765. cmd++;
  4766. BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
  4767. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  4768. dma_buffer->cmd[0].cmd |= CMD_OCB;
  4769. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  4770. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  4771. >> 3) | CMD_PTR_LP;
  4772. mb();
  4773. msm_dmov_exec_cmd(chip->dma_channel,
  4774. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  4775. &dma_buffer->cmdptr)));
  4776. mb();
  4777. ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  4778. interrupt_status = (dma_buffer->data.data4 >> 0)&0x0000FFFF;
  4779. controller_status = (dma_buffer->data.data4 >> 16)&0x0000FFFF;
  4780. #if VERBOSE
  4781. pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
  4782. " %x %x %x\n", __func__,
  4783. dma_buffer->data.sfstat[0],
  4784. dma_buffer->data.sfstat[1],
  4785. dma_buffer->data.sfstat[2],
  4786. dma_buffer->data.sfstat[3],
  4787. dma_buffer->data.sfstat[4],
  4788. dma_buffer->data.sfstat[5],
  4789. dma_buffer->data.sfstat[6],
  4790. dma_buffer->data.sfstat[7],
  4791. dma_buffer->data.sfstat[8],
  4792. dma_buffer->data.sfstat[9]);
  4793. pr_info("%s: controller_status = %x\n", __func__,
  4794. controller_status);
  4795. pr_info("%s: interrupt_status = %x\n", __func__,
  4796. interrupt_status);
  4797. pr_info("%s: ecc_status = %x\n", __func__,
  4798. ecc_status);
  4799. #endif
  4800. /* Check for errors, protection violations etc */
  4801. if ((controller_status != 0)
  4802. || (dma_buffer->data.sfstat[0] & 0x110)
  4803. || (dma_buffer->data.sfstat[6] & 0x110)
  4804. || (dma_buffer->data.sfstat[7] & 0x110)
  4805. || (dma_buffer->data.sfstat[8] & 0x110)
  4806. || (dma_buffer->data.sfstat[9] & 0x110)
  4807. || ((dma_buffer->data.sfstat[1] & 0x110) &&
  4808. (ops->datbuf))
  4809. || ((dma_buffer->data.sfstat[2] & 0x110) &&
  4810. (ops->datbuf))
  4811. || ((dma_buffer->data.sfstat[3] & 0x110) &&
  4812. (ops->datbuf))
  4813. || ((dma_buffer->data.sfstat[4] & 0x110) &&
  4814. (ops->datbuf))
  4815. || ((dma_buffer->data.sfstat[5] & 0x110) &&
  4816. ((ops->oobbuf)
  4817. || (ops->mode == MTD_OPS_RAW)))) {
  4818. pr_info("%s: ECC/MPU/OP error\n", __func__);
  4819. err = -EIO;
  4820. }
  4821. if (err)
  4822. break;
  4823. pages_written++;
  4824. to_curr += mtd->writesize;
  4825. }
  4826. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  4827. dma_unmap_page(chip->dev, init_dma_addr, 64, DMA_TO_DEVICE);
  4828. err_dma_map_initbuf_failed:
  4829. if (ops->oobbuf) {
  4830. dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
  4831. DMA_TO_DEVICE);
  4832. }
  4833. err_dma_map_oobbuf_failed:
  4834. if (ops->datbuf) {
  4835. dma_unmap_page(chip->dev, data_dma_addr, ops->len,
  4836. DMA_TO_DEVICE);
  4837. }
  4838. if (err) {
4839. pr_err("%s: write failed at 0x%llx (datlen %x, ooblen %x)\n",
4840. __func__, to_curr, ops->datbuf ? ops->len : 0, ops->ooblen);
  4841. } else {
  4842. ops->retlen = ops->oobretlen = 0;
  4843. if (ops->datbuf != NULL) {
  4844. if (ops->mode != MTD_OPS_RAW)
  4845. ops->retlen = mtd->writesize * pages_written;
  4846. else
  4847. ops->retlen = (mtd->writesize + mtd->oobsize)
  4848. * pages_written;
  4849. }
  4850. if (ops->oobbuf != NULL) {
  4851. if (ops->mode == MTD_OPS_AUTO_OOB)
  4852. ops->oobretlen = mtd->oobavail * pages_written;
  4853. else
  4854. ops->oobretlen = mtd->oobsize * pages_written;
  4855. }
  4856. }
  4857. #if VERBOSE
  4858. pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
  4859. __func__, err, ops->retlen, ops->oobretlen);
  4860. pr_info("================================================="
  4861. "================\n");
  4862. #endif
  4863. kfree(init_spare_bytes);
  4864. return err;
  4865. }
  4866. static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
  4867. size_t *retlen, const u_char *buf)
  4868. {
  4869. int ret;
  4870. struct mtd_oob_ops ops;
  4871. ops.mode = MTD_OPS_PLACE_OOB;
  4872. ops.datbuf = (uint8_t *)buf;
  4873. ops.len = len;
  4874. ops.retlen = 0;
  4875. ops.oobbuf = NULL;
  4876. ops.ooblen = 0;
  4877. ops.oobretlen = 0;
  4878. ret = msm_onenand_write_oob(mtd, to, &ops);
  4879. *retlen = ops.retlen;
  4880. return ret;
  4881. }
  4882. static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
  4883. {
  4884. struct msm_nand_chip *chip = mtd->priv;
  4885. struct {
  4886. dmov_s cmd[20];
  4887. unsigned cmdptr;
  4888. struct {
  4889. uint32_t sfbcfg;
  4890. uint32_t sfcmd[4];
  4891. uint32_t sfexec;
  4892. uint32_t sfstat[4];
  4893. uint32_t addr0;
  4894. uint32_t addr1;
  4895. uint32_t addr2;
  4896. uint32_t addr3;
  4897. uint32_t addr4;
  4898. uint32_t addr5;
  4899. uint32_t addr6;
  4900. uint32_t data0;
  4901. uint32_t data1;
  4902. uint32_t data2;
  4903. uint32_t data3;
  4904. uint32_t data4;
  4905. uint32_t data5;
  4906. uint32_t data6;
  4907. } data;
  4908. } *dma_buffer;
  4909. dmov_s *cmd;
  4910. int err = 0;
  4911. uint16_t onenand_startaddr1;
  4912. uint16_t onenand_startaddr8;
  4913. uint16_t onenand_startaddr2;
  4914. uint16_t onenand_startbuffer;
  4915. uint16_t controller_status;
  4916. uint16_t interrupt_status;
  4917. uint16_t ecc_status;
  4918. uint64_t temp;
  4919. #if VERBOSE
  4920. pr_info("================================================="
  4921. "================\n");
  4922. pr_info("%s: addr 0x%llx len 0x%llx\n",
  4923. __func__, instr->addr, instr->len);
  4924. #endif
  4925. if (instr->addr & (mtd->erasesize - 1)) {
  4926. pr_err("%s: Unsupported erase address, 0x%llx\n",
  4927. __func__, instr->addr);
  4928. return -EINVAL;
  4929. }
  4930. if (instr->len != mtd->erasesize) {
  4931. pr_err("%s: Unsupported erase len, %lld\n",
  4932. __func__, instr->len);
  4933. return -EINVAL;
  4934. }
  4935. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  4936. (chip, sizeof(*dma_buffer))));
  4937. cmd = dma_buffer->cmd;
  4938. temp = instr->addr;
  4939. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  4940. && (temp >= (mtd->size>>1))) { /* DDP Device */
  4941. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  4942. (((uint32_t)(temp-(mtd->size>>1))
  4943. / mtd->erasesize));
  4944. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  4945. } else {
  4946. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  4947. ((uint32_t)temp / mtd->erasesize) ;
  4948. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  4949. }
  4950. onenand_startaddr8 = 0x0000;
  4951. onenand_startbuffer = DATARAM0_0 << 8;
  4952. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  4953. (nand_sfcmd_mode ? 0 : (1 << 24));
  4954. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  4955. MSM_NAND_SFCMD_CMDXS,
  4956. nand_sfcmd_mode,
  4957. MSM_NAND_SFCMD_REGWR);
  4958. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  4959. MSM_NAND_SFCMD_CMDXS,
  4960. nand_sfcmd_mode,
  4961. MSM_NAND_SFCMD_INTHI);
  4962. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  4963. MSM_NAND_SFCMD_DATXS,
  4964. nand_sfcmd_mode,
  4965. MSM_NAND_SFCMD_REGRD);
  4966. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
  4967. MSM_NAND_SFCMD_CMDXS,
  4968. nand_sfcmd_mode,
  4969. MSM_NAND_SFCMD_REGWR);
  4970. dma_buffer->data.sfexec = 1;
  4971. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  4972. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  4973. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  4974. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  4975. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  4976. (ONENAND_SYSTEM_CONFIG_1);
  4977. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  4978. (ONENAND_START_ADDRESS_1);
  4979. dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
  4980. (ONENAND_START_ADDRESS_2);
  4981. dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
  4982. (ONENAND_COMMAND);
  4983. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  4984. (ONENAND_INTERRUPT_STATUS);
  4985. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  4986. (ONENAND_SYSTEM_CONFIG_1);
  4987. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  4988. (ONENAND_START_ADDRESS_1);
  4989. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  4990. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  4991. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  4992. (onenand_startaddr1);
  4993. dma_buffer->data.data2 = (onenand_startbuffer << 16) |
  4994. (onenand_startaddr2);
  4995. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  4996. (ONENAND_CMDERAS);
  4997. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  4998. (CLEAN_DATA_16);
  4999. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  5000. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5001. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  5002. (ONENAND_STARTADDR1_RES);
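/*
 * Erase uses a four-command sequence: sfcmd[0] programs the address and
 * command registers (ONENAND_CMDERAS via data3), sfcmd[1] waits for the
 * device interrupt, sfcmd[2] reads back the ECC/interrupt/controller
 * status, and sfcmd[3] restores the controller registers.
 */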
  5003. /***************************************************************/
  5004. /* Write the necessary address registers in the onenand device */
  5005. /***************************************************************/
  5006. /* Enable and configure the SFlash controller */
  5007. cmd->cmd = 0;
  5008. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  5009. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  5010. cmd->len = 4;
  5011. cmd++;
  5012. /* Block on cmd ready and write CMD register */
  5013. cmd->cmd = DST_CRCI_NAND_CMD;
  5014. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  5015. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5016. cmd->len = 4;
  5017. cmd++;
  5018. /* Write the ADDR0 and ADDR1 registers */
  5019. cmd->cmd = 0;
  5020. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  5021. cmd->dst = MSM_NAND_ADDR0;
  5022. cmd->len = 8;
  5023. cmd++;
  5024. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  5025. cmd->cmd = 0;
  5026. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  5027. cmd->dst = MSM_NAND_ADDR2;
  5028. cmd->len = 16;
  5029. cmd++;
  5030. /* Write the ADDR6 registers */
  5031. cmd->cmd = 0;
  5032. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  5033. cmd->dst = MSM_NAND_ADDR6;
  5034. cmd->len = 4;
  5035. cmd++;
  5036. /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
  5037. cmd->cmd = 0;
  5038. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  5039. cmd->dst = MSM_NAND_GENP_REG0;
  5040. cmd->len = 16;
  5041. cmd++;
  5042. /* Write the FLASH_DEV_CMD4,5,6 registers */
  5043. cmd->cmd = 0;
  5044. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5045. cmd->dst = MSM_NAND_DEV_CMD4;
  5046. cmd->len = 12;
  5047. cmd++;
  5048. /* Kick the execute command */
  5049. cmd->cmd = 0;
  5050. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5051. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5052. cmd->len = 4;
  5053. cmd++;
  5054. /* Block on data ready, and read the status register */
  5055. cmd->cmd = SRC_CRCI_NAND_DATA;
  5056. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5057. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  5058. cmd->len = 4;
  5059. cmd++;
  5060. /***************************************************************/
  5061. /* Wait for the interrupt from the Onenand device controller */
  5062. /***************************************************************/
  5063. /* Block on cmd ready and write CMD register */
  5064. cmd->cmd = DST_CRCI_NAND_CMD;
  5065. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  5066. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5067. cmd->len = 4;
  5068. cmd++;
  5069. /* Kick the execute command */
  5070. cmd->cmd = 0;
  5071. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5072. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5073. cmd->len = 4;
  5074. cmd++;
  5075. /* Block on data ready, and read the status register */
  5076. cmd->cmd = SRC_CRCI_NAND_DATA;
  5077. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5078. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  5079. cmd->len = 4;
  5080. cmd++;
  5081. /***************************************************************/
  5082. /* Read the necessary status registers from the onenand device */
  5083. /***************************************************************/
  5084. /* Block on cmd ready and write CMD register */
  5085. cmd->cmd = DST_CRCI_NAND_CMD;
  5086. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  5087. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5088. cmd->len = 4;
  5089. cmd++;
  5090. /* Kick the execute command */
  5091. cmd->cmd = 0;
  5092. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5093. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5094. cmd->len = 4;
  5095. cmd++;
  5096. /* Block on data ready, and read the status register */
  5097. cmd->cmd = SRC_CRCI_NAND_DATA;
  5098. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5099. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  5100. cmd->len = 4;
  5101. cmd++;
  5102. /* Read the GENP3 register */
  5103. cmd->cmd = 0;
  5104. cmd->src = MSM_NAND_GENP_REG3;
  5105. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  5106. cmd->len = 4;
  5107. cmd++;
  5108. /* Read the DEVCMD4 register */
  5109. cmd->cmd = 0;
  5110. cmd->src = MSM_NAND_DEV_CMD4;
  5111. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5112. cmd->len = 4;
  5113. cmd++;
  5114. /***************************************************************/
  5115. /* Restore the necessary registers to proper values */
  5116. /***************************************************************/
  5117. /* Block on cmd ready and write CMD register */
  5118. cmd->cmd = DST_CRCI_NAND_CMD;
  5119. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
  5120. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5121. cmd->len = 4;
  5122. cmd++;
  5123. /* Kick the execute command */
  5124. cmd->cmd = 0;
  5125. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5126. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5127. cmd->len = 4;
  5128. cmd++;
  5129. /* Block on data ready, and read the status register */
  5130. cmd->cmd = SRC_CRCI_NAND_DATA;
  5131. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5132. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
  5133. cmd->len = 4;
  5134. cmd++;
  5135. BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
  5136. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  5137. dma_buffer->cmd[0].cmd |= CMD_OCB;
  5138. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  5139. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  5140. >> 3) | CMD_PTR_LP;
  5141. mb();
  5142. msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
  5143. | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  5144. &dma_buffer->cmdptr)));
  5145. mb();
  5146. ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  5147. interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
  5148. controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
  5149. #if VERBOSE
  5150. pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
  5151. dma_buffer->data.sfstat[0],
  5152. dma_buffer->data.sfstat[1],
  5153. dma_buffer->data.sfstat[2],
  5154. dma_buffer->data.sfstat[3]);
  5155. pr_info("%s: controller_status = %x\n", __func__,
  5156. controller_status);
  5157. pr_info("%s: interrupt_status = %x\n", __func__,
  5158. interrupt_status);
  5159. pr_info("%s: ecc_status = %x\n", __func__,
  5160. ecc_status);
  5161. #endif
  5162. /* Check for errors, protection violations etc */
  5163. if ((controller_status != 0)
  5164. || (dma_buffer->data.sfstat[0] & 0x110)
  5165. || (dma_buffer->data.sfstat[1] & 0x110)
  5166. || (dma_buffer->data.sfstat[2] & 0x110)
  5167. || (dma_buffer->data.sfstat[3] & 0x110)) {
  5168. pr_err("%s: ECC/MPU/OP error\n", __func__);
  5169. err = -EIO;
  5170. }
  5171. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  5172. if (err) {
  5173. pr_err("%s: Erase failed, 0x%llx\n", __func__,
  5174. instr->addr);
  5175. instr->fail_addr = instr->addr;
  5176. instr->state = MTD_ERASE_FAILED;
  5177. } else {
  5178. instr->state = MTD_ERASE_DONE;
5179. instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
  5180. mtd_erase_callback(instr);
  5181. }
  5182. #if VERBOSE
  5183. pr_info("\n%s: ret %d\n", __func__, err);
  5184. pr_info("===================================================="
  5185. "=============\n");
  5186. #endif
  5187. return err;
  5188. }
  5189. static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
  5190. {
  5191. struct mtd_oob_ops ops;
  5192. int rval, i;
  5193. int ret = 0;
  5194. uint8_t *buffer;
  5195. uint8_t *oobptr;
  5196. if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
  5197. pr_err("%s: unsupported block address, 0x%x\n",
  5198. __func__, (uint32_t)ofs);
  5199. return -EINVAL;
  5200. }
  5201. buffer = kmalloc(2112, GFP_KERNEL|GFP_DMA);
5202. if (!buffer) {
  5203. pr_err("%s: Could not kmalloc for buffer\n",
  5204. __func__);
  5205. return -ENOMEM;
  5206. }
  5207. memset(buffer, 0x00, 2112);
  5208. oobptr = &(buffer[2048]);
  5209. ops.mode = MTD_OPS_RAW;
  5210. ops.len = 2112;
  5211. ops.retlen = 0;
  5212. ops.ooblen = 0;
  5213. ops.oobretlen = 0;
  5214. ops.ooboffs = 0;
  5215. ops.datbuf = buffer;
  5216. ops.oobbuf = NULL;
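/*
 * The block is reported bad if, in either of its first two pages, any of
 * the first two bytes of a 16-byte spare segment (offsets 0/1, 16/17,
 * 32/33, 48/49 - conventionally the bad-block marker positions) reads back
 * as something other than 0xFF.
 */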
  5217. for (i = 0; i < 2; i++) {
  5218. ofs = ofs + i*mtd->writesize;
  5219. rval = msm_onenand_read_oob(mtd, ofs, &ops);
  5220. if (rval) {
  5221. pr_err("%s: Error in reading bad blk info\n",
  5222. __func__);
  5223. ret = rval;
  5224. break;
  5225. }
  5226. if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
  5227. (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
  5228. (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
  5229. (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF)
  5230. ) {
  5231. ret = 1;
  5232. break;
  5233. }
  5234. }
  5235. kfree(buffer);
  5236. #if VERBOSE
  5237. if (ret == 1)
  5238. pr_info("%s : Block containing 0x%x is bad\n",
  5239. __func__, (unsigned int)ofs);
  5240. #endif
  5241. return ret;
  5242. }
  5243. static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  5244. {
  5245. struct mtd_oob_ops ops;
  5246. int rval, i;
  5247. int ret = 0;
  5248. uint8_t *buffer;
  5249. if ((ofs > mtd->size) || (ofs & (mtd->erasesize - 1))) {
  5250. pr_err("%s: unsupported block address, 0x%x\n",
  5251. __func__, (uint32_t)ofs);
  5252. return -EINVAL;
  5253. }
  5254. buffer = page_address(ZERO_PAGE());
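/*
 * The kernel's ZERO_PAGE provides a ready-made all-zero source, so marking
 * a block bad amounts to raw-writing 2112 bytes of zeroes (main plus spare,
 * bad-block markers included) over each of the block's first two pages.
 */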
  5255. ops.mode = MTD_OPS_RAW;
  5256. ops.len = 2112;
  5257. ops.retlen = 0;
  5258. ops.ooblen = 0;
  5259. ops.oobretlen = 0;
  5260. ops.ooboffs = 0;
  5261. ops.datbuf = buffer;
  5262. ops.oobbuf = NULL;
  5263. for (i = 0; i < 2; i++) {
  5264. ofs = ofs + i*mtd->writesize;
  5265. rval = msm_onenand_write_oob(mtd, ofs, &ops);
  5266. if (rval) {
  5267. pr_err("%s: Error in writing bad blk info\n",
  5268. __func__);
  5269. ret = rval;
  5270. break;
  5271. }
  5272. }
  5273. return ret;
  5274. }
  5275. static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  5276. {
  5277. struct msm_nand_chip *chip = mtd->priv;
  5278. struct {
  5279. dmov_s cmd[20];
  5280. unsigned cmdptr;
  5281. struct {
  5282. uint32_t sfbcfg;
  5283. uint32_t sfcmd[4];
  5284. uint32_t sfexec;
  5285. uint32_t sfstat[4];
  5286. uint32_t addr0;
  5287. uint32_t addr1;
  5288. uint32_t addr2;
  5289. uint32_t addr3;
  5290. uint32_t addr4;
  5291. uint32_t addr5;
  5292. uint32_t addr6;
  5293. uint32_t data0;
  5294. uint32_t data1;
  5295. uint32_t data2;
  5296. uint32_t data3;
  5297. uint32_t data4;
  5298. uint32_t data5;
  5299. uint32_t data6;
  5300. } data;
  5301. } *dma_buffer;
  5302. dmov_s *cmd;
  5303. int err = 0;
  5304. uint16_t onenand_startaddr1;
  5305. uint16_t onenand_startaddr8;
  5306. uint16_t onenand_startaddr2;
  5307. uint16_t onenand_startblock;
  5308. uint16_t controller_status;
  5309. uint16_t interrupt_status;
  5310. uint16_t write_prot_status;
  5311. uint64_t start_ofs;
  5312. #if VERBOSE
  5313. pr_info("===================================================="
  5314. "=============\n");
  5315. pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
  5316. #endif
  5317. /* 'ofs' & 'len' should align to block size */
  5318. if (ofs&(mtd->erasesize - 1)) {
  5319. pr_err("%s: Unsupported ofs address, 0x%llx\n",
  5320. __func__, ofs);
  5321. return -EINVAL;
  5322. }
  5323. if (len&(mtd->erasesize - 1)) {
  5324. pr_err("%s: Unsupported len, %lld\n",
  5325. __func__, len);
  5326. return -EINVAL;
  5327. }
  5328. if (ofs+len > mtd->size) {
  5329. pr_err("%s: Maximum chip size exceeded\n", __func__);
  5330. return -EINVAL;
  5331. }
  5332. wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
  5333. (chip, sizeof(*dma_buffer))));
  5334. for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
  5335. #if VERBOSE
  5336. pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
  5337. #endif
  5338. cmd = dma_buffer->cmd;
  5339. if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
  5340. && (ofs >= (mtd->size>>1))) { /* DDP Device */
  5341. onenand_startaddr1 = DEVICE_FLASHCORE_1 |
  5342. (((uint32_t)(ofs - (mtd->size>>1))
  5343. / mtd->erasesize));
  5344. onenand_startaddr2 = DEVICE_BUFFERRAM_1;
  5345. onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
  5346. / mtd->erasesize);
  5347. } else {
  5348. onenand_startaddr1 = DEVICE_FLASHCORE_0 |
  5349. ((uint32_t)ofs / mtd->erasesize) ;
  5350. onenand_startaddr2 = DEVICE_BUFFERRAM_0;
  5351. onenand_startblock = ((uint32_t)ofs
  5352. / mtd->erasesize);
  5353. }
  5354. onenand_startaddr8 = 0x0000;
  5355. dma_buffer->data.sfbcfg = SFLASH_BCFG |
  5356. (nand_sfcmd_mode ? 0 : (1 << 24));
  5357. dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
  5358. MSM_NAND_SFCMD_CMDXS,
  5359. nand_sfcmd_mode,
  5360. MSM_NAND_SFCMD_REGWR);
  5361. dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
  5362. MSM_NAND_SFCMD_CMDXS,
  5363. nand_sfcmd_mode,
  5364. MSM_NAND_SFCMD_INTHI);
  5365. dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
  5366. MSM_NAND_SFCMD_DATXS,
  5367. nand_sfcmd_mode,
  5368. MSM_NAND_SFCMD_REGRD);
  5369. dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
  5370. MSM_NAND_SFCMD_CMDXS,
  5371. nand_sfcmd_mode,
  5372. MSM_NAND_SFCMD_REGWR);
  5373. dma_buffer->data.sfexec = 1;
  5374. dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
  5375. dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
  5376. dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
  5377. dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
  5378. dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
  5379. (ONENAND_SYSTEM_CONFIG_1);
  5380. dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
  5381. (ONENAND_START_ADDRESS_1);
  5382. dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
  5383. (ONENAND_START_ADDRESS_2);
  5384. dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
  5385. (ONENAND_COMMAND);
  5386. dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
  5387. (ONENAND_INTERRUPT_STATUS);
  5388. dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
  5389. (ONENAND_SYSTEM_CONFIG_1);
  5390. dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
  5391. (ONENAND_START_ADDRESS_1);
  5392. dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
  5393. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5394. dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
  5395. (onenand_startaddr1);
  5396. dma_buffer->data.data2 = (onenand_startblock << 16) |
  5397. (onenand_startaddr2);
  5398. dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
  5399. (ONENAND_CMD_UNLOCK);
  5400. dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
  5401. (CLEAN_DATA_16);
  5402. dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
  5403. (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
  5404. dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
  5405. (ONENAND_STARTADDR1_RES);
  5406. /*************************************************************/
  5407. /* Write the necessary address reg in the onenand device */
  5408. /*************************************************************/
  5409. /* Enable and configure the SFlash controller */
  5410. cmd->cmd = 0;
  5411. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
  5412. cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
  5413. cmd->len = 4;
  5414. cmd++;
  5415. /* Block on cmd ready and write CMD register */
  5416. cmd->cmd = DST_CRCI_NAND_CMD;
  5417. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
  5418. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5419. cmd->len = 4;
  5420. cmd++;
  5421. /* Write the ADDR0 and ADDR1 registers */
  5422. cmd->cmd = 0;
  5423. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
  5424. cmd->dst = MSM_NAND_ADDR0;
  5425. cmd->len = 8;
  5426. cmd++;
  5427. /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
  5428. cmd->cmd = 0;
  5429. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
  5430. cmd->dst = MSM_NAND_ADDR2;
  5431. cmd->len = 16;
  5432. cmd++;
  5433. /* Write the ADDR6 registers */
  5434. cmd->cmd = 0;
  5435. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
  5436. cmd->dst = MSM_NAND_ADDR6;
  5437. cmd->len = 4;
  5438. cmd++;
  5439. /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
  5440. cmd->cmd = 0;
  5441. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
  5442. cmd->dst = MSM_NAND_GENP_REG0;
  5443. cmd->len = 16;
  5444. cmd++;
  5445. /* Write the FLASH_DEV_CMD4,5,6 registers */
  5446. cmd->cmd = 0;
  5447. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5448. cmd->dst = MSM_NAND_DEV_CMD4;
  5449. cmd->len = 12;
  5450. cmd++;
  5451. /* Kick the execute command */
  5452. cmd->cmd = 0;
  5453. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5454. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5455. cmd->len = 4;
  5456. cmd++;
  5457. /* Block on data ready, and read the status register */
  5458. cmd->cmd = SRC_CRCI_NAND_DATA;
  5459. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5460. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
  5461. cmd->len = 4;
  5462. cmd++;
  5463. /*************************************************************/
  5464. /* Wait for the interrupt from the Onenand device controller */
  5465. /*************************************************************/
  5466. /* Block on cmd ready and write CMD register */
  5467. cmd->cmd = DST_CRCI_NAND_CMD;
  5468. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
  5469. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5470. cmd->len = 4;
  5471. cmd++;
  5472. /* Kick the execute command */
  5473. cmd->cmd = 0;
  5474. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5475. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5476. cmd->len = 4;
  5477. cmd++;
  5478. /* Block on data ready, and read the status register */
  5479. cmd->cmd = SRC_CRCI_NAND_DATA;
  5480. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5481. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
  5482. cmd->len = 4;
  5483. cmd++;
  5484. /*********************************************************/
  5485. /* Read the necessary status reg from the onenand device */
  5486. /*********************************************************/
  5487. /* Block on cmd ready and write CMD register */
  5488. cmd->cmd = DST_CRCI_NAND_CMD;
  5489. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
  5490. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5491. cmd->len = 4;
  5492. cmd++;
  5493. /* Kick the execute command */
  5494. cmd->cmd = 0;
  5495. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5496. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5497. cmd->len = 4;
  5498. cmd++;
  5499. /* Block on data ready, and read the status register */
  5500. cmd->cmd = SRC_CRCI_NAND_DATA;
  5501. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5502. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
  5503. cmd->len = 4;
  5504. cmd++;
  5505. /* Read the GENP3 register */
  5506. cmd->cmd = 0;
  5507. cmd->src = MSM_NAND_GENP_REG3;
  5508. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
  5509. cmd->len = 4;
  5510. cmd++;
  5511. /* Read the DEVCMD4 register */
  5512. cmd->cmd = 0;
  5513. cmd->src = MSM_NAND_DEV_CMD4;
  5514. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
  5515. cmd->len = 4;
  5516. cmd++;
  5517. /************************************************************/
  5518. /* Restore the necessary registers to proper values */
  5519. /************************************************************/
  5520. /* Block on cmd ready and write CMD register */
  5521. cmd->cmd = DST_CRCI_NAND_CMD;
  5522. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
  5523. cmd->dst = MSM_NAND_SFLASHC_CMD;
  5524. cmd->len = 4;
  5525. cmd++;
  5526. /* Kick the execute command */
  5527. cmd->cmd = 0;
  5528. cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
  5529. cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
  5530. cmd->len = 4;
  5531. cmd++;
  5532. /* Block on data ready, and read the status register */
  5533. cmd->cmd = SRC_CRCI_NAND_DATA;
  5534. cmd->src = MSM_NAND_SFLASHC_STATUS;
  5535. cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
  5536. cmd->len = 4;
  5537. cmd++;
  5538. BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
  5539. BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
  5540. dma_buffer->cmd[0].cmd |= CMD_OCB;
  5541. cmd[-1].cmd |= CMD_OCU | CMD_LC;
  5542. dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
  5543. >> 3) | CMD_PTR_LP;
  5544. mb();
  5545. msm_dmov_exec_cmd(chip->dma_channel,
  5546. DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
  5547. &dma_buffer->cmdptr)));
  5548. mb();
  5549. write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
  5550. interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
  5551. controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
  5552. #if VERBOSE
  5553. pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
  5554. dma_buffer->data.sfstat[0],
  5555. dma_buffer->data.sfstat[1],
  5556. dma_buffer->data.sfstat[2],
  5557. dma_buffer->data.sfstat[3]);
  5558. pr_info("%s: controller_status = %x\n", __func__,
  5559. controller_status);
  5560. pr_info("%s: interrupt_status = %x\n", __func__,
  5561. interrupt_status);
  5562. pr_info("%s: write_prot_status = %x\n", __func__,
  5563. write_prot_status);
  5564. #endif
  5565. /* Check for errors, protection violations etc */
  5566. if ((controller_status != 0)
  5567. || (dma_buffer->data.sfstat[0] & 0x110)
  5568. || (dma_buffer->data.sfstat[1] & 0x110)
  5569. || (dma_buffer->data.sfstat[2] & 0x110)
  5570. || (dma_buffer->data.sfstat[3] & 0x110)) {
  5571. pr_err("%s: ECC/MPU/OP error\n", __func__);
  5572. err = -EIO;
  5573. }
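/*
 * ONENAND_WP_US is taken here as the "unlocked" bit of the write-protection
 * status register; if it is still clear after the unlock command, the
 * block is assumed to have stayed locked and the operation is failed.
 */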
  5574. if (!(write_prot_status & ONENAND_WP_US)) {
  5575. pr_err("%s: Unexpected status ofs = 0x%llx,"
  5576. "wp_status = %x\n",
  5577. __func__, ofs, write_prot_status);
  5578. err = -EIO;
  5579. }
  5580. if (err)
  5581. break;
  5582. }
  5583. msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
  5584. #if VERBOSE
  5585. pr_info("\n%s: ret %d\n", __func__, err);
  5586. pr_info("===================================================="
  5587. "=============\n");
  5588. #endif
  5589. return err;
  5590. }
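
/*
 * msm_onenand_lock() mirrors msm_onenand_unlock() above: for each erase
 * block in [ofs, ofs + len) it builds one 20-element dmov command list that
 *   1) programs the SFLASHC/GENP registers with the block address and
 *      ONENAND_CMD_LOCK,
 *   2) waits for the OneNAND interrupt,
 *   3) reads back the write-protect and controller status, and
 *   4) restores SYSTEM_CONFIG_1 and the start-address registers,
 * then checks that the block reports the locked state (ONENAND_WP_LS).
 */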
static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct msm_nand_chip *chip = mtd->priv;

	struct {
		dmov_s cmd[20];
		unsigned cmdptr;
		struct {
			uint32_t sfbcfg;
			uint32_t sfcmd[4];
			uint32_t sfexec;
			uint32_t sfstat[4];
			uint32_t addr0;
			uint32_t addr1;
			uint32_t addr2;
			uint32_t addr3;
			uint32_t addr4;
			uint32_t addr5;
			uint32_t addr6;
			uint32_t data0;
			uint32_t data1;
			uint32_t data2;
			uint32_t data3;
			uint32_t data4;
			uint32_t data5;
			uint32_t data6;
		} data;
	} *dma_buffer;
	dmov_s *cmd;

	int err = 0;
	uint16_t onenand_startaddr1;
	uint16_t onenand_startaddr8;
	uint16_t onenand_startaddr2;
	uint16_t onenand_startblock;
	uint16_t controller_status;
	uint16_t interrupt_status;
	uint16_t write_prot_status;
	uint64_t start_ofs;

#if VERBOSE
	pr_info("===================================================="
			"=============\n");
	pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
#endif
	/* 'ofs' & 'len' should align to block size */
	if (ofs&(mtd->erasesize - 1)) {
		pr_err("%s: Unsupported ofs address, 0x%llx\n",
				__func__, ofs);
		return -EINVAL;
	}

	if (len&(mtd->erasesize - 1)) {
		pr_err("%s: Unsupported len, %lld\n",
				__func__, len);
		return -EINVAL;
	}

	if (ofs+len > mtd->size) {
		pr_err("%s: Maximum chip size exceeded\n", __func__);
		return -EINVAL;
	}

	wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
#if VERBOSE
		pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
#endif

		cmd = dma_buffer->cmd;
		if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
			&& (ofs >= (mtd->size>>1))) { /* DDP Device */
			onenand_startaddr1 = DEVICE_FLASHCORE_1 |
				(((uint32_t)(ofs - (mtd->size>>1))
				/ mtd->erasesize));
			onenand_startaddr2 = DEVICE_BUFFERRAM_1;
			onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
				/ mtd->erasesize);
		} else {
			onenand_startaddr1 = DEVICE_FLASHCORE_0 |
				((uint32_t)ofs / mtd->erasesize);
			onenand_startaddr2 = DEVICE_BUFFERRAM_0;
			onenand_startblock = ((uint32_t)ofs
				/ mtd->erasesize);
		}

		onenand_startaddr8 = 0x0000;
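
		/*
		 * Illustration (hypothetical DDP part, 256 MiB total with
		 * 128 KiB blocks): ofs = 0x8000000 sits in the upper half,
		 * so the command is addressed to flash core 1, block 0 of
		 * that core; any ofs below 0x8000000 stays on core 0 with
		 * block = ofs / erasesize.
		 */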
		dma_buffer->data.sfbcfg = SFLASH_BCFG |
					(nand_sfcmd_mode ? 0 : (1 << 24));
		dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
						MSM_NAND_SFCMD_CMDXS,
						nand_sfcmd_mode,
						MSM_NAND_SFCMD_REGWR);
		dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
						MSM_NAND_SFCMD_CMDXS,
						nand_sfcmd_mode,
						MSM_NAND_SFCMD_INTHI);
		dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
						MSM_NAND_SFCMD_DATXS,
						nand_sfcmd_mode,
						MSM_NAND_SFCMD_REGRD);
		dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
						MSM_NAND_SFCMD_CMDXS,
						nand_sfcmd_mode,
						MSM_NAND_SFCMD_REGWR);
		dma_buffer->data.sfexec = 1;
		dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
		dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
		dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
						(ONENAND_SYSTEM_CONFIG_1);
		dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
						(ONENAND_START_ADDRESS_1);
		dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
						(ONENAND_START_ADDRESS_2);
		dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
						(ONENAND_COMMAND);
		dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
						(ONENAND_INTERRUPT_STATUS);
		dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
						(ONENAND_SYSTEM_CONFIG_1);
		dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
						(ONENAND_START_ADDRESS_1);
		dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
					(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
		dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
						(onenand_startaddr1);
		dma_buffer->data.data2 = (onenand_startblock << 16) |
						(onenand_startaddr2);
		dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
						(ONENAND_CMD_LOCK);
		dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
						(CLEAN_DATA_16);
		dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
					(ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
		dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
						(ONENAND_STARTADDR1_RES);
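
		/*
		 * Each addrN/dataN word above appears to pack two 16-bit
		 * OneNAND register addresses/values, low half-word first;
		 * e.g. data3 carries ONENAND_CMD_LOCK in its lower half
		 * while its upper half is left as CLEAN_DATA_16, so the
		 * read-back of GENP3 later in this loop can return the
		 * write-protect status in bits 31:16.
		 */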
		/*************************************************************/
		/* Write the necessary address reg in the onenand device     */
		/*************************************************************/

		/* Enable and configure the SFlash controller */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
		cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
		cmd->len = 4;
		cmd++;

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Write the ADDR0 and ADDR1 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
		cmd->dst = MSM_NAND_ADDR0;
		cmd->len = 8;
		cmd++;

		/* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
		cmd->dst = MSM_NAND_ADDR2;
		cmd->len = 16;
		cmd++;

		/* Write the ADDR6 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
		cmd->dst = MSM_NAND_ADDR6;
		cmd->len = 4;
		cmd++;

		/* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
		cmd->dst = MSM_NAND_GENP_REG0;
		cmd->len = 16;
		cmd++;

		/* Write the FLASH_DEV_CMD4,5,6 registers */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
		cmd->dst = MSM_NAND_DEV_CMD4;
		cmd->len = 12;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
		cmd->len = 4;
		cmd++;

		/*************************************************************/
		/* Wait for the interrupt from the Onenand device controller */
		/*************************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
		cmd->len = 4;
		cmd++;

		/*********************************************************/
		/* Read the necessary status reg from the onenand device */
		/*********************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
		cmd->len = 4;
		cmd++;

		/* Read the GENP3 register */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_GENP_REG3;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
		cmd->len = 4;
		cmd++;

		/* Read the DEVCMD4 register */
		cmd->cmd = 0;
		cmd->src = MSM_NAND_DEV_CMD4;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
		cmd->len = 4;
		cmd++;

		/************************************************************/
		/* Restore the necessary registers to proper values         */
		/************************************************************/

		/* Block on cmd ready and write CMD register */
		cmd->cmd = DST_CRCI_NAND_CMD;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
		cmd->dst = MSM_NAND_SFLASHC_CMD;
		cmd->len = 4;
		cmd++;

		/* Kick the execute command */
		cmd->cmd = 0;
		cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
		cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
		cmd->len = 4;
		cmd++;

		/* Block on data ready, and read the status register */
		cmd->cmd = SRC_CRCI_NAND_DATA;
		cmd->src = MSM_NAND_SFLASHC_STATUS;
		cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
		cmd->len = 4;
		cmd++;

		BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));

		dma_buffer->cmd[0].cmd |= CMD_OCB;
		cmd[-1].cmd |= CMD_OCU | CMD_LC;

		dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
				>> 3) | CMD_PTR_LP;

		mb();
		msm_dmov_exec_cmd(chip->dma_channel,
			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
		mb();

		write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
		interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
		controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;

#if VERBOSE
		pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
					dma_buffer->data.sfstat[0],
					dma_buffer->data.sfstat[1],
					dma_buffer->data.sfstat[2],
					dma_buffer->data.sfstat[3]);
		pr_info("%s: controller_status = %x\n", __func__,
					controller_status);
		pr_info("%s: interrupt_status = %x\n", __func__,
					interrupt_status);
		pr_info("%s: write_prot_status = %x\n", __func__,
					write_prot_status);
#endif
		/* Check for errors, protection violations etc */
		if ((controller_status != 0)
				|| (dma_buffer->data.sfstat[0] & 0x110)
				|| (dma_buffer->data.sfstat[1] & 0x110)
				|| (dma_buffer->data.sfstat[2] & 0x110)
				|| (dma_buffer->data.sfstat[3] & 0x110)) {
			pr_err("%s: ECC/MPU/OP error\n", __func__);
			err = -EIO;
		}

		if (!(write_prot_status & ONENAND_WP_LS)) {
			pr_err("%s: Unexpected status ofs = 0x%llx, "
					"wp_status = %x\n",
					__func__, ofs, write_prot_status);
			err = -EIO;
		}

		if (err)
			break;
	}

	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

#if VERBOSE
	pr_info("\n%s: ret %d\n", __func__, err);
	pr_info("===================================================="
			"=============\n");
#endif
	return err;
}

static int msm_onenand_suspend(struct mtd_info *mtd)
{
	return 0;
}

static void msm_onenand_resume(struct mtd_info *mtd)
{
}

int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
{
	struct msm_nand_chip *chip = mtd->priv;

	/* Probe and check whether onenand device is present */
	if (flash_onenand_probe(chip))
		return -ENODEV;

	mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
	mtd->writesize = onenand_info.data_buf_size;
	mtd->oobsize = mtd->writesize >> 5;
	mtd->erasesize = mtd->writesize << 6;
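
	/*
	 * Worked example of the geometry above (hypothetical values): a
	 * value of 2 in bits 7:4 of device_id gives 0x1000000 << 2 = 64 MiB;
	 * with a 2048-byte data buffer this yields oobsize = 2048 >> 5 = 64
	 * bytes and erasesize = 2048 << 6 = 128 KiB.
	 */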
	mtd->oobavail = msm_onenand_oob_64.oobavail;
	mtd->ecclayout = &msm_onenand_oob_64;

	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->_erase = msm_onenand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_read = msm_onenand_read;
	mtd->_write = msm_onenand_write;
	mtd->_read_oob = msm_onenand_read_oob;
	mtd->_write_oob = msm_onenand_write_oob;
	mtd->_lock = msm_onenand_lock;
	mtd->_unlock = msm_onenand_unlock;
	mtd->_suspend = msm_onenand_suspend;
	mtd->_resume = msm_onenand_resume;
	mtd->_block_isbad = msm_onenand_block_isbad;
	mtd->_block_markbad = msm_onenand_block_markbad;
	mtd->owner = THIS_MODULE;

	pr_info("Found a supported onenand device\n");

	return 0;
}
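
/*
 * Hardware IDs (MSM_NAND_HW_INFO values) of controllers known to carry a
 * BCH ECC engine; msm_nand_scan() below enables 8-bit ECC only when the
 * probed ID matches one of these and the device reports >= 8-bit
 * correctability.
 */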
static const unsigned int bch_sup_cntrl[] = {
	0x307,  /* MSM7x2xA */
	0x4030, /* MDM 9x15 */
};

static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
		if (hw_id == bch_sup_cntrl[i])
			return true;
	}

	return false;
}

/**
 * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
 * @param mtd		MTD device structure
 * @param maxchips	Number of chips to scan for
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values.
 */
int msm_nand_scan(struct mtd_info *mtd, int maxchips)
{
	struct msm_nand_chip *chip = mtd->priv;
	uint32_t flash_id = 0, i, mtd_writesize;
	uint8_t dev_found = 0;
	uint8_t wide_bus;
	uint32_t manid;
	uint32_t devid;
	uint32_t devcfg;
	struct nand_flash_dev *flashdev = NULL;
	struct nand_manufacturers *flashman = NULL;
	unsigned int hw_id;

	/* Probe the Flash device for ONFI compliance */
	if (!flash_onfi_probe(chip)) {
		dev_found = 1;
	} else {
		/* Read the Flash ID from the Nand Flash Device */
		flash_id = flash_read_id(chip);
		manid = flash_id & 0xFF;
		devid = (flash_id >> 8) & 0xFF;
		devcfg = (flash_id >> 24) & 0xFF;

		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
			if (nand_manuf_ids[i].id == manid)
				flashman = &nand_manuf_ids[i];
		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
			if (nand_flash_ids[i].id == devid)
				flashdev = &nand_flash_ids[i];
		if (!flashdev || !flashman) {
			pr_err("ERROR: unknown nand device manuf=%x devid=%x\n",
				manid, devid);
			return -ENOENT;
		} else
			dev_found = 1;

		if (!flashdev->pagesize) {
			supported_flash.flash_id = flash_id;
			supported_flash.density = flashdev->chipsize << 20;
			supported_flash.widebus = devcfg & (1 << 6) ? 1 : 0;
			supported_flash.pagesize = 1024 << (devcfg & 0x3);
			supported_flash.blksize = (64 * 1024) <<
						((devcfg >> 4) & 0x3);
			supported_flash.oobsize = (8 << ((devcfg >> 2) & 0x3)) *
						(supported_flash.pagesize >> 9);

			if ((supported_flash.oobsize > 64) &&
				(supported_flash.pagesize == 2048)) {
				pr_info("msm_nand: Found a 2K page device with"
					" %d oobsize - changing oobsize to 64 "
					"bytes.\n", supported_flash.oobsize);
				supported_flash.oobsize = 64;
			}
		} else {
			supported_flash.flash_id = flash_id;
			supported_flash.density = flashdev->chipsize << 20;
			supported_flash.widebus = flashdev->options &
						NAND_BUSWIDTH_16 ? 1 : 0;
			supported_flash.pagesize = flashdev->pagesize;
			supported_flash.blksize = flashdev->erasesize;
			supported_flash.oobsize = flashdev->pagesize >> 5;
		}
	}
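
	/*
	 * Illustration of the extended-ID decode above (hypothetical 4th ID
	 * byte devcfg = 0x95): pagesize = 1024 << 1 = 2048, blksize =
	 * 64 KiB << 1 = 128 KiB, oobsize = (8 << 1) * (2048 >> 9) = 64
	 * bytes, and bit 6 clear selects an 8-bit bus.
	 */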

	if (dev_found) {
		i = (!interleave_enable) ? 1 : 2;
		wide_bus = supported_flash.widebus;
		mtd->size = supported_flash.density * i;
		mtd->writesize = supported_flash.pagesize * i;
		mtd->oobsize = supported_flash.oobsize * i;
		mtd->erasesize = supported_flash.blksize * i;
		mtd->writebufsize = mtd->writesize;

		if (!interleave_enable)
			mtd_writesize = mtd->writesize;
		else
			mtd_writesize = mtd->writesize >> 1;

		/* Check whether controller and NAND device support 8-bit ECC */
		hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
		if (msm_nand_has_bch_ecc_engine(hw_id)
			&& (supported_flash.ecc_correctability >= 8)) {
			pr_info("Found supported NAND device for %dbit ECC\n",
				supported_flash.ecc_correctability);
			enable_bch_ecc = 1;
		} else {
			pr_info("Found a supported NAND device\n");
		}
		pr_info("NAND Controller ID : 0x%x\n", hw_id);
		pr_info("NAND Device ID : 0x%x\n", supported_flash.flash_id);
		pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
		pr_info("Density : %lld MByte\n", (mtd->size>>20));
		pr_info("Pagesize : %d Bytes\n", mtd->writesize);
		pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
		pr_info("Oobsize : %d Bytes\n", mtd->oobsize);
	} else {
		pr_err("Unsupported NAND, Id: 0x%x\n", flash_id);
		return -ENODEV;
	}

	/* Each codeword is 532 bytes in case of 8-bit BCH ECC */
	chip->cw_size = enable_bch_ecc ? 532 : 528;
	chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
		| (516 << 9)  /* 516 user data bytes */
		| (10 << 19)  /* 10 parity bytes */
		| (5 << 27)   /* 5 address cycles */
		| (0 << 30)   /* Do not read status before data */
		| (1 << 31)   /* Send read cmd */
		/* 0 spare bytes for 16 bit nand or 1/2 spare bytes for 8 bit */
		| (wide_bus ? 0 << 23 : (enable_bch_ecc ? 2 << 23 : 1 << 23));
	chip->CFG1 = (0 << 0)  /* Enable ecc */
		| (7 << 2)     /* 8 recovery cycles */
		| (0 << 5)     /* Allow CS deassertion */
		/* Bad block marker location */
		| ((mtd_writesize - (chip->cw_size * (
			(mtd_writesize >> 9) - 1)) + 1) << 6)
		| (0 << 16)    /* Bad block in user data area */
		| (2 << 17)    /* 6 cycle tWB/tRB */
		| ((wide_bus) ? CFG1_WIDE_FLASH : 0); /* Wide flash bit */
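
	/*
	 * Worked example for the fields above (2 KiB page, x8, default
	 * 4-bit RS ECC): cw_size = 528, so CFG0 encodes (2048 >> 9) - 1 = 3
	 * (4 codewords per page) and the CFG1 bad-block byte field becomes
	 * 2048 - 528 * 3 + 1 = 465.
	 */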
	chip->ecc_buf_cfg = 0x203;
	chip->CFG0_RAW = 0xA80420C0;
	chip->CFG1_RAW = 0x5045D;

	if (enable_bch_ecc) {
		chip->CFG1 |= (1 << 27); /* Enable BCH engine */
		chip->ecc_bch_cfg = (0 << 0) /* Enable ECC */
			| (0 << 1)  /* Enable/Disable SW reset of ECC engine */
			| (1 << 4)  /* 8-bit ECC */
			| ((wide_bus) ? (14 << 8) : (13 << 8)) /* Parity bytes */
			| (516 << 16) /* 516 user data bytes */
			| (1 << 30); /* Turn on ECC engine clocks always */
		chip->CFG0_RAW = 0xA80428C0; /* CW size is increased to 532B */
	}

	/*
	 * For 4-bit RS ECC (default ECC), parity bytes = 10 (for x8 and x16 I/O)
	 * For 8-bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
	 */
	chip->ecc_parity_bytes = enable_bch_ecc ? (wide_bus ? 14 : 13) : 10;

	pr_info("CFG0 Init : 0x%08x\n", chip->CFG0);
	pr_info("CFG1 Init : 0x%08x\n", chip->CFG1);
	pr_info("ECCBUFCFG : 0x%08x\n", chip->ecc_buf_cfg);

	if (mtd->oobsize == 64) {
		mtd->oobavail = msm_nand_oob_64.oobavail;
		mtd->ecclayout = &msm_nand_oob_64;
	} else if (mtd->oobsize == 128) {
		mtd->oobavail = msm_nand_oob_128.oobavail;
		mtd->ecclayout = &msm_nand_oob_128;
	} else if (mtd->oobsize == 224) {
		mtd->oobavail = wide_bus ? msm_nand_oob_224_x16.oobavail :
			msm_nand_oob_224_x8.oobavail;
		mtd->ecclayout = wide_bus ? &msm_nand_oob_224_x16 :
			&msm_nand_oob_224_x8;
	} else {
		pr_err("Unsupported NAND, oobsize: 0x%x\n",
			mtd->oobsize);
		return -ENODEV;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	/* mtd->ecctype = MTD_ECC_SW; */
	mtd->_erase = msm_nand_erase;
	mtd->_block_isbad = msm_nand_block_isbad;
	mtd->_block_markbad = msm_nand_block_markbad;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_read = msm_nand_read;
	mtd->_write = msm_nand_write;
	mtd->_read_oob = msm_nand_read_oob;
	mtd->_write_oob = msm_nand_write_oob;
	if (dual_nand_ctlr_present) {
		mtd->_read_oob = msm_nand_read_oob_dualnandc;
		mtd->_write_oob = msm_nand_write_oob_dualnandc;
		if (interleave_enable) {
			mtd->_erase = msm_nand_erase_dualnandc;
			mtd->_block_isbad = msm_nand_block_isbad_dualnandc;
		}
	}

	/* mtd->sync = msm_nand_sync; */
	mtd->_lock = NULL;
	/* mtd->_unlock = msm_nand_unlock; */
	mtd->_suspend = msm_nand_suspend;
	mtd->_resume = msm_nand_resume;
	mtd->owner = THIS_MODULE;

	/* Unlock whole block */
	/* msm_nand_unlock_all(mtd); */

	/* return this->scan_bbt(mtd); */
	return 0;
}
EXPORT_SYMBOL_GPL(msm_nand_scan);

/**
 * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
 * @param mtd		MTD device structure
 */
void msm_nand_release(struct mtd_info *mtd)
{
	/* struct msm_nand_chip *this = mtd->priv; */

	/* Deregister the device */
	mtd_device_unregister(mtd);
}
EXPORT_SYMBOL_GPL(msm_nand_release);

struct msm_nand_info {
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct msm_nand_chip msm_nand;
};

/* duplicating the NC01 XFR contents to NC10 */
static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
{
	struct msm_nand_chip *chip = mtd->priv;

	struct {
		dmov_s cmd[2];
		unsigned cmdptr;
	} *dma_buffer;
	dmov_s *cmd;

	wait_event(chip->wait_queue,
		(dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));
	cmd = dma_buffer->cmd;

	/* Copying XFR register contents from NC01 --> NC10 */
	cmd->cmd = 0;
	cmd->src = NC01(MSM_NAND_XFR_STEP1);
	cmd->dst = NC10(MSM_NAND_XFR_STEP1);
	cmd->len = 28;
	cmd++;

	BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->cmd[0].cmd |= CMD_OCB;
	cmd[-1].cmd |= CMD_OCU | CMD_LC;
	dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
				| CMD_PTR_LP;

	mb();
	msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
			| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
			&dma_buffer->cmdptr)));
	mb();
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));

	return 0;
}
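
/*
 * Partition offsets/sizes in the platform data appear to be specified in
 * units of erase blocks (they are scaled by erasesize below); e.g.,
 * hypothetically, offset 4 with a 128 KiB erasesize becomes a 512 KiB
 * byte offset before registration.
 */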
static int setup_mtd_device(struct platform_device *pdev,
			struct msm_nand_info *info)
{
	int i, err;
	struct flash_platform_data *pdata = pdev->dev.platform_data;

	if (pdata) {
		for (i = 0; i < pdata->nr_parts; i++) {
			pdata->parts[i].offset = pdata->parts[i].offset
				* info->mtd.erasesize;
			pdata->parts[i].size = pdata->parts[i].size
				* info->mtd.erasesize;
		}
		err = mtd_device_register(&info->mtd, pdata->parts,
				pdata->nr_parts);
	} else {
		err = mtd_device_register(&info->mtd, NULL, 0);
	}
	return err;
}

static int __devinit msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int err;
	struct flash_platform_data *plat_data;

	plat_data = pdev->dev.platform_data;

	res = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "msm_nand_phys");
	if (!res || !res->start) {
		pr_err("%s: msm_nand_phys resource invalid/absent\n",
				__func__);
		return -ENODEV;
	}
	msm_nand_phys = res->start;
	pr_info("%s: phys addr 0x%lx\n", __func__, msm_nand_phys);

	res = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "msm_nandc01_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc01_phys = res->start;

	res = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "msm_nandc10_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc10_phys = res->start;

	res = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "msm_nandc11_phys");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	msm_nandc11_phys = res->start;

	res = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "ebi2_reg_base");
	if (!res || !res->start)
		goto no_dual_nand_ctlr_support;
	ebi2_register_base = res->start;

	dual_nand_ctlr_present = 1;
	if (plat_data != NULL)
		interleave_enable = plat_data->interleave;
	else
		interleave_enable = 0;

	if (!interleave_enable)
		pr_info("%s: Dual Nand Ctrl in ping-pong mode\n", __func__);
	else
		pr_info("%s: Dual Nand Ctrl in interleave mode\n", __func__);
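
	/*
	 * Jumped to when any of the NC01/NC10/NC11 or EBI2 resources is
	 * absent, leaving dual_nand_ctlr_present unset so the driver runs
	 * with a single controller; the dual-controller path above also
	 * falls through to this point.
	 */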
no_dual_nand_ctlr_support:
	res = platform_get_resource_byname(pdev,
					IORESOURCE_DMA, "msm_nand_dmac");
	if (!res || !res->start) {
		pr_err("%s: invalid msm_nand_dmac resource\n", __func__);
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct msm_nand_info), GFP_KERNEL);
	if (!info) {
		pr_err("%s: No memory for msm_nand_info\n", __func__);
		return -ENOMEM;
	}

	info->msm_nand.dev = &pdev->dev;

	init_waitqueue_head(&info->msm_nand.wait_queue);

	info->msm_nand.dma_channel = res->start;
	pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);

	/* this currently fails if dev is passed in */
	info->msm_nand.dma_buffer =
		dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
				&info->msm_nand.dma_addr, GFP_KERNEL);
	if (info->msm_nand.dma_buffer == NULL) {
		pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
		err = -ENOMEM;
		goto out_free_info;
	}

	pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
		__func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);

	/* Let default be VERSION_1 for backward compatibility */
	info->msm_nand.uncorrectable_bit_mask = BIT(3);
	info->msm_nand.num_err_mask = 0x7;

	if (plat_data && (plat_data->version == VERSION_2)) {
		info->msm_nand.uncorrectable_bit_mask = BIT(8);
		info->msm_nand.num_err_mask = 0x1F;
	}

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = &info->msm_nand;
	info->mtd.owner = THIS_MODULE;

	/* Configure the EBI2_CFG register only in ping-pong mode */
	if (!interleave_enable && dual_nand_ctlr_present)
		flash_wr_reg(&info->msm_nand, EBI2_CFG_REG, 0x4010080);

	if (dual_nand_ctlr_present)
		msm_nand_nc10_xfr_settings(&info->mtd);

	if (msm_nand_scan(&info->mtd, 1))
		if (msm_onenand_scan(&info->mtd, 1)) {
			pr_err("%s: No nand device found\n", __func__);
			err = -ENXIO;
			goto out_free_dma_buffer;
		}

	err = setup_mtd_device(pdev, info);
	if (err < 0) {
		pr_err("%s: setup_mtd_device failed with err=%d\n",
				__func__, err);
		goto out_free_dma_buffer;
	}

	dev_set_drvdata(&pdev->dev, info);

	return 0;

out_free_dma_buffer:
	dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
			info->msm_nand.dma_buffer,
			info->msm_nand.dma_addr);
out_free_info:
	kfree(info);

	return err;
}

static int __devexit msm_nand_remove(struct platform_device *pdev)
{
	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);

	if (info) {
		msm_nand_release(&info->mtd);
		dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
				info->msm_nand.dma_buffer,
				info->msm_nand.dma_addr);
		kfree(info);
	}

	return 0;
}

#define DRIVER_NAME "msm_nand"

static struct platform_driver msm_nand_driver = {
	.probe = msm_nand_probe,
	.remove = __devexit_p(msm_nand_remove),
	.driver = {
		.name = DRIVER_NAME,
	}
};

MODULE_ALIAS(DRIVER_NAME);

static int __init msm_nand_init(void)
{
	return platform_driver_register(&msm_nand_driver);
}

static void __exit msm_nand_exit(void)
{
	platform_driver_unregister(&msm_nand_driver);
}

module_init(msm_nand_init);
module_exit(msm_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("msm_nand flash driver code");