aml_nand.c 166 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761
  1. // linux/drivers/amlogic/nand/aml_nand.c
  2. #include <linux/module.h>
  3. #include <linux/types.h>
  4. #include <linux/init.h>
  5. #include <linux/kernel.h>
  6. #include <linux/string.h>
  7. #include <linux/ioport.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/delay.h>
  10. #include <linux/err.h>
  11. #include <linux/slab.h>
  12. #include <linux/io.h>
  13. #include <linux/bitops.h>
  14. #include <linux/crc32.h>
  15. #include <linux/fs.h>
  16. #include <asm/uaccess.h>
  17. #include <linux/mtd/mtd.h>
  18. #include <linux/mtd/nand.h>
  19. #include <linux/mtd/nand_ecc.h>
  20. #include <linux/mtd/partitions.h>
  21. #include <mach/nand.h>
  22. #define NAND_DEBUG
  23. #ifdef NAND_DEBUG
  24. #define aml_nand_debug(a...) {printk("%s()[%s,%d]",__func__,__FILE__,__LINE__); printk(a);}
  25. #define aml_nand_debug2(a...) //printk(a)
  26. #else
  27. #define aml_nand_debug(a...)
  28. #define aml_nand_debug2(a...)
  29. #endif
/* Human-readable names for the BCH/ECC modes, indexed by mode number
 * (used in informational printks elsewhere in this driver). */
static char *aml_nand_bch_string[]={
	"NAND_SOFT_MODE",
	"NAND_BCH9_MODE",
	"NAND_BCH8_MODE",
	"NAND_BCH12_MODE",
	"NAND_BCH16_MODE",
};
/* Human-readable names for the plane configuration, indexed by plane mode. */
static char *aml_nand_plane_string[]={
	"NAND_SINGLE_PLANE_MODE",
	"NAND_TWO_PLANE_MODE",
};
/* Human-readable names for the internal-chip interleaving configuration. */
static char *aml_nand_internal_string[]={
	"NAND_NONE_INTERLEAVING_MODE",
	"NAND_INTERLEAVING_MODE",
};
/* OOB layout for 64-byte spare area: bytes 4..63 hold ECC (60 bytes),
 * bytes 0..3 are free for user/bad-block data. */
static struct nand_ecclayout aml_nand_oob_64 = {
	.eccbytes = 60,
	.eccpos = {
		4, 5, 6, 7, 8, 9, 10, 11,
		12, 13, 14, 15, 16, 17, 18, 19,
		20, 21, 22, 23, 24, 25, 26, 27,
		28, 29, 30, 31, 32, 33, 34, 35,
		36, 37, 38, 39, 40, 41, 42, 43,
		44, 45, 46, 47, 48, 49, 50, 51,
		52, 53, 54, 55, 56, 57, 58, 59,
		60, 61, 62, 63},
	.oobfree = {
		{.offset = 0,
		 .length = 4}}
};
/* OOB layout used for the u-boot area: 84 ECC bytes, 6 free bytes at
 * offset 0.  NOTE(review): .eccpos is left unfilled here (hardware ECC
 * presumably does not need it — confirm against the controller code). */
static struct nand_ecclayout aml_nand_uboot_oob = {
	.eccbytes = 84,
	.oobfree = {
		{.offset = 0,
		 .length = 6}}
};
/* 64-byte OOB layout variant with a larger (8-byte) free region,
 * leaving 56 bytes for ECC. */
static struct nand_ecclayout aml_nand_oob_64_2info = {
	.eccbytes = 56,
	.oobfree = {
		{.offset = 0,
		 .length = 8}}
};
/* OOB layout for 128-byte spare area: 120 ECC bytes, 8 free bytes. */
static struct nand_ecclayout aml_nand_oob_128 = {
	.eccbytes = 120,
	.oobfree = {
		{.offset = 0,
		 .length = 8}}
};
/* OOB layout for 218-byte spare area: 200 ECC bytes, 8 free bytes. */
static struct nand_ecclayout aml_nand_oob_218 = {
	.eccbytes = 200,
	.oobfree = {
		{.offset = 0,
		 .length = 8}}
};
/* OOB layout for 224-byte spare area: 208 ECC bytes, 8 free bytes. */
static struct nand_ecclayout aml_nand_oob_224 = {
	.eccbytes = 208,
	.oobfree = {
		{.offset = 0,
		 .length = 8}}
};
/* OOB layout for 256-byte spare area: 240 ECC bytes, 16 free bytes. */
static struct nand_ecclayout aml_nand_oob_256 = {
	.eccbytes = 240,
	.oobfree = {
		{.offset = 0,
		 .length = 16}}
};
/* OOB layout for 376-byte spare area: 352 ECC bytes, 16 free bytes. */
static struct nand_ecclayout aml_nand_oob_376 = {
	.eccbytes = 352,
	.oobfree = {
		{.offset = 0,
		 .length = 16}}
};
/* OOB layout for 436-byte spare area.
 * NOTE(review): .eccbytes is 352, identical to aml_nand_oob_376 even though
 * this spare area is 60 bytes larger — confirm this is intentional (the
 * controller may simply not use the extra bytes). */
static struct nand_ecclayout aml_nand_oob_436 = {
	.eccbytes = 352,
	.oobfree = {
		{.offset = 0,
		 .length = 16}}
};
/* OOB layout for 448-byte spare area: 416 ECC bytes, 16 free bytes. */
static struct nand_ecclayout aml_nand_oob_448 = {
	.eccbytes = 416,
	.oobfree = {
		{.offset = 0,
		 .length = 16}}
};
/* OOB layout for 640-byte spare area: 608 ECC bytes, 16 free bytes. */
static struct nand_ecclayout aml_nand_oob_640 = {
	.eccbytes = 608,
	.oobfree = {
		{.offset = 0,
		 .length = 16}}
};
/* Usable environment-partition payload size: total ENV_SIZE minus the
 * bad-block-table bookkeeping stored alongside it. */
static unsigned default_environment_size = (ENV_SIZE - sizeof(struct aml_nand_bbt_info));
/* Non-zero when the system booted from NAND — presumably set during probe;
 * TODO(review): confirm where this is written. */
static uint8_t nand_boot_flag = 0;
/* Early-suspend state flag.  NOTE(review): "erarly" is a typo for "early",
 * kept as-is because the identifier may be referenced later in this file. */
static uint8_t nand_erarly_suspend_flag = 0;
/* Per-ONFI-timing-mode cycle-time table, indexed by mode 0..5.
 * NOTE(review): units look like tens of ns per cycle — confirm against the
 * controller timing setup code. */
static uint8_t nand_mode_time[6] = {9, 7, 6, 5, 5, 4};
/* Forward declaration: environment writer defined later in this file. */
static int aml_nand_update_env(struct mtd_info *mtd);
//static void aml_nand_cmdfunc(struct mtd_info *mtd, unsigned command, int column, int page_addr);
/*
 * Table of supported NAND parts, matched by manufacturer/device ID bytes.
 * NOTE(review): field order appears to be { name, id[], page size (bytes),
 * chip size (MB), erase block size (bytes), oob size (bytes), internal chip
 * count, T_REA, T_RHOH, onfi timing mode, option flags } — confirm against
 * struct aml_nand_flash_dev in mach/nand.h, which is not visible here.
 * Entries inside NEW_NAND_SUPPORT need read-retry and keep two-plane mode
 * disabled (see per-entry comments).  The list is NULL-terminated.
 */
struct aml_nand_flash_dev aml_nand_flash_ids[] = {
	{"A revision NAND 2GiB H27UAG8T2A", {NAND_MFR_HYNIX, 0xd5, 0x94, 0x25, 0x44, 0x41}, 4096, 2048, 0x80000, 224, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"A revision NAND 4GiB H27UBG8T2A", {NAND_MFR_HYNIX, 0xd7, 0x94, 0x9a, 0x74, 0x42}, 8192, 4096, 0x200000, 448, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"B revision NAND 2GiB H27UAG8T2B", {NAND_MFR_HYNIX, 0xd5, 0x94, 0x9a, 0x74, 0x42}, 8192, 2048, 0x200000, 448, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
#ifdef NEW_NAND_SUPPORT
	{"B revision NAND 4GiB H27UBG8T2B", {NAND_MFR_HYNIX, 0xd7, 0x94, 0xda, 0x74, 0xc3}, 8192, 4096, 0x200000, 640, 1, 16, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE)}, //need readretry, disable two plane mode
	{"B revision NAND 8GiB H27UCG8T2M", {NAND_MFR_HYNIX, 0xde, 0x94, 0xd2, 0x04, 0x43}, 8192, 8192, 0x200000, 448, 1, 16, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE)}, //need readretry, disable two plane mode
#endif
	{"A revision NAND 4GiB MT29F32G-A", {NAND_MFR_MICRON, 0xd7, 0x94, 0x3e, 0x84}, 4096, 4096, 0x80000, 218, 1, 16, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"A revision NAND 16GiB MT29F128G-A", {NAND_MFR_MICRON, 0xd9, 0xd5, 0x3e, 0x88}, 4096, 16384, 0x80000, 218, 1, 16, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"B revision NAND 4GiB MT29F32G-B", {NAND_MFR_MICRON, 0x68, 0x04, 0x46, 0x89}, 4096, 4096, 0x100000, 224, 1, 20, 15, 4, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"B revision NAND 16GiB MT29F128G-B", {NAND_MFR_MICRON, 0x88, 0x05, 0xc6, 0x89}, 4096, 16384, 0x100000, 224, 1, 20, 15, 4, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"C revision NAND 4GiB MT29F32G-C", {NAND_MFR_MICRON, 0x68, 0x04, 0x4a, 0xa9}, 4096, 4096, 0x100000, 224, 1, 16, 15, 5, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"C revision NAND 8GiB MT29F64G-C", {NAND_MFR_MICRON, 0x88, 0x04, 0x4b, 0xa9}, 8192, 8192, 0x200000, 448, 1, 16, 15, 5, (NAND_TIMING_MODE5 | NAND_ECC_BCH30_1K_MODE | NAND_TWO_PLANE_MODE)},
	{"C revision NAND 32GiB MT29F256G-C", {NAND_MFR_MICRON, 0xa8, 0x05, 0xcb, 0xa9}, 8192, 32768, 0x200000, 448, 2, 16, 15, 5, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE | NAND_INTERLEAVING_MODE)},
	{"1 Generation NAND 4GiB JS29F32G08AA-1", {NAND_MFR_INTEL, 0x68, 0x04, 0x46, 0xA9}, 4096, 4096, 0x100000, 218, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"1 Generation NAND 8GiB JS29F64G08AA-1", {NAND_MFR_INTEL, 0x88, 0x24, 0x4b, 0xA9}, 8192, 8192, 0x200000, 448, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"E serials NAND 2GiB TC58NVG4D2ETA00", {NAND_MFR_TOSHIBA, 0xD5, 0x94, 0x32, 0x76, 0x54}, 8192, 2048, 0x100000, 376, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"E serials NAND 4GiB TC58NVG5D2ETA00", {NAND_MFR_TOSHIBA, 0xD7, 0x94, 0x32, 0x76, 0x54}, 8192, 4096, 0x100000, 376, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"F serials NAND 2GiB TC58NVG4D2FTA00", {NAND_MFR_TOSHIBA, 0xD5, 0x94, 0x32, 0x76, 0x55}, 8192, 2076, 0x100000, 448, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"F serials NAND 4GiB TC58NVG5D2FTA00", {NAND_MFR_TOSHIBA, 0xD7, 0x94, 0x32, 0x76, 0x55}, 8192, 4096, 0x100000, 448, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"F serials NAND 8GiB TC58NVG6D2FTA00", {NAND_MFR_TOSHIBA, 0xDE, 0x94, 0x32, 0x76, 0x55}, 8192, 8192, 0x100000, 448, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"F serials NAND 8GiB TH58NVG7D2FTA20", {NAND_MFR_TOSHIBA, 0xDE, 0x95, 0x32, 0x7a, 0x55}, 8192, 8200, 0x100000, 448, 2, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE | NAND_INTERLEAVING_MODE)},
#ifdef NEW_NAND_SUPPORT
	{"F serials NAND 4GiB TC58NVG5D2HTA00", {NAND_MFR_TOSHIBA, 0xD7, 0x94, 0x32, 0x76, 0x56}, 8192, 4096, 0x100000, 640, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE )}, //need readretry, disable two plane mode
	{"F serials NAND 8GiB TC58NVG6D2GTA00", {NAND_MFR_TOSHIBA, 0xDE, 0x94, 0x82, 0x76, 0x56}, 8192, 8192, 0x200000, 640, 1, 20, 25, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE )}, //need readretry, disable two plane mode
#endif
	{"M Generation NAND 2GiB K9GAG08U0M", {NAND_MFR_SAMSUNG, 0xD5, 0x14, 0xb6, 0x74}, 4096, 2048, 0x80000, 128, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH8_MODE)},
	{"5 Generation NAND 2GiB K9GAG08X0D", {NAND_MFR_SAMSUNG, 0xD5, 0x94, 0x29, 0x34, 0x41}, 4096, 2048, 0x80000, 218, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"6 Generation NAND 2GiB K9GAG08U0E", {NAND_MFR_SAMSUNG, 0xD5, 0x84, 0x72, 0x50, 0x42}, 8192, 2048, 0x100000, 436, 1, 25, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE)},
	{"7 Generation NAND 2GiB K9GAG08U0F", {NAND_MFR_SAMSUNG, 0xD5, 0x94, 0x76, 0x54, 0x43}, 8192, 2048, 0x100000, 512, 1, 25, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"6 Generation NAND 4GiB K9LBG08U0E", {NAND_MFR_SAMSUNG, 0xD7, 0xC5, 0x72, 0x54, 0x42}, 8192, 4096, 0x100000, 436, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"6 Generation NAND 8GiB K9HCG08U0E", {NAND_MFR_SAMSUNG, 0xDE, 0xC5, 0x72, 0x54, 0x42}, 8192, 8192, 0x100000, 436, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH12_MODE | NAND_TWO_PLANE_MODE)},
	{"2 Generation NAND 4GiB K9GBG08U0A", {NAND_MFR_SAMSUNG, 0xD7, 0x94, 0x7a, 0x54, 0x43}, 8192, 4152, 0x100000, 640, 1, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE)},
	{"2 Generation NAND 8GiB K9LCG08U0A", {NAND_MFR_SAMSUNG, 0xDE, 0xD5, 0x7a, 0x58, 0x43}, 8192, 8304, 0x100000, 640, 2, 20, 15, 0, (NAND_TIMING_MODE5 | NAND_ECC_BCH16_MODE | NAND_TWO_PLANE_MODE | NAND_INTERLEAVING_MODE)},
	{NULL,}
};
  163. uint8_t aml_nand_get_onfi_features(struct aml_nand_chip *aml_chip, uint8_t *buf, int addr)
  164. {
  165. struct nand_chip *chip = &aml_chip->chip;
  166. struct mtd_info *mtd = &aml_chip->mtd;
  167. int i, j;
  168. for (i=0; i<aml_chip->chip_num; i++) {
  169. if (aml_chip->valid_chip[i]) {
  170. aml_chip->aml_nand_select_chip(aml_chip, i);
  171. aml_chip->aml_nand_command(aml_chip, NAND_CMD_GET_FEATURES, addr, -1, i);
  172. for (j=0; j<4; j++)
  173. buf[j] = chip->read_byte(mtd);
  174. }
  175. }
  176. return 0;
  177. }
  178. void aml_nand_set_onfi_features(struct aml_nand_chip *aml_chip, uint8_t *buf, int addr)
  179. {
  180. int i, j;
  181. for (i=0; i<aml_chip->chip_num; i++) {
  182. if (aml_chip->valid_chip[i]) {
  183. aml_chip->aml_nand_select_chip(aml_chip, i);
  184. aml_chip->aml_nand_command(aml_chip, NAND_CMD_SET_FEATURES, addr, -1, i);
  185. for (j=0; j<4; j++)
  186. aml_chip->aml_nand_write_byte(aml_chip, buf[j]);
  187. aml_chip->aml_nand_wait_devready(aml_chip, i);
  188. }
  189. }
  190. }
  191. static void aml_platform_get_user_byte(struct aml_nand_chip *aml_chip, unsigned char *oob_buf, int byte_num)
  192. {
  193. int read_times = 0;
  194. unsigned int len = PER_INFO_BYTE/sizeof(unsigned int);
  195. while (byte_num > 0) {
  196. *oob_buf++ = (aml_chip->user_info_buf[read_times*len] & 0xff);
  197. byte_num--;
  198. if (aml_chip->user_byte_mode == 2) {
  199. *oob_buf++ = ((aml_chip->user_info_buf[read_times*len] >> 8) & 0xff);
  200. byte_num--;
  201. }
  202. read_times++;
  203. }
  204. }
  205. static void aml_platform_set_user_byte(struct aml_nand_chip *aml_chip, unsigned char *oob_buf, int byte_num)
  206. {
  207. int write_times = 0;
  208. unsigned int len = PER_INFO_BYTE/sizeof(unsigned int);
  209. while (byte_num > 0) {
  210. aml_chip->user_info_buf[write_times*len] = *oob_buf++;
  211. byte_num--;
  212. if (aml_chip->user_byte_mode == 2) {
  213. aml_chip->user_info_buf[write_times*len] |= (*oob_buf++ << 8);
  214. byte_num--;
  215. }
  216. write_times++;
  217. }
  218. }
  219. #ifdef NEW_NAND_SUPPORT
  220. /*****************************HYNIX******************************************/
/*
 * Read `cnt` Hynix internal register values (read-retry / SLC tuning
 * registers) from chip `chipnr` into `buf`; `addr` lists the register
 * addresses to read.  Does nothing (returns 0) unless new_nand_info.type
 * is in the supported range 1..10.  Always returns 0.
 */
uint8_t aml_nand_get_reg_value_hynix(struct aml_nand_chip *aml_chip, uint8_t *buf, uint8_t *addr, int chipnr, int cnt)
{
	struct nand_chip *chip = &aml_chip->chip;
	struct mtd_info *mtd = &aml_chip->mtd;
	int j;

	if((aml_chip->new_nand_info.type == 0) ||(aml_chip->new_nand_info.type > 10))
		return 0;
	printk("Enter %s\n", __func__);
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	aml_chip->aml_nand_command(aml_chip, NAND_CMD_HYNIX_GET_VALUE, -1, -1, chipnr);
	for (j=0; j<cnt; j++){
		/* latch the register address on the ALE cycle, then read its value */
		chip->cmd_ctrl(mtd, addr[j], NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE);
		udelay(2);	/* short settle delay between address and data */
		buf[j] = chip->read_byte(mtd);
		udelay(2);
		printk("%s, REG(0x%x): value:0x%x, for chip[%d]\n", __func__, addr[j], buf[j], chipnr);
	}
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	return 0;
}
/*
 * Write `cnt` values from `buf` into the Hynix internal registers whose
 * addresses are listed in `addr`, on chip `chipnr`, using the vendor
 * SET_VALUE_START/END command pair.  Does nothing (returns 0) unless
 * new_nand_info.type is in the supported range 1..10.  Always returns 0.
 */
uint8_t aml_nand_set_reg_value_hynix(struct aml_nand_chip *aml_chip, uint8_t *buf, uint8_t *addr, int chipnr, int cnt)
{
	struct nand_chip *chip = &aml_chip->chip;
	struct mtd_info *mtd = &aml_chip->mtd;
	int j;

	if((aml_chip->new_nand_info.type == 0) ||(aml_chip->new_nand_info.type > 10))
		return 0;
	printk("Enter %s\n", __func__);
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	aml_chip->aml_nand_command(aml_chip, NAND_CMD_HYNIX_SET_VALUE_START, -1, -1, chipnr);
	udelay(2);
	for (j=0; j<cnt; j++){
		/* latch the register address on the ALE cycle, then write its value */
		chip->cmd_ctrl(mtd, addr[j], NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE);
		aml_chip->aml_nand_write_byte(aml_chip, buf[j]);
		printk("%s, REG(0x%x): value:0x%x for chip[%d]\n", __func__, addr[j], buf[j], chipnr);
	}
	aml_chip->aml_nand_command(aml_chip, NAND_CMD_HYNIX_SET_VALUE_END, -1, -1, chipnr);
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	return 0;
}
/*
 * Switch all valid Hynix chips into "enhanced SLC" programming mode.
 *
 * For every valid chip each SLC-mode register is written with its
 * saved factory default plus the vendor-specified offset
 * (reg_offset_value).  Paired with aml_nand_exit_enslc_mode_hynix(),
 * which restores the defaults (normal program mode).
 */
void aml_nand_enter_enslc_mode_hynix(struct mtd_info *mtd)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	unsigned char hynix_reg_program_value_tmp[ENHANCE_SLC_REG_NUM];
	struct nand_chip *chip = mtd->priv;
	int i, j;

	/* only new-style NAND types 1..10 support this mode */
	if ((aml_chip->new_nand_info.type == 0) || (aml_chip->new_nand_info.type > 10))
		return;
	printk("Enter %s\n", __func__);
	memset(&hynix_reg_program_value_tmp[0], 0, ENHANCE_SLC_REG_NUM);
	chip->select_chip(mtd, 0);
	for (i = 0; i < aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			/* SLC-mode value = factory default + per-register offset */
			for (j = 0; j < aml_chip->new_nand_info.slc_program_info.reg_cnt; j++)
				hynix_reg_program_value_tmp[j] = aml_chip->new_nand_info.slc_program_info.reg_default_value[i][j] + aml_chip->new_nand_info.slc_program_info.reg_offset_value[j];
			aml_nand_set_reg_value_hynix(aml_chip, &hynix_reg_program_value_tmp[0], &aml_chip->new_nand_info.slc_program_info.reg_addr[0], i, aml_chip->new_nand_info.slc_program_info.reg_cnt);
			udelay(10);
			/* scrub the scratch buffer before the next chip */
			memset(&hynix_reg_program_value_tmp[0], 0, aml_chip->new_nand_info.slc_program_info.reg_cnt);
		}
	}
	mdelay(2);
	//chip->select_chip(mtd, -1);
}
  284. //working in Normal program mode
  285. void aml_nand_exit_enslc_mode_hynix(struct mtd_info *mtd)
  286. {
  287. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  288. struct nand_chip *chip = mtd->priv;
  289. int i;
  290. if((aml_chip->new_nand_info.type == 0) ||(aml_chip->new_nand_info.type > 10))
  291. return;
  292. printk("Enter %s\n", __func__);
  293. chip->select_chip(mtd, 0);
  294. for (i=0; i<aml_chip->chip_num; i++) {
  295. if (aml_chip->valid_chip[i]) {
  296. aml_nand_set_reg_value_hynix(aml_chip, &aml_chip->new_nand_info.slc_program_info.reg_default_value[i][0], &aml_chip->new_nand_info.slc_program_info.reg_addr[0], i, aml_chip->new_nand_info.slc_program_info.reg_cnt);
  297. udelay(10);
  298. }
  299. }
  300. mdelay(2);
  301. //chip->select_chip(mtd, -1);
  302. }
  303. //when ecc fail,set nand retry reg
/*
 * Called after an uncorrectable ECC error: program the next row of the
 * Hynix read-retry table into the chip and advance the per-chip retry
 * counter, wrapping back to 0 after the last table entry.
 *
 * Register value = saved factory default + table offset, except that a
 * READ_RETRY_ZERO offset is a sentinel meaning "force the register to 0".
 */
void aml_nand_read_retry_handle_hynix(struct mtd_info *mtd, int chipnr)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	u8 hynix_reg_read_value[READ_RETRY_REG_NUM];
	int i, cur_cnt;

	/* only new-style NAND types 1..10 support read retry */
	if ((aml_chip->new_nand_info.type == 0) || (aml_chip->new_nand_info.type > 10))
		return;
	cur_cnt = aml_chip->new_nand_info.read_rety_info.cur_cnt[chipnr];
	printk("HYNIX NAND set partmeters here and hynix_read_retry_cnt:%d\n", cur_cnt);
	memset(&hynix_reg_read_value[0], 0, READ_RETRY_REG_NUM);
	for (i = 0; i < aml_chip->new_nand_info.read_rety_info.reg_cnt; i++) {
		//printk("reg_offset_value[%d][%d]%02x\n", cur_cnt, i, aml_chip->new_nand_info.read_rety_info.reg_offset_value[cur_cnt][i]);
		if (aml_chip->new_nand_info.read_rety_info.reg_offset_value[cur_cnt][i] == READ_RETRY_ZERO) {
			/* sentinel: this retry step wants the register zeroed */
			hynix_reg_read_value[i] = 0;
		}
		else {
			hynix_reg_read_value[i] = aml_chip->new_nand_info.read_rety_info.reg_default_value[chipnr][i] + aml_chip->new_nand_info.read_rety_info.reg_offset_value[cur_cnt][i];
		}
	}
	aml_nand_set_reg_value_hynix(aml_chip, &hynix_reg_read_value[0], &aml_chip->new_nand_info.read_rety_info.reg_addr[0], chipnr, aml_chip->new_nand_info.read_rety_info.reg_cnt);
	udelay(10);
	cur_cnt++;
	/* wrap the retry index once every table row has been tried */
	aml_chip->new_nand_info.read_rety_info.cur_cnt[chipnr] = (cur_cnt > (aml_chip->new_nand_info.read_rety_info.retry_cnt-1)) ? 0 : cur_cnt;
}
  328. void aml_nand_get_slc_default_value_hynix(struct mtd_info *mtd)
  329. {
  330. struct nand_chip *chip = mtd->priv;
  331. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  332. int i;
  333. chip->select_chip(mtd, 0);
  334. for(i=0; i<aml_chip->chip_num; i++){
  335. if(aml_chip->valid_chip[i]){
  336. aml_nand_get_reg_value_hynix(aml_chip, &aml_chip->new_nand_info.slc_program_info.reg_default_value[i][0], &aml_chip->new_nand_info.slc_program_info.reg_addr[0], i, aml_chip->new_nand_info.slc_program_info.reg_cnt);
  337. udelay(2);
  338. }
  339. }
  340. //chip->select_chip(mtd, -1);
  341. }
  342. void aml_nand_set_readretry_default_value_hynix(struct mtd_info *mtd)
  343. {
  344. unsigned char hynix_reg_read_value_tmp[READ_RETRY_REG_NUM];
  345. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  346. struct nand_chip *chip = mtd->priv;
  347. int i;
  348. if((aml_chip->new_nand_info.type == 0) ||(aml_chip->new_nand_info.type > 10))
  349. return;
  350. printk("Enter %s\n", __func__);
  351. memset(&hynix_reg_read_value_tmp[0], 0, READ_RETRY_REG_NUM);
  352. chip->select_chip(mtd, 0);
  353. for (i=0; i<aml_chip->chip_num; i++) {
  354. if (aml_chip->valid_chip[i]) {
  355. aml_nand_set_reg_value_hynix(aml_chip, &aml_chip->new_nand_info.read_rety_info.reg_default_value[i][0], &aml_chip->new_nand_info.read_rety_info.reg_addr[0], i, aml_chip->new_nand_info.read_rety_info.reg_cnt);
  356. udelay(10);
  357. //aml_nand_hynix_get_parameters(aml_chip, &hynix_reg_read_value_tmp[0], &aml_chip->hynix_reg_read_addr[0], i, 4);
  358. }
  359. }
  360. //chip->select_chip(mtd, -1);
  361. }
  362. void aml_nand_get_read_default_value_hynix(struct mtd_info *mtd)
  363. {
  364. struct mtd_oob_ops aml_oob_ops;
  365. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  366. struct nand_chip *chip = mtd->priv;
  367. size_t addr;
  368. unsigned char *data_buf;
  369. char oob_buf[4];
  370. unsigned char page_list[RETRY_NAND_COPY_NUM] = {0x07, 0x0B, 0x0F, 0x13};
  371. int error = 0, i, j, nand_type, total_blk, phys_erase_shift = fls(mtd->erasesize) - 1;
  372. data_buf = kzalloc(mtd->writesize, GFP_KERNEL);
  373. if (data_buf == NULL){
  374. printk("%s %d no mem for databuf and mtd->writesize:%d \n", __func__, __LINE__, mtd->writesize);
  375. return;
  376. }
  377. if (nand_boot_flag){
  378. addr = (1024 * mtd->writesize / aml_chip->plane_num);
  379. }
  380. else {
  381. addr = 0;
  382. }
  383. total_blk = 0;
  384. aml_chip->new_nand_info.read_rety_info.default_flag = 0;
  385. while(total_blk < RETRY_NAND_BLK_NUM){
  386. error = mtd->block_isbad(mtd, addr);
  387. if (error) {
  388. printk("%s %d detect bad blk at blk:%d\n", __func__, __LINE__, addr>> phys_erase_shift);
  389. addr += mtd->erasesize;
  390. total_blk++;
  391. continue;
  392. }
  393. aml_oob_ops.mode = MTD_OOB_AUTO;
  394. aml_oob_ops.len = mtd->writesize;
  395. aml_oob_ops.ooblen = 4;
  396. aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset;
  397. aml_oob_ops.datbuf = data_buf;
  398. aml_oob_ops.oobbuf = oob_buf;
  399. memset(oob_buf, 0, 4);
  400. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  401. memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
  402. for(i=0;i<RETRY_NAND_COPY_NUM;i++){
  403. memset(oob_buf, 0, 4);
  404. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  405. memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
  406. nand_type = aml_chip->new_nand_info.type;
  407. aml_chip->new_nand_info.type = 0;
  408. error = mtd->read_oob(mtd, (addr + page_list[i]*mtd->writesize), &aml_oob_ops);
  409. aml_chip->new_nand_info.type = nand_type;
  410. if ((error != 0) && (error != -EUCLEAN)) {
  411. printk("%s %d read oob failed at blk:%d, page:%d\n", __func__, __LINE__, addr>> phys_erase_shift, (addr + page_list[i]*mtd->writesize)/mtd->writesize);
  412. continue;
  413. }
  414. if (!memcmp(oob_buf, RETRY_NAND_MAGIC, 4)){
  415. memcpy(&aml_chip->new_nand_info.read_rety_info.reg_default_value[0][0], (unsigned char *)aml_oob_ops.datbuf, MAX_CHIP_NUM*READ_RETRY_REG_NUM);
  416. //memcpy(&aml_chip->new_nand_info.slc_program_info.reg_default_value[0][0], (unsigned char *)aml_oob_ops.datbuf, MAX_CHIP_NUM*ENHANCE_SLC_REG_NUM);
  417. printk("%s %d get default reg value at blk:%d, page:%d\n", __func__, __LINE__, addr>> phys_erase_shift, (addr + page_list[i]*mtd->writesize)/mtd->writesize);
  418. for(i=0; i<aml_chip->chip_num; i++){
  419. if(aml_chip->valid_chip[i]){
  420. for(j=0;j<aml_chip->new_nand_info.read_rety_info.reg_cnt;j++)
  421. printk("%s, REG(0x%x): value:0x%x, for chip[%d]\n", __func__, aml_chip->new_nand_info.read_rety_info.reg_addr[j], aml_chip->new_nand_info.read_rety_info.reg_default_value[i][j], i);
  422. //for(j=0;j<aml_chip->new_nand_info.slc_program_info.reg_cnt;j++)
  423. // printk("%s, REG(0x%x): value:0x%x, for chip[%d]\n", __func__, aml_chip->new_nand_info.slc_program_info.reg_addr[j], aml_chip->new_nand_info.slc_program_info.reg_default_value[i][j], i);
  424. }
  425. }
  426. aml_chip->new_nand_info.read_rety_info.default_flag = 1;
  427. goto READ_OK;
  428. }
  429. }
  430. addr += mtd->erasesize;
  431. total_blk++;
  432. }
  433. aml_chip->new_nand_info.read_rety_info.default_flag = 0;
  434. printk("######%s %d read default read retry reg value failed and need read from chip write back to nand using SLC\n", __func__, __LINE__);
  435. chip->select_chip(mtd, 0);
  436. for(i=0; i<aml_chip->chip_num; i++){
  437. if(aml_chip->valid_chip[i]){
  438. aml_nand_get_reg_value_hynix(aml_chip, &aml_chip->new_nand_info.read_rety_info.reg_default_value[i][0], &aml_chip->new_nand_info.read_rety_info.reg_addr[0], i, aml_chip->new_nand_info.read_rety_info.reg_cnt);
  439. udelay(2);
  440. //aml_nand_get_reg_value_hynix(aml_chip, &aml_chip->new_nand_info.slc_program_info.reg_default_value[i][0], &aml_chip->new_nand_info.slc_program_info.reg_addr[0], i, aml_chip->new_nand_info.slc_program_info.reg_cnt);
  441. }
  442. }
  443. //chip->select_chip(mtd, -1);
  444. READ_OK:
  445. kfree(data_buf);
  446. }
  447. /*******************************************TOSHIBA*********************************************/
/*
 * Write 'cnt' retry register values to a Toshiba 24nm chip.
 *
 * The two pre-condition commands are issued only before the first step
 * of a retry sequence (cur_cnt == 0); each register write is a
 * set-value command + one address cycle + one data byte, and the pair
 * of trailing commands commits the new parameters.
 */
void aml_nand_set_reg_value_toshiba(struct aml_nand_chip *aml_chip, uint8_t *buf, uint8_t *addr, int chipnr, int cnt)
{
	struct nand_chip *chip = &aml_chip->chip;
	struct mtd_info *mtd = &aml_chip->mtd;
	int j;

	if (aml_chip->new_nand_info.type != TOSHIBA_24NM)
		return;
	printk("Enter %s\n", __func__);
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	aml_chip->aml_nand_select_chip(aml_chip, chipnr);
	/* pre-condition commands only once, at the start of a sequence */
	if (aml_chip->new_nand_info.read_rety_info.cur_cnt[chipnr] == 0) {
		aml_chip->aml_nand_command(aml_chip, NAND_CMD_TOSHIBA_PRE_CON1, -1, -1, chipnr);
		udelay(2);
		aml_chip->aml_nand_command(aml_chip, NAND_CMD_TOSHIBA_PRE_CON2, -1, -1, chipnr);
		udelay(2);
	}
	for (j = 0; j < cnt; j++) {
		/* set-value command, one address cycle, then the data byte */
		aml_chip->aml_nand_command(aml_chip, NAND_CMD_TOSHIBA_SET_VALUE, -1, -1, chipnr);
		udelay(2);
		chip->cmd_ctrl(mtd, addr[j], NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE);
		udelay(2);
		aml_chip->aml_nand_write_byte(aml_chip, buf[j]);
		printk("%s, REG(0x%x): value:0x%x\n", __func__, addr[j], buf[j]);
	}
	/* commit the new parameters */
	aml_chip->aml_nand_command(aml_chip, NAND_CMD_TOSHIBA_BEF_COMMAND1, -1, -1, chipnr);
	udelay(2);
	aml_chip->aml_nand_command(aml_chip, NAND_CMD_TOSHIBA_BEF_COMMAND2, -1, -1, chipnr);
	udelay(2);
	aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
	return;
}
  479. //when ecc fail,set nand retry reg
  480. void aml_nand_read_retry_handle_toshiba(struct mtd_info *mtd, int chipnr)
  481. {
  482. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  483. int cur_cnt;
  484. if(aml_chip->new_nand_info.type != TOSHIBA_24NM)
  485. return;
  486. cur_cnt = aml_chip->new_nand_info.read_rety_info.cur_cnt[chipnr];
  487. printk("TOSHIBA NAND set partmeters here and read_retry_cnt:%d\n", cur_cnt);
  488. aml_nand_set_reg_value_toshiba(aml_chip, &aml_chip->new_nand_info.read_rety_info.reg_offset_value[cur_cnt][0], &aml_chip->new_nand_info.read_rety_info.reg_addr[0], chipnr, aml_chip->new_nand_info.read_rety_info.reg_cnt);
  489. udelay(10);
  490. cur_cnt++;
  491. aml_chip->new_nand_info.read_rety_info.cur_cnt[chipnr] = (cur_cnt > (aml_chip->new_nand_info.read_rety_info.retry_cnt-1)) ? 0 : cur_cnt;
  492. }
  493. void aml_nand_read_retry_exit_toshiba(struct mtd_info *mtd, int chipnr)
  494. {
  495. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  496. if(aml_chip->new_nand_info.type != TOSHIBA_24NM)
  497. return;
  498. aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
  499. aml_chip->aml_nand_command(aml_chip, NAND_CMD_RESET, -1, -1, chipnr);
  500. aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
  501. memset(&aml_chip->new_nand_info.read_rety_info.cur_cnt[0], 0, MAX_CHIP_NUM);
  502. }
  503. #endif
/*
 * One-time controller initialisation: optionally probe for a second
 * chip sharing CE1/CE2 (Meson boards), then compute and program the
 * NAND interface timing that satisfies the default T_REA/T_RHOH
 * figures at the current system clock.
 */
static void aml_platform_hw_init(struct aml_nand_chip *aml_chip)
{
	struct clk *sys_clk;
	int sys_clk_rate, sys_time, start_cycle, end_cycle, bus_cycle, time_mode, adjust, Tcycle, T_REA = DEFAULT_T_REA, T_RHOH = DEFAULT_T_RHOH, i;
#ifdef CONFIG_ARCH_MESON
	struct mtd_info *mtd = &aml_chip->mtd;
	struct nand_chip *chip = &aml_chip->chip;
	/*
	 * Probe whether CE1 and CE2 are wired together: drive the CE1
	 * GPIO and read the level back on the second pad.  If the level
	 * follows, only two chips are really addressable.
	 */
	if (aml_chip->chip_num > 1) {
		chip->select_chip(mtd, -1);
		CLEAR_CBUS_REG_MASK(PREG_HGPIO_EN_N, (1 << 5));
		CLEAR_CBUS_REG_MASK(PREG_HGPIO_O, (1 << 5));
		SET_CBUS_REG_MASK(PREG_HGPIO_EN_N, (1 << 16));
		if (!(READ_CBUS_REG(PREG_HGPIO_I) & (1 << 16))) {
			SET_CBUS_REG_MASK(PREG_HGPIO_O, (1 << 5));
			if ((READ_CBUS_REG(PREG_HGPIO_I) & (1 << 16))) {
				aml_chip->chip_enable[1] = (aml_chip->chip_enable[1] & aml_chip->chip_enable[2]);
				aml_chip->rb_enable[1] = aml_chip->rb_enable[2];
				aml_chip->chip_num = 2;
				aml_nand_debug("ce1 and ce2 connected\n");
			}
		}
	}
#endif
	sys_clk = clk_get_sys(NAND_SYS_CLK_NAME, NULL);
	sys_clk_rate = clk_get_rate(sys_clk);
	/* sys_time: one bus clock period in tenths of a nanosecond */
	sys_time = (10000 / (sys_clk_rate / 1000000));
	/* earliest sampling cycle that satisfies T_REA (rounded up) */
	start_cycle = (((NAND_CYCLE_DELAY + T_REA * 10) * 10) / sys_time);
	start_cycle = (start_cycle + 9) / 10;
	time_mode = -1;
	/* pick the fastest standard timing mode whose window fits */
	for (i = 5; i >= 0; i--) {
		bus_cycle = nand_mode_time[i];
		Tcycle = bus_cycle * sys_time;
		/* latest sampling cycle that still satisfies T_RHOH */
		end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + T_RHOH * 10) * 10) / sys_time);
		end_cycle = end_cycle / 10;
		if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
			|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
			&& (start_cycle <= end_cycle)) {
			time_mode = i;
			break;
		}
	}
	/* no standard mode fits: fall back to mode 0, stretch the cycle */
	if (time_mode < 0) {
		time_mode = 0;
		for (bus_cycle = 19; bus_cycle > nand_mode_time[time_mode]; bus_cycle--) {
			Tcycle = bus_cycle * sys_time;
			end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + T_RHOH * 10) * 10) / sys_time);
			end_cycle = end_cycle / 10;
			if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
				|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
				&& (start_cycle <= end_cycle)) {
				break;
			}
		}
		/* nothing fits at all: leave the controller untouched */
		if (bus_cycle <= nand_mode_time[time_mode])
			return;
	}
	/* adjust shifts the data sampling point into the valid window;
	 * the 0x8 bit appears to mark a negative 4-bit offset --
	 * NOTE(review): confirm against the NFC register documentation */
	if (nand_mode_time[time_mode] < start_cycle)
		adjust = start_cycle - nand_mode_time[time_mode];
	else if(nand_mode_time[time_mode] > end_cycle)
		adjust = ((((~(nand_mode_time[time_mode] - end_cycle) + 1)) & 0xf) | 0x8);
	else
		adjust = 0;
	NFC_SET_CFG(0);
	NFC_SET_TIMING(time_mode, (bus_cycle - 1), adjust);
	NFC_SEND_CMD(1<<31);
	dev_info(aml_chip->device, "time_mode=%d, bus_cycle=%d, adjust=%d, start_cycle=%d, end_cycle=%d,system=%d.%dns\n",
		time_mode, bus_cycle, adjust, start_cycle, end_cycle, sys_time/10, sys_time%10);
}
/*
 * Re-program the NAND interface timing for the current system clock.
 *
 * Like aml_platform_hw_init() but uses the per-chip T_REA/T_RHOH
 * values and honours the timing-mode cap encoded in the platform
 * options.  Called at runtime when the clock source switches (see the
 * early-suspend handling in aml_nand_select_chip()).
 */
static void aml_platform_adjust_timing(struct aml_nand_chip *aml_chip)
{
	struct aml_nand_platform *plat = aml_chip->platform;
	struct clk *sys_clk;
	int sys_clk_rate, sys_time, start_cycle, end_cycle, bus_cycle, time_mode, time_mode_select, adjust, Tcycle, i;

	/* platform options may cap the fastest allowed timing mode */
	time_mode_select = ((plat->platform_nand_data.chip.options & NAND_TIMING_OPTIONS_MASK) >> 8);
	if ((time_mode_select > 5) || (time_mode_select < 0))
		time_mode_select = 5;
	/* fall back to conservative datasheet values if the ID table
	 * did not provide them */
	if (!aml_chip->T_REA)
		aml_chip->T_REA = 20;
	if (!aml_chip->T_RHOH)
		aml_chip->T_RHOH = 15;
	/* clk81 source bit set -> normal system clock; clear -> the
	 * 27MHz crystal (early-suspend state) */
	if (READ_CBUS_REG(HHI_MPEG_CLK_CNTL)&(1<<8)) {
		sys_clk = clk_get_sys(NAND_SYS_CLK_NAME, NULL);
		sys_clk_rate = clk_get_rate(sys_clk);
		time_mode = -1;
	}
	else {
		time_mode = 0;
		sys_clk_rate = 27000000;
	}
	/* one bus clock period in tenths of a nanosecond */
	sys_time = (10000 / (sys_clk_rate / 1000000));
	/* earliest sampling cycle that satisfies T_REA (rounded up) */
	start_cycle = (((NAND_CYCLE_DELAY + aml_chip->T_REA * 10) * 10) / sys_time);
	start_cycle = (start_cycle + 9) / 10;
	if (time_mode == 0) {
		/* slow 27MHz clock: search for a cycle count that fits */
		for (bus_cycle = 5; bus_cycle <= 19; bus_cycle++) {
			Tcycle = bus_cycle * sys_time;
			end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + aml_chip->T_RHOH * 10) * 10) / sys_time);
			end_cycle = end_cycle / 10;
			if (((start_cycle - bus_cycle) > 7) || ((bus_cycle - end_cycle) > 8))
				continue;
			if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
				|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
				&& (start_cycle <= end_cycle)) {
				break;
			}
		}
		BUG_ON(bus_cycle > 19);
	}
	else{
		/* fast clock: pick the highest standard mode that fits */
		for (i=time_mode_select; i>=0; i--) {
			bus_cycle = nand_mode_time[i];
			Tcycle = bus_cycle * sys_time;
			end_cycle = (((NAND_CYCLE_DELAY + Tcycle / 2 + aml_chip->T_RHOH * 10) * 10) / sys_time);
			end_cycle = end_cycle / 10;
			if ((((start_cycle >= 3) && (start_cycle <= ( bus_cycle + 1)))
				|| ((end_cycle >= 3) && (end_cycle <= (bus_cycle + 1))))
				&& (start_cycle <= end_cycle)) {
				time_mode = i;
				break;
			}
		}
		/* no mode fits: keep the previous timing */
		if (time_mode < 0)
			return;
	}
	/* adjust shifts the data sampling point into the valid window;
	 * the 0x8 bit appears to mark a negative 4-bit offset --
	 * NOTE(review): confirm against the NFC register documentation */
	if (nand_mode_time[time_mode] < start_cycle)
		adjust = start_cycle - nand_mode_time[time_mode];
	else if(nand_mode_time[time_mode] > end_cycle)
		adjust = ((((~(nand_mode_time[time_mode] - end_cycle) + 1)) & 0xf) | 0x8);
	else
		adjust = 0;
	NFC_SET_TIMING(time_mode, (bus_cycle - 1), adjust);
	dev_info(aml_chip->device, "time_mode=%d, bus_cycle=%d, adjust=%d, start_cycle=%d, end_cycle=%d,system=%d.%dns\n",
		time_mode, bus_cycle, adjust, start_cycle, end_cycle, sys_time/10, sys_time%10);
}
/*
 * Build and register the MTD partition table for this device.
 *
 * The boot device (plat->name == NAND_BOOT_NAME) gets a single fixed
 * 1024-page partition at offset 0.  Normal devices start after the
 * boot area (plus the read-retry reserved blocks on new-style NAND);
 * each small partition is padded forward over factory-bad blocks, and
 * unnamed partitions are auto-named "mtd<N>".
 *
 * Returns 0 on success or a negative errno.
 */
static int aml_nand_add_partition(struct aml_nand_chip *aml_chip)
{
	uint32_t adjust_offset = 0, mini_part_blk_num, start_blk = 0;
	struct mtd_info *mtd = &aml_chip->mtd;
	struct aml_nand_platform *plat = aml_chip->platform;
	struct platform_nand_chip *chip = &plat->platform_nand_data.chip;
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition *temp_parts = NULL;
	struct mtd_partition *parts;
	int nr, i, error = 0, part_save_in_env = 1, file_system_part = 0, phys_erase_shift;
	u8 part_num = 0;
	size_t offset;
	uint64_t mini_part_size = ((mtd->erasesize > NAND_MINI_PART_SIZE) ? mtd->erasesize : NAND_MINI_PART_SIZE);

	if (chip->set_parts)
		chip->set_parts(mtd->size, chip);
	phys_erase_shift = fls(mtd->erasesize) - 1;
	parts = plat->platform_nand_data.chip.partitions;
	nr = plat->platform_nand_data.chip.nr_partitions;
	if (!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME))) {
		/* boot device: one fixed 1024-page partition at offset 0 */
		if (nr == 0) {
			parts = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
			if (!parts)
				return -ENOMEM;
		}
		parts->name = NAND_BOOT_NAME;
		parts->offset = 0;
		parts->size = (mtd->writesize * 1024);
		nr = 1;
		nand_boot_flag = 1;
	}
	else {
		/* normal device: skip past the boot image, if one exists */
		if (nand_boot_flag)
			adjust_offset = (1024 * mtd->writesize / aml_chip->plane_num);
#ifdef NEW_NAND_SUPPORT
		/* new-style NAND reserves blocks for the read-retry table */
		if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10))
			adjust_offset += RETRY_NAND_BLK_NUM* mtd->erasesize;
#endif
		part_num++;
		/* NOTE(review): dead store -- start_blk is reset to 0 a few
		 * lines below before this value is ever read */
		start_blk = adjust_offset / mtd->erasesize;
		if ((NAND_MINI_PART_SIZE / mtd->erasesize) < 2)
			mini_part_blk_num = 2;
		else
			mini_part_blk_num = (NAND_MINI_PART_SIZE >> phys_erase_shift);
		start_blk = 0;
		/* slide the reserved area forward over bad blocks so it
		 * keeps its full usable size */
		do {
			offset = adjust_offset + start_blk * mtd->erasesize;
			error = mtd->block_isbad(mtd, offset);
			if (error) {
				adjust_offset += mtd->erasesize;
				continue;
			}
			start_blk++;
		} while (start_blk < mini_part_blk_num);
		adjust_offset += mini_part_blk_num * mtd->erasesize;
		if (nr == 0) {
			/* no table supplied: build a default one */
			part_save_in_env = 0;
			if (nand_boot_flag)
				nr = NAND_MINI_PART_NUM + 1;
			else
				nr = 2;
			parts = kzalloc((nr * sizeof(struct mtd_partition)), GFP_KERNEL);
			if (!parts)
				return -ENOMEM;
			mini_part_size = ((mtd->erasesize > NAND_MINI_PART_SIZE) ? mtd->erasesize : NAND_MINI_PART_SIZE);
		}
		for (i=0; i<nr; i++) {
			temp_parts = parts + i;
			if ((temp_parts->size >= mtd->erasesize) || (i == (nr - 1)))
				mini_part_size = temp_parts->size;
			temp_parts->offset = adjust_offset;
			/* small partitions get bad blocks padded out; once a
			 * partition reaches NAND_SYS_PART_SIZE the rest are
			 * treated as filesystem partitions and left as-is */
			if ((mini_part_size < NAND_SYS_PART_SIZE) && (file_system_part == 0)) {
				start_blk = 0;
				do {
					offset = adjust_offset + start_blk * mtd->erasesize;
					error = mtd->block_isbad(mtd, offset);
					if (error) {
						adjust_offset += mtd->erasesize;
						continue;
					}
					start_blk++;
				} while (start_blk < (mini_part_size >> phys_erase_shift));
			}
			else {
				file_system_part = 1;
			}
			if ((i == (nr - 1)) && (part_save_in_env == 0))
				temp_parts->size = NAND_SYS_PART_SIZE;
			else if (mini_part_size != MTDPART_SIZ_FULL)
				temp_parts->size = mini_part_size + (adjust_offset - temp_parts->offset);
			adjust_offset += mini_part_size;
			if (temp_parts->name == NULL) {
				temp_parts->name = kzalloc(MAX_MTD_PART_NAME_LEN, GFP_KERNEL);
				if (!temp_parts->name)
					return -ENOMEM;
				sprintf(temp_parts->name, "mtd%d", part_num++);
			}
		}
	}
	return add_mtd_partitions(mtd, parts, nr);
#else
	return add_mtd_device(mtd);
#endif
}
  740. static void aml_nand_select_chip(struct mtd_info *mtd, int chipnr)
  741. {
  742. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  743. if (((nand_erarly_suspend_flag == 1) && (!(READ_CBUS_REG(HHI_MPEG_CLK_CNTL)&(1<<8))))
  744. || ((READ_CBUS_REG(HHI_MPEG_CLK_CNTL)&(1<<8)) && (nand_erarly_suspend_flag == 2))) {
  745. aml_chip->aml_nand_adjust_timing(aml_chip);
  746. if (nand_erarly_suspend_flag == 1)
  747. nand_erarly_suspend_flag = 2;
  748. else if (nand_erarly_suspend_flag == 2)
  749. nand_erarly_suspend_flag = 0;
  750. }
  751. switch (chipnr) {
  752. case -1:
  753. nand_release_chip();
  754. break;
  755. case 0:
  756. nand_get_chip();
  757. aml_chip->aml_nand_select_chip(aml_chip, chipnr);
  758. break;
  759. case 1:
  760. case 2:
  761. case 3:
  762. aml_chip->aml_nand_select_chip(aml_chip, chipnr);
  763. break;
  764. default:
  765. BUG();
  766. }
  767. return;
  768. }
  769. static void aml_platform_select_chip(struct aml_nand_chip *aml_chip, int chipnr)
  770. {
  771. int i;
  772. switch (chipnr) {
  773. case 0:
  774. case 1:
  775. case 2:
  776. case 3:
  777. aml_chip->chip_selected = aml_chip->chip_enable[chipnr];
  778. aml_chip->rb_received = aml_chip->rb_enable[chipnr];
  779. for (i=1; i<aml_chip->chip_num; i++) {
  780. if (aml_chip->valid_chip[i]) {
  781. if (!((aml_chip->chip_enable[i] >> 10) & 1))
  782. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 4));
  783. if (!((aml_chip->chip_enable[i] >> 10) & 2))
  784. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 3));
  785. if (!((aml_chip->chip_enable[i] >> 10) & 4))
  786. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 14));
  787. if (!((aml_chip->chip_enable[i] >> 10) & 8))
  788. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 13));
  789. if (!((aml_chip->rb_enable[i] >> 10) & 1))
  790. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 2));
  791. if (!((aml_chip->rb_enable[i] >> 10) & 2))
  792. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 1));
  793. if (!((aml_chip->rb_enable[i] >> 10) & 4))
  794. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 12));
  795. if (!((aml_chip->rb_enable[i] >> 10) & 8))
  796. SET_CBUS_REG_MASK(PERIPHS_PIN_MUX_6, (1 << 11));
  797. }
  798. }
  799. NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
  800. break;
  801. default:
  802. BUG();
  803. aml_chip->chip_selected = CE_NOT_SEL;
  804. break;
  805. }
  806. return;
  807. }
  808. static void aml_platform_cmd_ctrl(struct aml_nand_chip *aml_chip, int cmd, unsigned int ctrl)
  809. {
  810. if (cmd == NAND_CMD_NONE)
  811. return;
  812. #ifdef CONFIG_CLK81_DFS
  813. down(&aml_chip->nand_sem);
  814. #endif
  815. if (ctrl & NAND_CLE)
  816. cmd=NFC_CMD_CLE(aml_chip->chip_selected, cmd);
  817. else
  818. cmd=NFC_CMD_ALE(aml_chip->chip_selected, cmd);
  819. NFC_SEND_CMD(cmd);
  820. #ifdef CONFIG_CLK81_DFS
  821. up(&aml_chip->nand_sem);
  822. #endif
  823. }
/*
 * Wait until the selected chip is ready, either by polling the status
 * register (boards with no R/B# line wired, AML_CHIP_NONE_RB) or via
 * the controller's R/B# input.
 *
 * Returns 1 on success, 0 on timeout.
 */
static int aml_platform_wait_devready(struct aml_nand_chip *aml_chip, int chipnr)
{
	struct nand_chip *chip = &aml_chip->chip;
	struct mtd_info *mtd = &aml_chip->mtd;
	unsigned time_out_cnt = 0;
	int status;

	/* wait until command is processed or timeout occures */
	aml_chip->aml_nand_select_chip(aml_chip, chipnr);
#if 1
	if (aml_chip->ops_mode & AML_CHIP_NONE_RB) {
		/* no R/B# pad: issue READ STATUS and poll the ready bit */
		do{
			//udelay(chip->chip_delay);
			aml_chip->aml_nand_command(aml_chip, NAND_CMD_STATUS, -1, -1, chipnr);
			udelay(2);
			status = (int)chip->read_byte(mtd);
			if (status & NAND_STATUS_READY)
				break;
			udelay(20);
		}while(time_out_cnt++ <= 0x2000); //200ms max
		if (time_out_cnt > 0x2000)
			return 0;
	}
	else{
		/* poll the hardware R/B# line */
		do{
			if (chip->dev_ready(mtd))
				break;
		} while (time_out_cnt++ <= AML_NAND_BUSY_TIMEOUT);
		if (time_out_cnt > AML_NAND_BUSY_TIMEOUT)
			return 0;
	}
#else
	/* NOTE(review): disabled alternative implementation with a single
	 * shared timeout; kept for reference */
	do {
		if (aml_chip->ops_mode & AML_CHIP_NONE_RB) {
			//udelay(chip->chip_delay);
			aml_chip->aml_nand_command(aml_chip, NAND_CMD_STATUS, -1, -1, chipnr);
			udelay(2);
			status = (int)chip->read_byte(mtd);
			if (status & NAND_STATUS_READY)
				break;
			udelay(20);
		}
		else {
			if (chip->dev_ready(mtd))
				break;
		}
	} while (time_out_cnt++ <= AML_NAND_BUSY_TIMEOUT);
	if (time_out_cnt > AML_NAND_BUSY_TIMEOUT)
		return 0;
#endif
	return 1;
}
  875. static int aml_nand_dev_ready(struct mtd_info *mtd)
  876. {
  877. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  878. return NFC_GET_RB_STATUS(aml_chip->rb_received);
  879. }
  880. static int aml_nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  881. {
  882. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  883. struct nand_chip *chip = mtd->priv;
  884. chip->read_buf(mtd, aml_chip->aml_nand_data_buf, len);
  885. if (memcmp(buf, aml_chip->aml_nand_data_buf, len))
  886. return -EFAULT;
  887. return 0;
  888. }
  889. static void aml_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
  890. {
  891. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  892. aml_chip->aml_nand_cmd_ctrl(aml_chip, cmd, ctrl);
  893. }
  894. static int aml_nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
  895. {
  896. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  897. int status[MAX_CHIP_NUM], state = chip->state, i = 0, time_cnt = 0, chip_nr = 1;
  898. /* Apply this short delay always to ensure that we do wait tWB in
  899. * any case on any machine. */
  900. ndelay(100);
  901. if (state == FL_ERASING)
  902. chip_nr = aml_chip->chip_num;
  903. for (i=0; i<chip_nr; i++) {
  904. if (aml_chip->valid_chip[i]) {
  905. //active ce for operation chip and send cmd
  906. aml_chip->aml_nand_select_chip(aml_chip, i);
  907. if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
  908. aml_chip->aml_nand_command(aml_chip, NAND_CMD_STATUS_MULTI, -1, -1, i);
  909. else
  910. aml_chip->aml_nand_command(aml_chip, NAND_CMD_STATUS, -1, -1, i);
  911. time_cnt = 0;
  912. while (time_cnt++ < 0x40000) {
  913. if (chip->dev_ready) {
  914. if (chip->dev_ready(mtd))
  915. break;
  916. udelay(2);
  917. } else {
  918. if(time_cnt == 1)
  919. udelay(500);
  920. if (chip->read_byte(mtd) & NAND_STATUS_READY) {
  921. break;
  922. }
  923. aml_chip->aml_nand_command(aml_chip, NAND_CMD_STATUS, -1, -1, i);
  924. udelay(50);
  925. }
  926. }
  927. status[i] = (int)chip->read_byte(mtd);
  928. status[0] |= status[i];
  929. }
  930. }
  931. return status[0];
  932. }
  933. static void aml_nand_base_command(struct aml_nand_chip *aml_chip, unsigned command, int column, int page_addr, int chipnr)
  934. {
  935. struct nand_chip *chip = &aml_chip->chip;
  936. struct mtd_info *mtd = &aml_chip->mtd;
  937. unsigned command_temp, pages_per_blk_shift, plane_page_addr = 0, plane_blk_addr = 0;
  938. pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
  939. if (page_addr != -1) {
  940. page_addr /= aml_chip->plane_num;
  941. plane_page_addr = (page_addr & ((1 << pages_per_blk_shift) - 1));
  942. plane_blk_addr = (page_addr >> pages_per_blk_shift);
  943. plane_blk_addr = (plane_blk_addr << 1);
  944. }
  945. if (aml_chip->plane_num == 2) {
  946. switch (command) {
  947. case NAND_CMD_READ0:
  948. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  949. command_temp = command;
  950. }
  951. else {
  952. command_temp = NAND_CMD_TWOPLANE_PREVIOS_READ;
  953. column = -1;
  954. }
  955. plane_page_addr |= (plane_blk_addr << pages_per_blk_shift);
  956. break;
  957. case NAND_CMD_TWOPLANE_READ1:
  958. command_temp = NAND_CMD_READ0;
  959. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL))
  960. //plane_page_addr |= ((plane_blk_addr + 1) << 8);
  961. return;
  962. else
  963. plane_page_addr |= (plane_blk_addr << pages_per_blk_shift);
  964. break;
  965. case NAND_CMD_TWOPLANE_READ2:
  966. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  967. command_temp = NAND_CMD_PLANE2_READ_START;
  968. }
  969. else {
  970. command_temp = NAND_CMD_READ0;
  971. }
  972. plane_page_addr |= ((plane_blk_addr + 1) << pages_per_blk_shift);
  973. break;
  974. case NAND_CMD_SEQIN:
  975. command_temp = command;
  976. plane_page_addr |= (plane_blk_addr << pages_per_blk_shift);
  977. break;
  978. case NAND_CMD_TWOPLANE_WRITE2:
  979. if ((aml_chip->mfr_type == NAND_MFR_HYNIX) || (aml_chip->mfr_type == NAND_MFR_SAMSUNG))
  980. command_temp = command;
  981. else
  982. command_temp = NAND_CMD_TWOPLANE_WRITE2_MICRO;
  983. plane_page_addr |= ((plane_blk_addr + 1) << pages_per_blk_shift);
  984. break;
  985. case NAND_CMD_ERASE1:
  986. command_temp = command;
  987. plane_page_addr |= (plane_blk_addr << pages_per_blk_shift);
  988. break;
  989. case NAND_CMD_MULTI_CHIP_STATUS:
  990. command_temp = command;
  991. plane_page_addr |= (plane_blk_addr << pages_per_blk_shift);
  992. break;
  993. default:
  994. command_temp = command;
  995. break;
  996. }
  997. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  998. //if ((command_temp == NAND_CMD_SEQIN) || (command_temp == NAND_CMD_TWOPLANE_WRITE2) || (command_temp == NAND_CMD_READ0))
  999. //printk(" NAND plane_page_addr: %x plane_blk_addr %x command: %x \n", plane_page_addr, plane_blk_addr, command);
  1000. if (column != -1 || page_addr != -1) {
  1001. int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
  1002. /* Serially input address */
  1003. if (column != -1) {
  1004. /* Adjust columns for 16 bit buswidth */
  1005. if (chip->options & NAND_BUSWIDTH_16)
  1006. column >>= 1;
  1007. chip->cmd_ctrl(mtd, column, ctrl);
  1008. ctrl &= ~NAND_CTRL_CHANGE;
  1009. chip->cmd_ctrl(mtd, column >> 8, ctrl);
  1010. }
  1011. if (page_addr != -1) {
  1012. chip->cmd_ctrl(mtd, plane_page_addr, ctrl);
  1013. chip->cmd_ctrl(mtd, plane_page_addr >> 8, NAND_NCE | NAND_ALE);
  1014. /* One more address cycle for devices > 128MiB */
  1015. if (chip->chipsize > (128 << 20))
  1016. chip->cmd_ctrl(mtd, plane_page_addr >> 16, NAND_NCE | NAND_ALE);
  1017. }
  1018. }
  1019. switch (command) {
  1020. case NAND_CMD_READ0:
  1021. plane_page_addr = page_addr % (1 << pages_per_blk_shift);
  1022. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  1023. plane_page_addr |= ((plane_blk_addr + 1) << pages_per_blk_shift);
  1024. command_temp = command;
  1025. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1026. }
  1027. else {
  1028. command_temp = NAND_CMD_TWOPLANE_PREVIOS_READ;
  1029. column = -1;
  1030. plane_page_addr |= ((plane_blk_addr + 1) << pages_per_blk_shift);
  1031. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1032. }
  1033. break;
  1034. case NAND_CMD_TWOPLANE_READ1:
  1035. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  1036. page_addr = -1;
  1037. column = -1;
  1038. }
  1039. else {
  1040. command_temp = NAND_CMD_RNDOUT;
  1041. page_addr = -1;
  1042. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1043. }
  1044. break;
  1045. case NAND_CMD_TWOPLANE_READ2:
  1046. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  1047. page_addr = -1;
  1048. column = -1;
  1049. }
  1050. else {
  1051. command_temp = NAND_CMD_RNDOUT;
  1052. page_addr = -1;
  1053. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1054. }
  1055. break;
  1056. case NAND_CMD_ERASE1:
  1057. if ((aml_chip->mfr_type == NAND_MFR_MICRON) || (aml_chip->mfr_type == NAND_MFR_INTEL)) {
  1058. command_temp = NAND_CMD_ERASE1_END;
  1059. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1060. aml_chip->aml_nand_wait_devready(aml_chip, chipnr);
  1061. }
  1062. command_temp = command;
  1063. chip->cmd_ctrl(mtd, command_temp & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1064. plane_page_addr = page_addr % (1 << pages_per_blk_shift);
  1065. plane_page_addr |= ((plane_blk_addr + 1) << pages_per_blk_shift);
  1066. break;
  1067. default:
  1068. column = -1;
  1069. page_addr = -1;
  1070. break;
  1071. }
  1072. if (column != -1 || page_addr != -1) {
  1073. int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
  1074. /* Serially input address */
  1075. if (column != -1) {
  1076. /* Adjust columns for 16 bit buswidth */
  1077. if (chip->options & NAND_BUSWIDTH_16)
  1078. column >>= 1;
  1079. chip->cmd_ctrl(mtd, column, ctrl);
  1080. ctrl &= ~NAND_CTRL_CHANGE;
  1081. chip->cmd_ctrl(mtd, column >> 8, ctrl);
  1082. }
  1083. if (page_addr != -1) {
  1084. //plane_page_addr |= (1 << (pages_per_blk_shift + 1));
  1085. //BUG_ON((plane_page_addr & 0x7FF) == 0);
  1086. chip->cmd_ctrl(mtd, plane_page_addr, ctrl);
  1087. chip->cmd_ctrl(mtd, plane_page_addr >> 8, NAND_NCE | NAND_ALE);
  1088. /* One more address cycle for devices > 128MiB */
  1089. if (chip->chipsize > (128 << 20))
  1090. chip->cmd_ctrl(mtd, plane_page_addr >> 16, NAND_NCE | NAND_ALE);
  1091. }
  1092. }
  1093. if ((command == NAND_CMD_RNDOUT) || (command == NAND_CMD_TWOPLANE_READ2))
  1094. chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1095. else if ((command == NAND_CMD_TWOPLANE_READ1)) {
  1096. chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1097. }
  1098. else if (command == NAND_CMD_READ0) {
  1099. chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1100. }
  1101. chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  1102. }
  1103. else {
  1104. chip->cmd_ctrl(mtd, command & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1105. if (column != -1 || page_addr != -1) {
  1106. int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
  1107. /* Serially input address */
  1108. if (column != -1) {
  1109. /* Adjust columns for 16 bit buswidth */
  1110. if (chip->options & NAND_BUSWIDTH_16)
  1111. column >>= 1;
  1112. chip->cmd_ctrl(mtd, column, ctrl);
  1113. ctrl &= ~NAND_CTRL_CHANGE;
  1114. chip->cmd_ctrl(mtd, column >> 8, ctrl);
  1115. }
  1116. if (page_addr != -1) {
  1117. chip->cmd_ctrl(mtd, page_addr, ctrl);
  1118. chip->cmd_ctrl(mtd, page_addr >> 8, NAND_NCE | NAND_ALE);
  1119. /* One more address cycle for devices > 128MiB */
  1120. if (chip->chipsize > (128 << 20))
  1121. chip->cmd_ctrl(mtd, page_addr >> 16, NAND_NCE | NAND_ALE);
  1122. }
  1123. }
  1124. if (command == NAND_CMD_RNDOUT)
  1125. chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1126. else if (command == NAND_CMD_READ0)
  1127. chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1128. chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  1129. }
  1130. /*
  1131. * program and erase have their own busy handlers
  1132. * status, sequential in, and deplete1 need no delay
  1133. */
  1134. switch (command) {
  1135. case NAND_CMD_CACHEDPROG:
  1136. case NAND_CMD_PAGEPROG:
  1137. case NAND_CMD_ERASE1:
  1138. case NAND_CMD_ERASE2:
  1139. case NAND_CMD_SEQIN:
  1140. case NAND_CMD_RNDIN:
  1141. case NAND_CMD_STATUS:
  1142. case NAND_CMD_DEPLETE1:
  1143. return;
  1144. /*
  1145. * read error status commands require only a short delay
  1146. */
  1147. case NAND_CMD_STATUS_ERROR:
  1148. case NAND_CMD_STATUS_ERROR0:
  1149. case NAND_CMD_STATUS_ERROR1:
  1150. case NAND_CMD_STATUS_ERROR2:
  1151. case NAND_CMD_STATUS_ERROR3:
  1152. udelay(chip->chip_delay);
  1153. return;
  1154. case NAND_CMD_RESET:
  1155. if (!aml_chip->aml_nand_wait_devready(aml_chip, chipnr))
  1156. aml_nand_debug ("couldn't found selected chip: %d ready\n", chipnr);
  1157. if (chip->dev_ready)
  1158. break;
  1159. udelay(chip->chip_delay);
  1160. chip->cmd_ctrl(mtd, NAND_CMD_STATUS, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  1161. chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  1162. while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
  1163. return;
  1164. default:
  1165. /*
  1166. * If we don't have access to the busy pin, we apply the given
  1167. * command delay
  1168. */
  1169. break;
  1170. }
  1171. /* Apply this short delay always to ensure that we do wait tWB in
  1172. * any case on any machine. */
  1173. ndelay(100);
  1174. }
  1175. static void aml_nand_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
  1176. {
  1177. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  1178. struct nand_chip *chip = &aml_chip->chip;
  1179. int i = 0, valid_page_num = 1, internal_chip;
  1180. if (page_addr != -1) {
  1181. valid_page_num = (mtd->writesize >> chip->page_shift);
  1182. valid_page_num /= aml_chip->plane_num;
  1183. aml_chip->page_addr = page_addr / valid_page_num;
  1184. if (unlikely(aml_chip->page_addr >= aml_chip->internal_page_nums)) {
  1185. internal_chip = aml_chip->page_addr / aml_chip->internal_page_nums;
  1186. aml_chip->page_addr -= aml_chip->internal_page_nums;
  1187. aml_chip->page_addr |= (1 << aml_chip->internal_chip_shift) * internal_chip;
  1188. }
  1189. }
  1190. /* Emulate NAND_CMD_READOOB */
  1191. if (command == NAND_CMD_READOOB) {
  1192. command = NAND_CMD_READ0;
  1193. aml_chip->aml_nand_wait_devready(aml_chip, 0);
  1194. aml_chip->aml_nand_command(aml_chip, command, column, aml_chip->page_addr, 0);
  1195. return;
  1196. }
  1197. if (command == NAND_CMD_PAGEPROG)
  1198. return;
  1199. if (command == NAND_CMD_SEQIN) {
  1200. aml_chip->aml_nand_wait_devready(aml_chip, 0);
  1201. aml_chip->aml_nand_command(aml_chip, command, column, aml_chip->page_addr, 0);
  1202. return;
  1203. }
  1204. for (i=0; i<aml_chip->chip_num; i++) {
  1205. if (aml_chip->valid_chip[i]) {
  1206. //active ce for operation chip and send cmd
  1207. aml_chip->aml_nand_wait_devready(aml_chip, i);
  1208. aml_chip->aml_nand_command(aml_chip, command, column, aml_chip->page_addr, i);
  1209. }
  1210. }
  1211. return;
  1212. }
  1213. static void aml_nand_erase_cmd(struct mtd_info *mtd, int page)
  1214. {
  1215. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  1216. struct nand_chip *chip = mtd->priv;
  1217. unsigned pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
  1218. unsigned vt_page_num, i = 0, j = 0, internal_chipnr = 1, page_addr, valid_page_num;
  1219. vt_page_num = (mtd->writesize / (1 << chip->page_shift));
  1220. vt_page_num *= (1 << pages_per_blk_shift);
  1221. if (page % vt_page_num)
  1222. return;
  1223. /* Send commands to erase a block */
  1224. valid_page_num = (mtd->writesize >> chip->page_shift);
  1225. valid_page_num /= aml_chip->plane_num;
  1226. aml_chip->page_addr = page / valid_page_num;
  1227. if (unlikely(aml_chip->page_addr >= aml_chip->internal_page_nums)) {
  1228. internal_chipnr = aml_chip->page_addr / aml_chip->internal_page_nums;
  1229. aml_chip->page_addr -= aml_chip->internal_page_nums;
  1230. aml_chip->page_addr |= (1 << aml_chip->internal_chip_shift) * internal_chipnr;
  1231. }
  1232. if (unlikely(aml_chip->ops_mode & AML_INTERLEAVING_MODE))
  1233. internal_chipnr = aml_chip->internal_chipnr;
  1234. else
  1235. internal_chipnr = 1;
  1236. for (i=0; i<aml_chip->chip_num; i++) {
  1237. if (aml_chip->valid_chip[i]) {
  1238. aml_chip->aml_nand_select_chip(aml_chip, i);
  1239. page_addr = aml_chip->page_addr;
  1240. for (j=0; j<internal_chipnr; j++) {
  1241. if (j > 0) {
  1242. page_addr = aml_chip->page_addr;
  1243. page_addr |= (1 << aml_chip->internal_chip_shift) * j;
  1244. }
  1245. aml_chip->aml_nand_command(aml_chip, NAND_CMD_ERASE1, -1, page_addr, i);
  1246. aml_chip->aml_nand_command(aml_chip, NAND_CMD_ERASE2, -1, -1, i);
  1247. }
  1248. }
  1249. }
  1250. return ;
  1251. }
  1252. #ifdef CONFIG_ARCH_MESON
  1253. static int aml_platform_dma_waiting(struct aml_nand_chip *aml_chip)
  1254. {
  1255. unsigned time_out_cnt = 0;
  1256. NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
  1257. NFC_SEND_CMD_IDLE(aml_chip->chip_selected, 0);
  1258. do {
  1259. if (NFC_CMDFIFO_SIZE() <= 0)
  1260. break;
  1261. }while (time_out_cnt++ <= AML_DMA_BUSY_TIMEOUT);
  1262. if (time_out_cnt < AML_DMA_BUSY_TIMEOUT)
  1263. return 0;
  1264. return -EBUSY;
  1265. }
  1266. static int aml_platform_hwecc_correct(struct aml_nand_chip *aml_chip, unsigned char *buf, unsigned size, unsigned char *oob_buf)
  1267. {
  1268. struct nand_chip *chip = &aml_chip->chip;
  1269. struct mtd_info *mtd = &aml_chip->mtd;
  1270. unsigned ecc_step_num;
  1271. int error = 0;
  1272. if (size % chip->ecc.size) {
  1273. printk ("error parameter size for ecc correct %x\n", size);
  1274. return -EINVAL;
  1275. }
  1276. for (ecc_step_num = 0; ecc_step_num < (size / chip->ecc.size); ecc_step_num++) {
  1277. //check if there have uncorrectable sector
  1278. if (NAND_ECC_FAIL(aml_chip->user_info_buf[ecc_step_num])
  1279. || (NAND_ECC_CNT(aml_chip->user_info_buf[ecc_step_num]) == 0x1f)) {
  1280. printk ("nand communication have uncorrectable ecc error %d %d %d\n", ecc_step_num, NAND_ECC_CNT(aml_chip->user_info_buf[ecc_step_num-1]), NAND_ECC_CNT(aml_chip->user_info_buf[ecc_step_num+1]));
  1281. error = -EIO;
  1282. }
  1283. else {
  1284. mtd->ecc_stats.corrected += NAND_ECC_CNT(aml_chip->user_info_buf[ecc_step_num]);
  1285. }
  1286. }
  1287. return error;
  1288. }
  1289. static int aml_platform_dma_write(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
  1290. {
  1291. int ret = 0;
  1292. memcpy(aml_chip->aml_nand_data_buf, buf, len);
  1293. wmb();
  1294. NFC_SEND_CMD_ADL(aml_chip->data_dma_addr);
  1295. NFC_SEND_CMD_ADH(aml_chip->data_dma_addr);
  1296. NFC_SEND_CMD_AIL(aml_chip->nand_info_dma_addr);
  1297. NFC_SEND_CMD_AIH((aml_chip->nand_info_dma_addr));
  1298. NFC_SEND_CMD_M2N(len, bch_mode);
  1299. ret = aml_platform_dma_waiting(aml_chip);
  1300. return ret;
  1301. }
/*
 * Read @len bytes from the NAND controller via DMA (NAND-to-mem) into
 * @buf, with hardware ECC mode @bch_mode.
 *
 * The per-sector info words are zeroed first; completion is detected by
 * polling the DONE bit of the info word belonging to the last DMA unit.
 *
 * Returns 0 on success, or the error from aml_platform_dma_waiting().
 */
static int aml_platform_dma_read(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
{
	volatile unsigned int * info_buf;
	struct nand_chip *chip = &aml_chip->chip;
	unsigned dma_unit_size;
	int ret = 0;

	/* One info word is produced per DMA unit (ECC step, min. 512 bytes). */
	if (chip->ecc.size >= 512)
		dma_unit_size = chip->ecc.size;
	else
		dma_unit_size = 512;

	/* Point at the info word of the LAST unit: when its DONE bit is set,
	 * the whole transfer has completed. */
	info_buf = aml_chip->user_info_buf + (((len + dma_unit_size - 1) / dma_unit_size) - 1);
	memset((unsigned char *)aml_chip->user_info_buf, 0, ((len + dma_unit_size - 1) / dma_unit_size)*sizeof(int));
	wmb();	/* cleared info words must be visible before the NFC writes them */

	/* Program the data/info DMA addresses, then start the transfer. */
	NFC_SEND_CMD_ADL(aml_chip->data_dma_addr);
	NFC_SEND_CMD_ADH(aml_chip->data_dma_addr);
	NFC_SEND_CMD_AIL(aml_chip->nand_info_dma_addr);
	NFC_SEND_CMD_AIH((aml_chip->nand_info_dma_addr));
	NFC_SEND_CMD_N2M(len, bch_mode);
	ret = aml_platform_dma_waiting(aml_chip);
	if (ret)
		return ret;

	/* NOTE(review): unbounded busy-wait — if the controller never sets
	 * DONE this spins forever; consider a timeout. */
	while(NAND_INFO_DONE(*info_buf) == 0);
	rmb();	/* DMA-written data must be read only after DONE is observed */

	/* Copy out of the bounce buffer unless the caller passed it directly. */
	if (buf != aml_chip->aml_nand_data_buf)
		memcpy(buf, aml_chip->aml_nand_data_buf, len);
	wmb();
	return 0;
}
  1330. #else
/* Stub for builds without CONFIG_ARCH_MESON: no hardware ECC to check. */
static int aml_platform_hwecc_correct(struct aml_nand_chip *aml_chip, unsigned char *buf, unsigned size, unsigned char *oob_buf)
{
	return 0;
}
/* Stub for builds without CONFIG_ARCH_MESON: DMA write is a no-op. */
static int aml_platform_dma_write(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
{
	return 0;
}
/* Stub for builds without CONFIG_ARCH_MESON: DMA read is a no-op. */
static int aml_platform_dma_read(struct aml_nand_chip *aml_chip, unsigned char *buf, int len, unsigned bch_mode)
{
	return 0;
}
  1343. #endif
/* MTD read_buf hook: forward to the platform DMA read with ECC disabled
 * (bch_mode 0 — raw transfer). */
static void aml_nand_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	aml_chip->aml_nand_dma_read(aml_chip, buf, len, 0);
}
/* MTD write_buf hook: forward to the platform DMA write with ECC disabled
 * (bch_mode 0 — raw transfer); the const is cast away for the DMA API. */
static void aml_nand_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	aml_chip->aml_nand_dma_write(aml_chip, (unsigned char *)buf, len, 0);
}
/*
 * Read one virtual page WITHOUT hardware ECC (raw mode).
 *
 * Iterates over every populated chip and, when interleaving, every
 * internal die; supports one- and two-plane devices.  Page data goes to
 * @buf and the raw spare area additionally to chip->oob_poi.
 *
 * Returns 0 on success, -EBUSY if a chip never became ready, -ENODEV on
 * an unsupported plane count.
 */
static int aml_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int page)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	unsigned nand_page_size = aml_chip->page_size;
	unsigned nand_oob_size = aml_chip->oob_size;
	uint8_t *oob_buf = chip->oob_poi;
	int i, error = 0, j = 0, page_addr, internal_chipnr = 1;

	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE)
		internal_chipnr = aml_chip->internal_chipnr;

	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			page_addr = aml_chip->page_addr;
			for (j=0; j<internal_chipnr; j++) {
				if (j > 0) {
					/* Re-issue READ0 for each additional internal die. */
					page_addr = aml_chip->page_addr;
					page_addr |= (1 << aml_chip->internal_chip_shift) * j;
					aml_chip->aml_nand_select_chip(aml_chip, i);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
				}
				if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
					printk ("couldn`t found selected chip: %d ready\n", i);
					error = -EBUSY;
					goto exit;
				}
				/* Chips without a ready/busy line need the read
				 * command repeated after the status poll. */
				if (aml_chip->ops_mode & AML_CHIP_NONE_RB)
					chip->cmd_ctrl(mtd, NAND_CMD_READ0 & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
				if (aml_chip->plane_num == 2) {
					/* Plane 0, then plane 1, each page+spare. */
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ1, 0x00, page_addr, i);
					aml_chip->aml_nand_dma_read(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					memcpy(buf, aml_chip->aml_nand_data_buf, (nand_page_size + nand_oob_size));
					memcpy(oob_buf, aml_chip->aml_nand_data_buf + nand_page_size, nand_oob_size);
					oob_buf += nand_oob_size;
					buf += (nand_page_size + nand_oob_size);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ2, 0x00, page_addr, i);
					aml_chip->aml_nand_dma_read(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					memcpy(buf, aml_chip->aml_nand_data_buf, (nand_page_size + nand_oob_size));
					memcpy(oob_buf, aml_chip->aml_nand_data_buf + nand_page_size, nand_oob_size);
					oob_buf += nand_oob_size;
					buf += (nand_page_size + nand_oob_size);
				}
				else if (aml_chip->plane_num == 1) {
					/* NOTE(review): this path also copies page+spare
					 * into @buf but only advances buf by the page
					 * size, unlike the two-plane path above which
					 * advances by page+spare — confirm the intended
					 * raw buffer layout. */
					aml_chip->aml_nand_dma_read(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					memcpy(buf, aml_chip->aml_nand_data_buf, (nand_page_size + nand_oob_size));
					memcpy(oob_buf, aml_chip->aml_nand_data_buf + nand_page_size, nand_oob_size);
					oob_buf += nand_oob_size;
					buf += nand_page_size;
				}
				else {
					error = -ENODEV;
					aml_nand_debug ("plane_num mistake\n");
					goto exit;
				}
			}
		}
	}
exit:
	return error;
}
/*
 * Write one virtual page WITHOUT hardware ECC (raw mode).
 *
 * Iterates over every populated chip and, when interleaving, every
 * internal die; supports one- and two-plane devices.  Page data comes
 * from @buf and the spare area from chip->oob_poi.
 *
 * NOTE(review): the function is void, so the local `error` collected on
 * failure paths is never reported to the caller.
 */
static void aml_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	unsigned nand_page_size = aml_chip->page_size;
	unsigned nand_oob_size = aml_chip->oob_size;
	uint8_t *oob_buf = chip->oob_poi;
	int i, error = 0, j = 0, page_addr, internal_chipnr = 1;

	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE)
		internal_chipnr = aml_chip->internal_chipnr;

	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			aml_chip->aml_nand_select_chip(aml_chip, i);
			page_addr = aml_chip->page_addr;
			for (j=0; j<internal_chipnr; j++) {
				if (j > 0) {
					/* Re-issue SEQIN for each additional internal die. */
					page_addr = aml_chip->page_addr;
					page_addr |= (1 << aml_chip->internal_chip_shift) * j;
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_SEQIN, 0, page_addr, i);
				}
				if (aml_chip->plane_num == 2) {
					/* Plane 0: stage data+spare, then DUMMY_PROGRAM
					 * holds the program until plane 1 is loaded. */
					memcpy(aml_chip->aml_nand_data_buf, buf, nand_page_size);
					memcpy(aml_chip->aml_nand_data_buf + nand_page_size, oob_buf, nand_oob_size);
					aml_chip->aml_nand_dma_write(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_DUMMY_PROGRAM, -1, -1, i);
					oob_buf += nand_oob_size;
					buf += nand_page_size;
					if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
						aml_nand_debug ("couldn`t found selected chip: %d ready\n", i);
						error = -EBUSY;
						goto exit;
					}
					/* Plane 1: stage data+spare, then PAGEPROG commits
					 * both planes. */
					memcpy(aml_chip->aml_nand_data_buf, buf, nand_page_size);
					memcpy(aml_chip->aml_nand_data_buf + nand_page_size, oob_buf, nand_oob_size);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_WRITE2, 0x00, page_addr, i);
					aml_chip->aml_nand_dma_write(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_PAGEPROG, -1, -1, i);
					oob_buf += nand_oob_size;
					buf += nand_page_size;
				}
				else if (aml_chip->plane_num == 1) {
					memcpy(aml_chip->aml_nand_data_buf, buf, nand_page_size);
					memcpy(aml_chip->aml_nand_data_buf + nand_page_size, oob_buf, nand_oob_size);
					aml_chip->aml_nand_dma_write(aml_chip, aml_chip->aml_nand_data_buf, nand_page_size + nand_oob_size, 0);
					/* Only send PAGEPROG when we own the cmdfunc;
					 * otherwise the MTD core issues it. */
					if (chip->cmdfunc == aml_nand_command)
						aml_chip->aml_nand_command(aml_chip, NAND_CMD_PAGEPROG, -1, -1, i);
					oob_buf += nand_oob_size;
					buf += nand_page_size;
				}
				else {
					error = -ENODEV;
					aml_nand_debug ("plane_num mistake\n");
					goto exit;
				}
			}
		}
	}
exit:
	return ;
}
/*
 * Read one virtual page WITH hardware ECC.
 *
 * Per populated chip (and per internal die when interleaving):
 *  - two-plane devices: read plane 0 then plane 1, each followed by a
 *    user-byte fetch and hardware ECC evaluation;
 *  - single-plane devices: one read, with an additional read-retry path
 *    (NEW_NAND_SUPPORT) that re-tunes the chip and retries the whole page.
 *
 * On MESON3/MESON6 an uncorrectable result is retried once with the
 * randomizer disabled (ran_mode temporarily forced to 0), and an
 * all-erased page (zero_cnt below ecc_max) is reported as 0xff-filled
 * instead of as a failure.
 *
 * Always returns 0 — ECC failures are reported only via
 * mtd->ecc_stats.failed so the upper layers keep going.
 */
static int aml_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int page)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	uint8_t *oob_buf = chip->oob_poi;
	unsigned nand_page_size = (1 << chip->page_shift);
	unsigned pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
	/* user bytes (OOB free bytes) transported per plane read */
	int user_byte_num = (((nand_page_size + chip->ecc.size - 1) / chip->ecc.size) * aml_chip->user_byte_mode);
	int error = 0, i = 0, stat = 0, j = 0, page_addr, internal_chipnr = 1;
	int readretry_failed_cnt = 0;
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
	/* remember the configured randomizer mode so retries can restore it */
	int ran_mode = aml_chip->ran_mode;
#endif
	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE)
		internal_chipnr = aml_chip->internal_chipnr;
	/* Clamp to the ECC-covered region of the page. */
	if (nand_page_size > chip->ecc.steps * chip->ecc.size) {
		nand_page_size = chip->ecc.steps * chip->ecc.size;
		user_byte_num = chip->ecc.steps;
	}
	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			readretry_failed_cnt = 0;
read_retry:
			page_addr = aml_chip->page_addr;
			for (j=0; j<internal_chipnr; j++) {
				if (j > 0) {
					/* Re-issue READ0 for each additional internal die. */
					page_addr = aml_chip->page_addr;
					page_addr |= (1 << aml_chip->internal_chip_shift) * j;
					aml_chip->aml_nand_select_chip(aml_chip, i);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
				}
				/* Wait for ready; retry once after 50ms before bailing. */
				if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
					printk ("read couldn`t found selected chip: %d ready\n", i);
					mdelay(50);
					if (!aml_chip->aml_nand_wait_devready(aml_chip, i))
					{
						printk ("read couldn`t found selected chip: %d ready\n", i);
						mdelay(100);
						error = -EBUSY;
						goto exit;
					}
				}
				/* Chips without a ready/busy line need READ0 repeated. */
				if (aml_chip->ops_mode & AML_CHIP_NONE_RB)
					chip->cmd_ctrl(mtd, NAND_CMD_READ0 & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
				if (aml_chip->plane_num == 2) {
					/* ---- plane 0 ---- */
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ1, 0x00, page_addr, i);
dma_retry_plane0:
					error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
					if (error){
						/* One delayed DMA retry before giving up. */
						mdelay(50);
						printk("aml nand read data ecc plane0 failed at page %d chip %d\n", page_addr, i);
						error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
						if(error){
							mdelay(100);
							goto exit;
						}
					}
					aml_chip->aml_nand_get_user_byte(aml_chip, oob_buf, user_byte_num);
					stat = aml_chip->aml_nand_hwecc_correct(aml_chip, buf, nand_page_size, oob_buf);
					if (stat < 0) {
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						/* Erased page: report all-0xff, not a failure. */
						if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
							memset(buf, 0xff, nand_page_size);
							memset(oob_buf, 0xff, user_byte_num);
							goto plane0_ff;
						}
						/* Retry once with the randomizer disabled. */
						if(ran_mode && aml_chip->ran_mode){
							//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
							aml_chip->ran_mode = 0;
							ndelay(300);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
							ndelay(500);
							goto dma_retry_plane0;
						}
#endif
						memset(buf, 0xff, nand_page_size);
						memset(oob_buf, 0xff, user_byte_num);
						mtd->ecc_stats.failed++;
						printk("aml nand read data ecc plane0 failed at page %d chip %d \n", page_addr, i);
					}
					else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						/* Near-limit correction counts are logged and
						 * counted as one extra corrected bitflip. */
						if(aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit){
							printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d]\n",\
							__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i);
							mtd->ecc_stats.corrected++;
						}
#endif
						mtd->ecc_stats.corrected += stat;
					}
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
plane0_ff:
					/* restore the configured randomizer mode */
					aml_chip->ran_mode = ran_mode;
#endif
					oob_buf += user_byte_num;
					buf += nand_page_size;
					/* ---- plane 1 ---- */
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ2, 0x00, page_addr, i);
dma_retry_plane1:
					error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
					if (error){
						printk("aml nand read data dma plane1 failed at page %d chip %d\n", page_addr, i);
						mdelay(50);
						error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
						if(error){
							mdelay(100);
							goto exit;
						}
					}
					aml_chip->aml_nand_get_user_byte(aml_chip, oob_buf, user_byte_num);
					stat = aml_chip->aml_nand_hwecc_correct(aml_chip, buf, nand_page_size, oob_buf);
					if (stat < 0) {
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						/* Erased page: report all-0xff and move on. */
						if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
							memset(buf, 0xff, nand_page_size);
							memset(oob_buf, 0xff, user_byte_num);
							oob_buf += user_byte_num;
							buf += nand_page_size;
							continue;
						}
						/* Retry once with the randomizer disabled. */
						if(ran_mode && aml_chip->ran_mode){
							//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
							aml_chip->ran_mode = 0;
							ndelay(300);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
							ndelay(500);
							goto dma_retry_plane1;
						}
#endif
						memset(buf, 0xff, nand_page_size);
						memset(oob_buf, 0xff, user_byte_num);
						mtd->ecc_stats.failed++;
						printk("aml nand read data ecc plane1 failed at page %d chip %d \n", page_addr, i);
					}
					else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						if(aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit) {
							printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d]\n",
							__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i);
							mtd->ecc_stats.corrected++;
						}
#endif
						mtd->ecc_stats.corrected += stat;
					}
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
					/* restore the configured randomizer mode */
					aml_chip->ran_mode = ran_mode;
#endif
					oob_buf += user_byte_num;
					buf += nand_page_size;
				}
				else if (aml_chip->plane_num == 1) {
dma_retry_3:
					error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
					if (error){
						printk("aml nand read data dma plane failed at page %d chip %d\n", page_addr, i);
						mdelay(50);
						error = aml_chip->aml_nand_dma_read(aml_chip, buf, nand_page_size, aml_chip->bch_mode);
						if(error){
							mdelay(100);
							goto exit;
						}
					}
					aml_chip->aml_nand_get_user_byte(aml_chip, oob_buf, user_byte_num);
					stat = aml_chip->aml_nand_hwecc_correct(aml_chip, buf, nand_page_size, oob_buf);
					if (stat < 0) {
						//mtd->ecc_stats.failed++;
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						/* Erased page: report all-0xff and move on. */
						if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
							memset(buf, 0xff, nand_page_size);
							memset(oob_buf, 0xff, user_byte_num);
							oob_buf += user_byte_num;
							buf += nand_page_size;
							continue;
						}
						/* Randomizer-off retry, only before any read-retry. */
						if(ran_mode && aml_chip->ran_mode && (readretry_failed_cnt == 0)){
							//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
							aml_chip->ran_mode = 0;
							ndelay(300);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
							ndelay(500);
							goto dma_retry_3;
						}
						aml_chip->ran_mode = ran_mode;
#endif
#ifdef NEW_NAND_SUPPORT
						/* Vendor read-retry: re-tune and re-read the page. */
						if((aml_chip->new_nand_info.type) && (readretry_failed_cnt++ < aml_chip->new_nand_info.read_rety_info.retry_cnt)){
							printk("aml nand read data ecc failed at page:%d blk %d chip %d, readretry_failed_cnt:%d\n",
							page_addr, (page_addr >> pages_per_blk_shift), i, readretry_failed_cnt);
							aml_chip->new_nand_info.read_rety_info.read_retry_handle(mtd, i);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
							goto read_retry;
						}
#endif
						memset(buf, 0xff, nand_page_size);
						memset(oob_buf, 0xff, user_byte_num);
						printk("########%s %d read ecc failed here at at page:%d, blk:%d chip[%d]\n", __func__, __LINE__, page_addr, (page_addr >> pages_per_blk_shift), i);
						mtd->ecc_stats.failed++;
					}
					else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						aml_chip->ran_mode = ran_mode;
#endif
#ifdef NEW_NAND_SUPPORT
						/* Near-limit or almost-exhausted retries: count one
						 * extra corrected bitflip to trigger scrubbing. */
						if((aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit) ||((readretry_failed_cnt > (aml_chip->new_nand_info.read_rety_info.retry_cnt-2)) && aml_chip->new_nand_info.type)){
							printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d], readretry_failed_cnt:%d\n",
							__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i, readretry_failed_cnt);
							mtd->ecc_stats.corrected++;
						}
#endif
						mtd->ecc_stats.corrected += stat;
					}
#ifdef NEW_NAND_SUPPORT
					/* Leave read-retry mode if it was entered. */
					if(readretry_failed_cnt && aml_chip->new_nand_info.read_rety_info.read_retry_exit){
						aml_chip->new_nand_info.read_rety_info.read_retry_exit(mtd, i);
					}
#endif
					oob_buf += user_byte_num;
					buf += nand_page_size;
				}
				else {
					error = -ENODEV;
					mdelay(100);
					goto exit;
				}
			}
		}
	}
exit:
	return 0; //do not return error when failed
}
/*
 * Write one virtual page WITH hardware ECC.
 *
 * Per populated chip (and per internal die when interleaving):
 *  - two-plane devices: load plane 0 (DUMMY_PROGRAM), wait ready, load
 *    plane 1, then commit with PAGEPROG (or CACHEDPROG when cached
 *    programming is active);
 *  - single-plane devices: single DMA write plus PAGEPROG/CACHEDPROG,
 *    the latter only when this driver owns chip->cmdfunc.
 *
 * NOTE(review): errors are collected in `error` but the function is void,
 * so failures are not reported to the caller.
 */
static void aml_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf)
{
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	uint8_t *oob_buf = chip->oob_poi;
	unsigned nand_page_size = (1 << chip->page_shift);
	/* user bytes (OOB free bytes) transported per plane write */
	int user_byte_num = (((nand_page_size + chip->ecc.size - 1) / chip->ecc.size) * aml_chip->user_byte_mode);
	int error = 0, i = 0, j = 0, page_addr, internal_chipnr = 1;

	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE)
		internal_chipnr = aml_chip->internal_chipnr;

	/* Fill the OOB area beyond the user bytes with a 0xa5 marker
	 * (presumably so unused spare bytes are recognizable — confirm). */
	memset(oob_buf + mtd->oobavail, 0xa5, user_byte_num * (mtd->writesize / nand_page_size));

	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			aml_chip->aml_nand_select_chip(aml_chip, i);
			page_addr = aml_chip->page_addr;
			if (i > 0) {
				/* Chips after the first must be re-primed with SEQIN. */
				if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
					printk ("chip: %d: busy\n", i);
					error = -EBUSY;
					goto exit;
				}
				page_addr = aml_chip->page_addr;
				aml_chip->aml_nand_command(aml_chip, NAND_CMD_SEQIN, 0, page_addr, i);
			}
			for (j=0; j<internal_chipnr; j++) {
				if (j > 0) {
					/* Re-issue SEQIN for each additional internal die. */
					page_addr = aml_chip->page_addr;
					page_addr |= (1 << aml_chip->internal_chip_shift) * j;
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_SEQIN, 0, page_addr, i);
				}
				if (aml_chip->plane_num == 2) {
					/* Plane 0: user bytes + data, held by DUMMY_PROGRAM. */
					aml_chip->aml_nand_set_user_byte(aml_chip, oob_buf, user_byte_num);
					error = aml_chip->aml_nand_dma_write(aml_chip, (unsigned char *)buf, nand_page_size, aml_chip->bch_mode);
					if (error)
						goto exit;
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_DUMMY_PROGRAM, -1, -1, i);
					oob_buf += user_byte_num;
					buf += nand_page_size;
					if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
						printk ("write couldn`t found selected chip: %d ready\n", i);
						error = -EBUSY;
						goto exit;
					}
					/* Plane 1: load, then commit both planes. */
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_WRITE2, 0x00, page_addr, i);
					aml_chip->aml_nand_set_user_byte(aml_chip, oob_buf, user_byte_num);
					error = aml_chip->aml_nand_dma_write(aml_chip, (unsigned char *)buf, nand_page_size, aml_chip->bch_mode);
					if (error)
						goto exit;
					if (aml_chip->cached_prog_status)
						aml_chip->aml_nand_command(aml_chip, NAND_CMD_CACHEDPROG, -1, -1, i);
					else
						aml_chip->aml_nand_command(aml_chip, NAND_CMD_PAGEPROG, -1, -1, i);
					oob_buf += user_byte_num;
					buf += nand_page_size;
				}
				else if (aml_chip->plane_num == 1) {
					aml_chip->aml_nand_set_user_byte(aml_chip, oob_buf, user_byte_num);
					error = aml_chip->aml_nand_dma_write(aml_chip, (unsigned char *)buf, nand_page_size, aml_chip->bch_mode);
					if (error)
						goto exit;
					/* Only issue the program command when this driver
					 * owns the cmdfunc; otherwise the MTD core does it. */
					if (chip->cmdfunc == aml_nand_command) {
						if (aml_chip->cached_prog_status)
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_CACHEDPROG, -1, -1, i);
						else
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_PAGEPROG, -1, -1, i);
					}
					oob_buf += user_byte_num;
					buf += nand_page_size;
				}
				else {
					error = -ENODEV;
					goto exit;
				}
			}
		}
	}
exit:
	return;
}
/*
 * Top-level page write entry (mtd write path).
 *
 * Issues SEQIN for @page, delegates the data/OOB transfer to the raw or
 * ECC write_page hook, then polls chip status for program completion.
 *
 * Returns 0 on success, -EIO when the chip reports NAND_STATUS_FAIL, and
 * -EFAULT when CONFIG_MTD_NAND_VERIFY_WRITE read-back miscompares.
 *
 * NOTE(review): the explicit PAGEPROG/CACHEDPROG commands are commented
 * out here because the hwecc write hook issues them itself; the cached
 * programming decision is likewise disabled (cached_prog_status is only
 * cleared below).
 */
static int aml_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int page, int cached, int raw)
{
	int status;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
	/*if ((cached) && (chip->options & NAND_CACHEPRG))
	aml_chip->cached_prog_status = 1;
	else
	aml_chip->cached_prog_status = 0;*/
	if (unlikely(raw))
		chip->ecc.write_page_raw(mtd, chip, buf);
	else
		chip->ecc.write_page(mtd, chip, buf);
	if (!cached || !(chip->options & NAND_CACHEPRG)) {
		//chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
		/*
		 * See if operation failed and additional status checks are
		 * available
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_WRITING, status, page);
		if (status & NAND_STATUS_FAIL) {
			printk("aml nand write failed at %d \n", page);
			return -EIO;
		}
	} else {
		//chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
	}
	aml_chip->cached_prog_status = 0;
#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
	/* Read the page back and compare against the source buffer.
	 * -EUCLEAN (corrected bitflips) is not treated as a failure. */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
	status = chip->ecc.read_page(mtd, chip, chip->buffers->databuf, page);
	if (status == -EUCLEAN)
		status = 0;
	chip->pagebuf = page;
	if (memcmp(buf, chip->buffers->databuf, mtd->writesize)) {
		printk("nand verify failed at %d \n", page);
		return -EFAULT;
	}
#endif
	return 0;
}
/*
 * Read @readlen OOB user bytes starting at @page into chip->oob_poi.
 *
 * The OOB user bytes on this controller are recovered as a by-product of
 * a full hardware-ECC page read, so each iteration DMAs a whole (plane-
 * sized) page into aml_chip->aml_nand_data_buf and extracts the user
 * bytes with aml_nand_get_user_byte().  Per-plane/interleave fan-out
 * mirrors aml_nand_read_page_hwecc.
 *
 * Recovery ladder on ECC failure (MESON3/6): treat mostly-erased pages as
 * blank (0xff fill), retry DMA once with randomization disabled, then
 * (NEW_NAND_SUPPORT) walk the vendor read-retry table via 'goto read_retry'.
 *
 * NOTE(review): always returns @readlen — even after the error paths that
 * jump to 'exit' — and DMA hard-failures return 0 instead of an error;
 * callers apparently cannot distinguish these (see 'return 0;//error;').
 */
static int aml_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page, int readlen)
{
	int32_t error = 0, i, stat = 0, j = 0, page_addr, user_byte_num, internal_chipnr = 1;
	unsigned dma_once_size;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	unsigned char *nand_buffer = aml_chip->aml_nand_data_buf;
	unsigned char *oob_buffer = chip->oob_poi;
	unsigned pages_per_blk_shift = (chip->phys_erase_shift - chip->page_shift);
	unsigned nand_page_size = (1 << chip->page_shift);
	//unsigned nand_read_size = ((readlen / (aml_chip->user_byte_mode * aml_chip->plane_num)) * chip->ecc.size);
	/* Main-area bytes that must be read to yield 'readlen' user bytes. */
	unsigned nand_read_size = ((readlen / aml_chip->user_byte_mode) * chip->ecc.size);
	unsigned read_chip_num = (((nand_read_size + (aml_chip->plane_num * nand_page_size) - 1) / (aml_chip->plane_num * nand_page_size)));
	int readretry_failed_cnt = 0;
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
	/* Saved randomizer state; restored after each per-page retry dance. */
	int ran_mode = aml_chip->ran_mode;
#endif
	if (nand_read_size >= nand_page_size)
		user_byte_num = (((nand_page_size + chip->ecc.size - 1) / chip->ecc.size) * aml_chip->user_byte_mode);
	else
		user_byte_num = (((nand_read_size + chip->ecc.size - 1) / chip->ecc.size) * aml_chip->user_byte_mode);
	page_addr = page;
	if (aml_chip->ops_mode & AML_INTERLEAVING_MODE) {
		/* Split the request between external chips and internal chips. */
		internal_chipnr = aml_chip->internal_chipnr;
		if (read_chip_num < internal_chipnr) {
			internal_chipnr = (read_chip_num + aml_chip->internal_chipnr - 1) / aml_chip->internal_chipnr;
			read_chip_num = 1;
		}
		else {
			read_chip_num = (read_chip_num + aml_chip->internal_chipnr - 1) / aml_chip->internal_chipnr;
		}
	}
	if (chip->cmdfunc == aml_nand_command)
		chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page_addr);
	else {
		aml_chip->aml_nand_select_chip(aml_chip, 0);
		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page_addr);
	}
	for (i=0; i<read_chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			readretry_failed_cnt = 0;
read_retry:
			page_addr = aml_chip->page_addr;
			if (i > 0) {
				aml_chip->aml_nand_select_chip(aml_chip, i);
				aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
			}
			for (j=0; j<internal_chipnr; j++) {
				if (j > 0) {
					/* Address the j-th internal chip of this die. */
					page_addr = aml_chip->page_addr;
					page_addr |= (1 << aml_chip->internal_chip_shift) * j;
					aml_chip->aml_nand_select_chip(aml_chip, i);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
				}
				/* Wait for ready; give a slow chip one extra 50ms grace. */
				if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
					mdelay(50);
					if (!aml_chip->aml_nand_wait_devready(aml_chip, i)) {
						printk ("read oob couldn`t found selected chip: %d ready\n", i);
						error = -EBUSY;
						mdelay(100);
						goto exit;
					}
				}
				/* No R/B line: re-send READ0 opcode to resume data output. */
				if (aml_chip->ops_mode & AML_CHIP_NONE_RB)
					chip->cmd_ctrl(mtd, NAND_CMD_READ0 & 0xff, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
				if (aml_chip->plane_num == 2) {
					/* ---- plane 0 ---- */
					dma_once_size = min(nand_read_size, nand_page_size);
					aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ1, 0x00, page_addr, i);
dma_retry_plane0:
					error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
					if (error)
					{
						printk("read oob dma failed at page %d\n", page_addr);
						mdelay(50);
						error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
						if(error)
						{
							printk("read oob dma failed again at page %d\n", page_addr);
							mdelay(100);
							return 0;//error;
						}
					}
					aml_chip->aml_nand_get_user_byte(aml_chip, oob_buffer, user_byte_num);
					stat = aml_chip->aml_nand_hwecc_correct(aml_chip, nand_buffer, dma_once_size, oob_buffer);
					if (stat < 0) {
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						/* Mostly-0 bit count below ecc_max: page is erased. */
						if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
							memset(oob_buffer, 0xff, user_byte_num);
							goto plane0_ff;
						}
						/* One retry with the randomizer turned off. */
						if(ran_mode && aml_chip->ran_mode){
							//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
							aml_chip->ran_mode = 0;
							ndelay(300);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
							ndelay(500);
							goto dma_retry_plane0;
						}
#endif
						memset(oob_buffer, 0xff, user_byte_num);
						mtd->ecc_stats.failed++;
						printk("aml nand read oob plane0 failed at page %d chip %d \n", page_addr, i);
					}
					else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						if(aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit){
							printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d]\n",
							__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i);
							mtd->ecc_stats.corrected++;
						}
#endif
						mtd->ecc_stats.corrected += stat;
					}
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
plane0_ff:
					aml_chip->ran_mode = ran_mode;
#endif
					oob_buffer += user_byte_num;
					nand_read_size -= dma_once_size;
					/* ---- plane 1, if more data was requested ---- */
					if (nand_read_size > 0) {
						dma_once_size = min(nand_read_size, nand_page_size);
						aml_chip->aml_nand_command(aml_chip, NAND_CMD_TWOPLANE_READ2, 0x00, page_addr, i);
dma_retry_plane1:
						error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
						if (error){
							printk("read oob dma failed at page %d\n", page_addr);
							mdelay(50);
							error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
							if(error){
								printk("read oob dma failed again at page %d\n", page_addr);
								mdelay(100);
								return 0;//error;
							}
						}
						aml_chip->aml_nand_get_user_byte(aml_chip, oob_buffer, user_byte_num);
						stat = aml_chip->aml_nand_hwecc_correct(aml_chip, nand_buffer, dma_once_size, oob_buffer);
						if (stat < 0) {
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
							if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
								memset(oob_buffer, 0xff, user_byte_num);
								oob_buffer += user_byte_num;
								nand_read_size -= dma_once_size;
								continue;
							}
							if(ran_mode && aml_chip->ran_mode){
								//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
								aml_chip->ran_mode = 0;
								ndelay(300);
								aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
								ndelay(500);
								goto dma_retry_plane1;
							}
#endif
							memset(oob_buffer, 0xff, user_byte_num);
							mtd->ecc_stats.failed++;
							printk("aml nand read oob plane1 failed at page %d chip %d \n", page_addr, i);
						}
						else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
							if(aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit){
								printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d]\n",
								__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i);
								mtd->ecc_stats.corrected++;
							}
#endif
							mtd->ecc_stats.corrected += stat;
						}
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						aml_chip->ran_mode = ran_mode;
#endif
						oob_buffer += user_byte_num;
						nand_read_size -= dma_once_size;
					}
				}
				else if (aml_chip->plane_num == 1) {
					dma_once_size = min(nand_read_size, nand_page_size);
dma_retry:
					error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
					if (error){
						printk("read oob dma failed at page %d\n", page_addr);
						mdelay(50);
						error = aml_chip->aml_nand_dma_read(aml_chip, nand_buffer, dma_once_size, aml_chip->bch_mode);
						if(error){
							printk("read oob dma failed again at page %d\n", page_addr);
							mdelay(100);
							return 0;//error;
						}
					}
					aml_chip->aml_nand_get_user_byte(aml_chip, oob_buffer, user_byte_num);
					stat = aml_chip->aml_nand_hwecc_correct(aml_chip, nand_buffer, dma_once_size, oob_buffer);
					if (stat < 0) {
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						if(aml_chip->ran_mode && (aml_chip->zero_cnt < aml_chip->ecc_max)){
							memset(oob_buffer, 0xff, user_byte_num);
							oob_buffer += user_byte_num;
							nand_read_size -= dma_once_size;
							continue;
						}
						/* Randomizer-off retry only before any read-retry pass. */
						if(ran_mode && aml_chip->ran_mode && (readretry_failed_cnt == 0)){
							//printk("%s dma retry here at page:%d blk %d chip %d\n", __func__, page_addr, (page_addr >> pages_per_blk_shift), i);
							aml_chip->ran_mode = 0;
							ndelay(300);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_RNDOUT, 0, -1, i);
							ndelay(500);
							goto dma_retry;
						}
						aml_chip->ran_mode = ran_mode;
#endif
#ifdef NEW_NAND_SUPPORT
						/* Vendor read-retry: shift thresholds and re-read page. */
						if((aml_chip->new_nand_info.type) && (readretry_failed_cnt++ < aml_chip->new_nand_info.read_rety_info.retry_cnt)){
							printk("aml nand read oob failed at page:%d blk %d chip %d, readretry_failed_cnt:%d\n",
							page_addr, (page_addr >> pages_per_blk_shift), i, readretry_failed_cnt);
							aml_chip->new_nand_info.read_rety_info.read_retry_handle(mtd, i);
							aml_chip->aml_nand_command(aml_chip, NAND_CMD_READ0, 0, page_addr, i);
							goto read_retry;
						}
#endif
						printk("########%s %d read oob failed here at at page:%d, blk:%d chip[%d]\n", __func__, __LINE__, page_addr, (page_addr >> pages_per_blk_shift), i);
						memset(oob_buffer, 0xff, user_byte_num);
						mtd->ecc_stats.failed++;
					}
					else{
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
						aml_chip->ran_mode = ran_mode;
#endif
#ifdef NEW_NAND_SUPPORT
						if((aml_chip->ecc_cnt_cur > aml_chip->ecc_cnt_limit) ||((readretry_failed_cnt > (aml_chip->new_nand_info.read_rety_info.retry_cnt-2)) && aml_chip->new_nand_info.type)){
							printk("%s line:%d uncorrected ecc_cnt_cur:%d, and limit:%d and at page:%d, blk:%d chip[%d], readretry_failed_cnt:%d\n",
							__func__, __LINE__, aml_chip->ecc_cnt_cur, aml_chip->ecc_cnt_limit, page_addr, (page_addr >> pages_per_blk_shift), i, readretry_failed_cnt);
							mtd->ecc_stats.corrected++;
						}
#endif
						mtd->ecc_stats.corrected += stat;
					}
#ifdef NEW_NAND_SUPPORT
					/* Restore default read thresholds after a retry sequence. */
					if(readretry_failed_cnt && aml_chip->new_nand_info.read_rety_info.read_retry_exit){
						aml_chip->new_nand_info.read_rety_info.read_retry_exit(mtd, i);
					}
#endif
					oob_buffer += user_byte_num;
					nand_read_size -= dma_once_size;
				}
				else {
					error = -ENODEV;
					mdelay(100);
					goto exit;
				}
			}
		}
	}
exit:
	return readlen;
}
  2073. static int aml_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
  2074. {
  2075. printk("our host controller`s structure couldn`t support oob write\n");
  2076. BUG();
  2077. return 0;
  2078. }
/*
 * Bad-block check for the block containing @ofs.
 *
 * Order of checks:
 *   1. boot partition (NAND_BOOT_NAME) is never reported bad;
 *   2. the in-RAM block_status table (from the driver's BBT) answers
 *      immediately when it has a definite GOOD/BAD verdict;
 *   3. otherwise the first and last pages of the block are read and the
 *      factory bad-block marker (chip->badblockpos) is inspected.
 *
 * Returns 0 for a good block, positive EFAULT for a bad one (this driver
 * uses the positive value as the "bad" flag rather than -EFAULT).
 */
static int aml_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	struct nand_chip * chip = mtd->priv;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	struct aml_nand_platform *plat = aml_chip->platform;
	struct mtd_oob_ops aml_oob_ops;
	int32_t ret = 0, read_cnt, page, mtd_erase_shift, blk_addr, pages_per_blk;
	loff_t addr;
	if ((!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME)))/* && ((chip->ecc.read_page == aml_nand_read_page_hwecc) || (!getchip))*/)
		return 0;
	mtd_erase_shift = fls(mtd->erasesize) - 1;
	blk_addr = (int)(ofs >> mtd_erase_shift);
	/* Fast path: consult the cached bad-block table if present. */
	if (aml_chip->block_status != NULL) {
		if (aml_chip->block_status[blk_addr] == NAND_BLOCK_BAD) {
			printk(" NAND bbt detect Bad block at %llx \n", (uint64_t)ofs);
			return EFAULT;
		}
		else if (aml_chip->block_status[blk_addr] == NAND_BLOCK_GOOD) {
			return 0;
		}
	}
	/* Invalidate the page cache before probing the media directly. */
	chip->pagebuf = -1;
	pages_per_blk = (1 << (chip->phys_erase_shift - chip->page_shift));
	if (getchip) {
		/* Full mtd read_oob path (handles chip selection itself). */
		aml_oob_ops.mode = MTD_OOB_AUTO;
		aml_oob_ops.len = mtd->writesize;
		aml_oob_ops.ooblen = mtd->oobavail;
		aml_oob_ops.ooboffs = chip->ecc.layout->oobfree[0].offset;
		aml_oob_ops.datbuf = chip->buffers->databuf;
		aml_oob_ops.oobbuf = chip->oob_poi;
		/* read_cnt==0 -> first page of block, read_cnt==1 -> last page. */
		for (read_cnt=0; read_cnt<2; read_cnt++) {
			addr = ofs + (pages_per_blk - 1) * read_cnt * mtd->writesize;
			ret = mtd->read_oob(mtd, addr, &aml_oob_ops);
			if (ret == -EUCLEAN)
				ret = 0;
			if (ret < 0) {
				printk(" NAND detect Bad block at %llx \n", (uint64_t)addr);
				return EFAULT;
			}
			if (aml_oob_ops.oobbuf[chip->badblockpos] == 0xFF)
				continue;
			if (aml_oob_ops.oobbuf[chip->badblockpos] == 0) {
				/* Marker is 0: bad only if the whole OOB is zeroed
				 * (i.e. it matches an all-zero reference buffer). */
				memset(aml_chip->aml_nand_data_buf, 0, aml_oob_ops.ooblen);
				if (!memcmp(aml_chip->aml_nand_data_buf, aml_oob_ops.oobbuf, aml_oob_ops.ooblen)) {
					printk(" NAND detect Bad block at %llx \n", (uint64_t)addr);
					return EFAULT;
				}
			}
		}
	}
	else {
		/* Lighter path: call the ecc.read_oob hook directly. */
		for (read_cnt=0; read_cnt<2; read_cnt++) {
			addr = ofs + (pages_per_blk - 1) * read_cnt * mtd->writesize;
			page = (int)(addr >> chip->page_shift);
			ret = chip->ecc.read_oob(mtd, chip, page, mtd->oobavail);
			if (ret == -EUCLEAN)
				ret = 0;
			if (ret < 0)
				return EFAULT;
			if (chip->oob_poi[chip->badblockpos] == 0xFF)
				return 0;
			if (chip->oob_poi[chip->badblockpos] == 0) {
				memset(aml_chip->aml_nand_data_buf, 0, (mtd->writesize + mtd->oobsize));
				if (!memcmp(aml_chip->aml_nand_data_buf + mtd->writesize, chip->oob_poi, mtd->oobavail)) {
					printk(" NAND detect Bad block at %llx \n", (uint64_t)addr);
					return EFAULT;
				}
			}
		}
	}
	return 0;
}
/*
 * Mark the block containing @ofs as bad.
 *
 * If the RAM block_status table already says BAD this is a no-op.  If it
 * says GOOD, the table is flipped to BAD, the block is appended to the
 * first free slot of the env-stored bbt, and the env area is rewritten.
 * In all cases the block's first page (data + OOB) is then programmed
 * with zeros so the media itself carries the bad marker.
 *
 * Returns the result of mtd->write_oob() for the zero-write.
 */
static int aml_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip * chip = mtd->priv;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	struct mtd_oob_ops aml_oob_ops;
	int blk_addr, mtd_erase_shift, j;
	mtd_erase_shift = fls(mtd->erasesize) - 1;
	blk_addr = (int)(ofs >> mtd_erase_shift);
	if (aml_chip->block_status != NULL) {
		if (aml_chip->block_status[blk_addr] == NAND_BLOCK_BAD) {
			/* Already recorded: nothing more to do. */
			return 0;
		}
		else if (aml_chip->block_status[blk_addr] == NAND_BLOCK_GOOD) {
			aml_chip->block_status[blk_addr] = NAND_BLOCK_BAD;
			/* Record the new bad block in the first empty bbt slot
			 * and persist it to the env partition. */
			for (j=0; j<MAX_BAD_BLK_NUM; j++) {
				if (aml_chip->aml_nandenv_info->nand_bbt_info.nand_bbt[j] == 0) {
					aml_chip->aml_nandenv_info->nand_bbt_info.nand_bbt[j] = blk_addr;
					if (aml_nand_update_env(mtd))
						printk("update env bbt failed %d \n", blk_addr);
					break;
				}
			}
		}
	}
	/* Zero the first page's data and OOB on the media itself. */
	aml_oob_ops.mode = MTD_OOB_AUTO;
	aml_oob_ops.len = mtd->writesize;
	aml_oob_ops.ooblen = mtd->oobavail;
	aml_oob_ops.ooboffs = chip->ecc.layout->oobfree[0].offset;
	aml_oob_ops.datbuf = chip->buffers->databuf;
	aml_oob_ops.oobbuf = chip->oob_poi;
	chip->pagebuf = -1;
	memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
	memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
	return mtd->write_oob(mtd, ofs, &aml_oob_ops);
}
  2186. #ifdef CONFIG_HAS_EARLYSUSPEND
  2187. static void aml_platform_nand_suspend(struct early_suspend *nand_early_suspend)
  2188. {
  2189. struct aml_nand_chip *aml_chip = (struct aml_nand_chip *)nand_early_suspend->param;
  2190. struct nand_chip *chip = &aml_chip->chip;
  2191. spinlock_t *lock = &chip->controller->lock;
  2192. if (nand_erarly_suspend_flag == 1)
  2193. return;
  2194. spin_lock(lock);
  2195. nand_erarly_suspend_flag = 1;
  2196. spin_unlock(lock);
  2197. printk("aml_m1_nand_early suspend entered\n");
  2198. return;
  2199. }
  2200. static void aml_platform_nand_resume(struct early_suspend *nand_early_suspend)
  2201. {
  2202. struct aml_nand_chip *aml_chip = (struct aml_nand_chip *)nand_early_suspend->param;
  2203. if (((READ_CBUS_REG(HHI_MPEG_CLK_CNTL)&(1<<8))) && (nand_erarly_suspend_flag == 2)) {
  2204. aml_chip->aml_nand_adjust_timing(aml_chip);
  2205. nand_erarly_suspend_flag = 0;
  2206. }
  2207. printk("aml_m1_nand_early resume entered\n");
  2208. return;
  2209. }
  2210. #endif
  2211. static int aml_nand_suspend(struct mtd_info *mtd)
  2212. {
  2213. printk("aml_nand suspend entered\n");
  2214. return 0;
  2215. }
  2216. static void aml_nand_resume(struct mtd_info *mtd)
  2217. {
  2218. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  2219. if (((READ_CBUS_REG(HHI_MPEG_CLK_CNTL)&(1<<8))) && (nand_erarly_suspend_flag == 2)) {
  2220. aml_chip->aml_nand_adjust_timing(aml_chip);
  2221. nand_erarly_suspend_flag = 0;
  2222. }
  2223. printk("aml_m1_nand resume entered\n");
  2224. return;
  2225. }
  2226. static struct aml_nand_flash_dev *aml_nand_get_flash_type(struct mtd_info *mtd,
  2227. struct nand_chip *chip,
  2228. int busw, int *maf_id)
  2229. {
  2230. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  2231. struct aml_nand_platform *plat = aml_chip->platform;
  2232. struct aml_nand_flash_dev *type = NULL;
  2233. int i, maf_idx;
  2234. u8 dev_id[MAX_ID_LEN];
  2235. #ifdef NEW_NAND_SUPPORT
  2236. u8 dev_id_hynix_26nm_8g[MAX_ID_LEN] = {NAND_MFR_HYNIX, 0xde, 0x94, 0xd2, 0x04, 0x43};
  2237. u8 dev_id_hynix_26nm_4g[MAX_ID_LEN] = {NAND_MFR_HYNIX, 0xd7, 0x94, 0xda, 0x74, 0xc3};
  2238. u8 dev_id_toshiba_24nm_4g[MAX_ID_LEN] = {NAND_MFR_TOSHIBA, 0xD7, 0x94, 0x32, 0x76, 0x56};
  2239. u8 dev_id_toshiba_24nm_8g[MAX_ID_LEN] = {NAND_MFR_TOSHIBA, 0xDE, 0x94, 0x82, 0x76, 0x56};
  2240. #endif
  2241. //int tmp_id, tmp_manf;
  2242. /* Send the command for reading device ID */
  2243. chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
  2244. /* Read manufacturer and device IDs */
  2245. for (i=0; i<MAX_ID_LEN; i++) {
  2246. dev_id[i] = chip->read_byte(mtd);
  2247. }
  2248. *maf_id = dev_id[0];
  2249. printk("NAND device id: %x %x %x %x %x %x \n", dev_id[0], dev_id[1], dev_id[2], dev_id[3], dev_id[4], dev_id[5]);
  2250. /* Lookup the flash id */
  2251. for (i = 0; aml_nand_flash_ids[i].name != NULL; i++) {
  2252. if(!strncmp((char*)aml_nand_flash_ids[i].id, (char*)dev_id, strlen((const char*)aml_nand_flash_ids[i].id))){
  2253. type = &aml_nand_flash_ids[i];
  2254. break;
  2255. }
  2256. }
  2257. if (!type) {
  2258. if (plat->nand_flash_dev) {
  2259. if(!strncmp((char*)plat->nand_flash_dev->id, (char*)dev_id, strlen((const char*)plat->nand_flash_dev->id))){
  2260. type = plat->nand_flash_dev;
  2261. }
  2262. }
  2263. if (!type)
  2264. return ERR_PTR(-ENODEV);
  2265. }
  2266. #ifdef NEW_NAND_SUPPORT
  2267. memset(&aml_chip->new_nand_info, 0, sizeof(struct new_tech_nand_t));
  2268. if(!strncmp((char*)type->id, (char*)dev_id_hynix_26nm_8g, strlen((const char*)aml_nand_flash_ids[i].id))){
  2269. aml_chip->new_nand_info.type = 1;
  2270. printk("aml_chip->hynix_new_nand_type =: %d \n", aml_chip->new_nand_info.type);
  2271. //read retry
  2272. aml_chip->new_nand_info.read_rety_info.reg_cnt = 4;
  2273. aml_chip->new_nand_info.read_rety_info.retry_cnt = 6;
  2274. aml_chip->new_nand_info.read_rety_info.reg_addr[0] = 0xAC;
  2275. aml_chip->new_nand_info.read_rety_info.reg_addr[1] = 0xAD;
  2276. aml_chip->new_nand_info.read_rety_info.reg_addr[2] = 0xAE;
  2277. aml_chip->new_nand_info.read_rety_info.reg_addr[3] = 0xAF;
  2278. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][0] = 0;
  2279. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][1] = 0x06;
  2280. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][2] = 0x0A;
  2281. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][3] = 0x06;
  2282. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][0] = READ_RETRY_ZERO;
  2283. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][1] = -0x03;
  2284. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][2] = -0x07;
  2285. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][3] = -0x08;
  2286. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][0] = READ_RETRY_ZERO;
  2287. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][1] = -0x06;
  2288. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][2] = -0x0D;
  2289. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][3] = -0x0F;
  2290. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][0] = READ_RETRY_ZERO;
  2291. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][1] = -0x0B;
  2292. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][2] = -0x14;
  2293. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][3] = -0x17;
  2294. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][0] = READ_RETRY_ZERO;
  2295. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][1] = READ_RETRY_ZERO;
  2296. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][2] = -0x1A;
  2297. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][3] = -0x1E;
  2298. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][0] = READ_RETRY_ZERO;
  2299. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][1] = READ_RETRY_ZERO;
  2300. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][2] = -0x20;
  2301. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][3] = -0x25;
  2302. aml_chip->new_nand_info.slc_program_info.reg_cnt = 5;
  2303. aml_chip->new_nand_info.slc_program_info.reg_addr[0] = 0xA4; //not same
  2304. aml_chip->new_nand_info.slc_program_info.reg_addr[1] = 0xA5;
  2305. aml_chip->new_nand_info.slc_program_info.reg_addr[2] = 0xB0;
  2306. aml_chip->new_nand_info.slc_program_info.reg_addr[3] = 0xB1;
  2307. aml_chip->new_nand_info.slc_program_info.reg_addr[4] = 0xC9;
  2308. aml_chip->new_nand_info.slc_program_info.reg_offset_value[0] = 0x25; //not same
  2309. aml_chip->new_nand_info.slc_program_info.reg_offset_value[1] = 0x25;
  2310. aml_chip->new_nand_info.slc_program_info.reg_offset_value[2] = 0x25;
  2311. aml_chip->new_nand_info.slc_program_info.reg_offset_value[3] = 0x25;
  2312. aml_chip->new_nand_info.slc_program_info.reg_offset_value[4] = 0x01;
  2313. aml_chip->new_nand_info.read_rety_info.get_default_value = aml_nand_get_read_default_value_hynix;
  2314. aml_chip->new_nand_info.read_rety_info.read_retry_handle = aml_nand_read_retry_handle_hynix;
  2315. aml_chip->new_nand_info.read_rety_info.set_default_value= aml_nand_set_readretry_default_value_hynix;
  2316. aml_chip->new_nand_info.slc_program_info.enter_enslc_mode = aml_nand_enter_enslc_mode_hynix;
  2317. aml_chip->new_nand_info.slc_program_info.exit_enslc_mode = aml_nand_exit_enslc_mode_hynix;
  2318. aml_chip->new_nand_info.slc_program_info.get_default_value = aml_nand_get_slc_default_value_hynix;
  2319. }
  2320. else if(!strncmp((char*)type->id, (char*)dev_id_hynix_26nm_4g, strlen((const char*)aml_nand_flash_ids[i].id))){
  2321. aml_chip->new_nand_info.type = 2;
  2322. printk("aml_chip->hynix_new_nand_type =: %d \n", aml_chip->new_nand_info.type);
  2323. //read retry
  2324. aml_chip->new_nand_info.read_rety_info.reg_cnt = 4;
  2325. aml_chip->new_nand_info.read_rety_info.retry_cnt = 6;
  2326. aml_chip->new_nand_info.read_rety_info.reg_addr[0] = 0xA7; //not same
  2327. aml_chip->new_nand_info.read_rety_info.reg_addr[1] = 0xAD;
  2328. aml_chip->new_nand_info.read_rety_info.reg_addr[2] = 0xAE;
  2329. aml_chip->new_nand_info.read_rety_info.reg_addr[3] = 0xAF;
  2330. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][0] = 0;
  2331. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][1] = 0x06;
  2332. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][2] = 0x0A;
  2333. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][3] = 0x06;
  2334. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][0] = READ_RETRY_ZERO;
  2335. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][1] = -0x03;
  2336. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][2] = -0x07;
  2337. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][3] = -0x08;
  2338. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][0] = READ_RETRY_ZERO;
  2339. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][1] = -0x06;
  2340. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][2] = -0x0D;
  2341. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][3] = -0x0F;
  2342. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][0] = READ_RETRY_ZERO;
  2343. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][1] = -0x09; //not same
  2344. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][2] = -0x14;
  2345. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][3] = -0x17;
  2346. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][0] = READ_RETRY_ZERO;
  2347. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][1] = READ_RETRY_ZERO;
  2348. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][2] = -0x1A;
  2349. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][3] = -0x1E;
  2350. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][0] = READ_RETRY_ZERO;
  2351. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][1] = READ_RETRY_ZERO;
  2352. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][2] = -0x20;
  2353. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][3] = -0x25;
  2354. aml_chip->new_nand_info.slc_program_info.reg_cnt = 5;
  2355. aml_chip->new_nand_info.slc_program_info.reg_addr[0] = 0xA0; //not same
  2356. aml_chip->new_nand_info.slc_program_info.reg_addr[1] = 0xA1;
  2357. aml_chip->new_nand_info.slc_program_info.reg_addr[2] = 0xB0;
  2358. aml_chip->new_nand_info.slc_program_info.reg_addr[3] = 0xB1;
  2359. aml_chip->new_nand_info.slc_program_info.reg_addr[4] = 0xC9;
  2360. aml_chip->new_nand_info.slc_program_info.reg_offset_value[0] = 0x26; //not same
  2361. aml_chip->new_nand_info.slc_program_info.reg_offset_value[1] = 0x26;
  2362. aml_chip->new_nand_info.slc_program_info.reg_offset_value[2] = 0x26;
  2363. aml_chip->new_nand_info.slc_program_info.reg_offset_value[3] = 0x26;
  2364. aml_chip->new_nand_info.slc_program_info.reg_offset_value[4] = 0x01;
  2365. aml_chip->new_nand_info.read_rety_info.get_default_value = aml_nand_get_read_default_value_hynix;
  2366. aml_chip->new_nand_info.read_rety_info.read_retry_handle = aml_nand_read_retry_handle_hynix;
  2367. aml_chip->new_nand_info.read_rety_info.set_default_value= aml_nand_set_readretry_default_value_hynix;
  2368. aml_chip->new_nand_info.slc_program_info.enter_enslc_mode = aml_nand_enter_enslc_mode_hynix;
  2369. aml_chip->new_nand_info.slc_program_info.exit_enslc_mode = aml_nand_exit_enslc_mode_hynix;
  2370. aml_chip->new_nand_info.slc_program_info.get_default_value = aml_nand_get_slc_default_value_hynix;
  2371. }
  2372. else if((!strncmp((char*)type->id, (char*)dev_id_toshiba_24nm_4g, strlen((const char*)aml_nand_flash_ids[i].id)))
  2373. ||(!strncmp((char*)type->id, (char*)dev_id_toshiba_24nm_8g, strlen((const char*)aml_nand_flash_ids[i].id)))){
  2374. aml_chip->new_nand_info.type = TOSHIBA_24NM;
  2375. aml_chip->new_nand_info.read_rety_info.reg_addr[0] = 0x04;
  2376. aml_chip->new_nand_info.read_rety_info.reg_addr[1] = 0x05;
  2377. aml_chip->new_nand_info.read_rety_info.reg_addr[2] = 0x06;
  2378. aml_chip->new_nand_info.read_rety_info.reg_addr[3] = 0x07;
  2379. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][0] = 0;
  2380. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][1] = 0;
  2381. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][2] = 0;
  2382. aml_chip->new_nand_info.read_rety_info.reg_offset_value[0][3] = 0;
  2383. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][0] = 0x04;
  2384. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][1] = 0x04;
  2385. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][2] = 0x04;
  2386. aml_chip->new_nand_info.read_rety_info.reg_offset_value[1][3] = 0x04;
  2387. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][0] = 0x7c;
  2388. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][1] = 0x7c;
  2389. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][2] = 0x7c;
  2390. aml_chip->new_nand_info.read_rety_info.reg_offset_value[2][3] = 0x7c;
  2391. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][0] = 0x78;
  2392. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][1] = 0x78;
  2393. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][2] = 0x78;
  2394. aml_chip->new_nand_info.read_rety_info.reg_offset_value[3][3] = 0x78;
  2395. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][0] = 0x74;
  2396. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][1] = 0x74;
  2397. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][2] = 0x74;
  2398. aml_chip->new_nand_info.read_rety_info.reg_offset_value[4][3] = 0x74;
  2399. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][0] = 0x08;
  2400. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][1] = 0x08;
  2401. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][2] = 0x08;
  2402. aml_chip->new_nand_info.read_rety_info.reg_offset_value[5][3] = 0x08;
  2403. aml_chip->new_nand_info.read_rety_info.reg_cnt = 4;
  2404. aml_chip->new_nand_info.read_rety_info.retry_cnt = 6;
  2405. aml_chip->new_nand_info.read_rety_info.read_retry_handle = aml_nand_read_retry_handle_toshiba;
  2406. aml_chip->new_nand_info.read_rety_info.read_retry_exit = aml_nand_read_retry_exit_toshiba;
  2407. }
  2408. #endif
  2409. if (!mtd->name)
  2410. mtd->name = type->name;
  2411. chip->chipsize = type->chipsize;
  2412. chip->chipsize = chip->chipsize << 20;
  2413. /* Newer devices have all the information in additional id bytes */
  2414. if (!type->pagesize) {
  2415. int extid;
  2416. /* The 3rd id byte holds MLC / multichip data */
  2417. chip->cellinfo = chip->read_byte(mtd);
  2418. /* The 4th id byte is the important one */
  2419. extid = chip->read_byte(mtd);
  2420. /* Calc pagesize */
  2421. mtd->writesize = 1024 << (extid & 0x3);
  2422. extid >>= 2;
  2423. /* Calc oobsize */
  2424. mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
  2425. extid >>= 2;
  2426. /* Calc blocksize. Blocksize is multiples of 64KiB */
  2427. mtd->erasesize = (64 * 1024) << (extid & 0x03);
  2428. extid >>= 2;
  2429. /* Get buswidth information */
  2430. busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
  2431. } else {
  2432. /*
  2433. * Old devices have chip data hardcoded in the device id table
  2434. */
  2435. mtd->erasesize = type->erasesize;
  2436. mtd->writesize = type->pagesize;
  2437. mtd->oobsize = type->oobsize;
  2438. busw = type->options & NAND_BUSW_OPTIONS_MASK;
  2439. }
  2440. /* Try to identify manufacturer */
  2441. for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
  2442. if (nand_manuf_ids[maf_idx].id == *maf_id)
  2443. break;
  2444. }
  2445. /*
  2446. * Check, if buswidth is correct. Hardware drivers should set
  2447. * chip correct !
  2448. */
  2449. if (busw != (chip->options & NAND_BUSWIDTH_16)) {
  2450. printk(KERN_INFO "NAND device: Manufacturer ID:"
  2451. " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
  2452. dev_id[0], nand_manuf_ids[maf_idx].name, mtd->name);
  2453. printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
  2454. (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
  2455. busw ? 16 : 8);
  2456. return ERR_PTR(-EINVAL);
  2457. }
  2458. /* Calculate the address shift from the page size */
  2459. chip->page_shift = ffs(mtd->writesize) - 1;
  2460. /* Convert chipsize to number of pages per chip -1. */
  2461. chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
  2462. chip->bbt_erase_shift = chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
  2463. chip->chip_shift = ffs(chip->chipsize) - 1;
  2464. /* Set the bad block position */
  2465. chip->badblockpos = AML_BADBLK_POS;
  2466. /* Get chip options, preserve non chip based options */
  2467. //chip->options &= ~NAND_CHIPOPTIONS_MSK;
  2468. //chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
  2469. /*
  2470. * Set chip as a default. Board drivers can override it, if necessary
  2471. */
  2472. chip->options |= NAND_NO_AUTOINCR;
  2473. /* Check if chip is a not a samsung device. Do not clear the
  2474. * options for chips which are not having an extended id.
  2475. */
  2476. //if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
  2477. //chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
  2478. printk(KERN_INFO "NAND device: Manufacturer ID:"
  2479. " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, dev_id[0],
  2480. nand_manuf_ids[maf_idx].name, type->name);
  2481. return type;
  2482. }
/*
 * Identify the NAND device(s) attached to the controller.
 *
 * Resets chip 0, matches it against the Amlogic flash table via
 * aml_nand_get_flash_type(), probes the remaining chip-enables for
 * identical chips, optionally programs the ONFI timing mode, and then
 * derives the aggregate geometry (chip_shift, pagemask, mtd sizes) from
 * the number of valid chips found.  Finally installs the Amlogic
 * command/wait/erase/write hooks on the generic nand_chip.
 *
 * Returns 0 on success or a negative errno when no device is found.
 */
static int aml_nand_scan_ident(struct mtd_info *mtd, int maxchips)
{
	int i, busw, nand_maf_id, valid_chip_num = 1;
	struct nand_chip *chip = mtd->priv;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	struct aml_nand_flash_dev *aml_type;
	u8 dev_id[MAX_ID_LEN], onfi_features[4];
	unsigned temp_chip_shift;
	/* Get buswidth to select the correct functions */
	busw = chip->options & NAND_BUSWIDTH_16;
	/* Select the device */
	chip->select_chip(mtd, 0);
	//reset chip for some nand need reset after power up
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
	aml_chip->aml_nand_wait_devready(aml_chip, 0);
	/* Read the flash type */
	aml_type = aml_nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
	if (IS_ERR(aml_type)) {
		printk(KERN_WARNING "No NAND device found!!!\n");
		chip->select_chip(mtd, -1);
		return PTR_ERR(aml_type);
	}
	/* READID at address 0x20 returns the ONFI signature ("ONFI") on
	 * ONFI-compliant parts; use it to decide whether to honor the
	 * table's onfi_mode. */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
	for (i=0; i<MAX_ID_LEN; i++) {
		dev_id[i] = chip->read_byte(mtd);
	}
	if(!memcmp((char*)dev_id, "ONFI", 4))
		aml_chip->onfi_mode = aml_type->onfi_mode;
	aml_chip->T_REA = aml_type->T_REA;
	aml_chip->T_RHOH = aml_type->T_RHOH;
	aml_chip->mfr_type = aml_type->id[0];
	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		aml_chip->aml_nand_select_chip(aml_chip, i);
		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		aml_chip->aml_nand_wait_devready(aml_chip, i);
		/* Send the command for reading device ID */
		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
		/* Read manufacturer and device IDs */
		if (nand_maf_id != chip->read_byte(mtd) || aml_type->id[1] != chip->read_byte(mtd))
		//if (nand_maf_id != dev_id[0] || aml_type->id[1] != dev_id[1])
			aml_chip->valid_chip[i] = 0;
		else
			valid_chip_num ++;
	}
	if (i > 1) {
		printk(KERN_INFO "%d NAND chips detected\n", valid_chip_num);
		/*if ((aml_chip->valid_chip[1] == 0) && (aml_chip->valid_chip[2] == 1)) {
		printk("ce1 and ce2 connected\n");
		aml_chip->chip_enable[2] = (aml_chip->chip_enable[1] & aml_chip->chip_enable[2]);
		}*/
	}
	/* Program the ONFI timing mode and read it back; fall back to the
	 * conservative default timings when the set-feature did not stick. */
	if (aml_chip->onfi_mode) {
		aml_nand_set_onfi_features(aml_chip, (uint8_t *)(&aml_chip->onfi_mode), ONFI_TIMING_ADDR);
		aml_nand_get_onfi_features(aml_chip, onfi_features, ONFI_TIMING_ADDR);
		if (onfi_features[0] != aml_chip->onfi_mode) {
			aml_chip->T_REA = DEFAULT_T_REA;
			aml_chip->T_RHOH = DEFAULT_T_RHOH;
			printk("onfi timing mode set failed: %x\n", onfi_features[0]);
		}
	}
	/* Store the number of chips and calc total size for mtd */
	chip->numchips = 1;
	/* chip_shift covers the whole array: single-chip size scaled by the
	 * number of valid chips, split by whether chipsize exceeds 4GiB. */
	if ((chip->chipsize >> 32) & 0xffffffff)
		chip->chip_shift = fls((unsigned)(chip->chipsize >> 32))-1 + fls(valid_chip_num)-1 + 32;
	else
		chip->chip_shift = fls((unsigned)chip->chipsize)-1 + fls(valid_chip_num)-1;
	chip->pagemask = ((chip->chipsize * valid_chip_num) >> chip->page_shift) - 1;
	chip->options &= ~NAND_CACHEPRG;
	aml_chip->internal_chipnr = aml_type->internal_chipnr;
	aml_chip->internal_page_nums = (chip->chipsize >> chip->page_shift);
	aml_chip->internal_page_nums /= aml_chip->internal_chipnr;
	aml_chip->internal_chip_shift = fls((unsigned)aml_chip->internal_page_nums) - 1;
	temp_chip_shift = ffs((unsigned)aml_chip->internal_page_nums) - 1;
	/* fls != ffs means internal_page_nums is not a power of two: round
	 * the shift up and widen pagemask accordingly. */
	if (aml_chip->internal_chip_shift != temp_chip_shift) {
		aml_chip->internal_chip_shift += 1;
		chip->chip_shift += 1;
		chip->pagemask = ((1 << (chip->chip_shift + 1)) >> chip->page_shift) - 1;
	}
	aml_chip->options = aml_type->options;
	aml_chip->page_size = aml_type->pagesize;
	aml_chip->block_size = aml_type->erasesize;
	aml_chip->oob_size = aml_type->oobsize;
	/* mtd presents the chip array as one device: scale every dimension
	 * by the number of valid chips. */
	mtd->erasesize = valid_chip_num * aml_type->erasesize;
	mtd->writesize = valid_chip_num * aml_type->pagesize;
	mtd->oobsize = valid_chip_num * aml_type->oobsize;
	mtd->size = valid_chip_num * chip->chipsize;
	chip->cmdfunc = aml_nand_command;
	chip->waitfunc = aml_nand_wait;
	chip->erase_cmd = aml_nand_erase_cmd;
	chip->write_page = aml_nand_write_page;
	return 0;
}
  2576. int aml_nand_scan(struct mtd_info *mtd, int maxchips)
  2577. {
  2578. int ret;
  2579. ret = aml_nand_scan_ident(mtd, maxchips);
  2580. if (!ret)
  2581. ret = nand_scan_tail(mtd);
  2582. return ret;
  2583. }
/*
 * Reconcile the board-selected options (ECC mode, plane mode,
 * interleaving mode) against what the detected chip's page/OOB geometry
 * can actually support, then program chip->ecc, aml_chip->bch_mode and
 * the mtd sizes accordingly.
 *
 * The ECC check first tries the 1KiB ECC unit; when the OOB cannot hold
 * BCH24/1K parity it falls back to the 512B unit via the
 * ecc_unit_change label and picks the strongest BCH level that fits.
 * A selected mode stronger than what the OOB supports is downgraded
 * (except for the boot partition, matched by NAND_BOOT_NAME).
 *
 * Returns 0 on success, -ENXIO when a soft/none ECC mode was requested
 * that this hardware driver does not handle.
 */
static int aml_platform_options_confirm(struct aml_nand_chip *aml_chip)
{
	struct mtd_info *mtd = &aml_chip->mtd;
	struct nand_chip *chip = &aml_chip->chip;
	struct aml_nand_platform *plat = aml_chip->platform;
	unsigned options_selected = 0, options_support = 0, ecc_bytes, options_define;
	int error = 0;
	options_selected = (plat->platform_nand_data.chip.options & NAND_ECC_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_ECC_OPTIONS_MASK);
ecc_unit_change:
	/* OOB bytes available per ECC unit of the current ecc.size */
	ecc_bytes = aml_chip->oob_size / (aml_chip->page_size / chip->ecc.size);
	if (chip->ecc.size == NAND_ECC_UNIT_1KSIZE) {
		/* +2 reserves the user/spare bytes per ECC unit */
		if (ecc_bytes >= (NAND_BCH24_1K_ECC_SIZE + 2))
			options_support = NAND_ECC_BCH24_1K_MODE;
		else {
			aml_nand_debug("oob size is not enough for 1K UNIT ECC mode: %d try 512 UNIT ECC\n", aml_chip->oob_size);
			chip->ecc.size = NAND_ECC_UNIT_SIZE;
			goto ecc_unit_change;
		}
	}
	else {
		/* 512B unit: pick the strongest BCH level the OOB can hold */
		if (ecc_bytes >= (NAND_BCH16_ECC_SIZE + 2))
			options_support = NAND_ECC_BCH16_MODE;
		else if (ecc_bytes >= (NAND_BCH12_ECC_SIZE + 2))
			options_support = NAND_ECC_BCH12_MODE;
		else if (ecc_bytes >= (NAND_BCH8_ECC_SIZE + 2))
			options_support = NAND_ECC_BCH8_MODE;
		else {
			options_support = NAND_ECC_SOFT_MODE;
			aml_nand_debug("page size: %d oob size %d is not enough for HW ECC\n", aml_chip->page_size, aml_chip->oob_size);
		}
	}
	if (options_define != options_support) {
		options_define = options_support;
		aml_nand_debug("define oob size: %d could support bch mode: %s\n", aml_chip->oob_size, aml_nand_bch_string[options_support]);
	}
	/* Downgrade an over-strong selection, except for the boot part */
	if ((options_selected > options_define) && (strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME)))) {
		aml_nand_debug("oob size is not enough for selected bch mode: %s force bch to mode: %s\n", aml_nand_bch_string[options_selected], aml_nand_bch_string[options_define]);
		options_selected = options_define;
	}
	switch (options_selected) {
	case NAND_ECC_BCH9_MODE:
		chip->ecc.size = NAND_ECC_UNIT_SIZE; //our hardware ecc unit is 512bytes
		chip->ecc.bytes = NAND_BCH9_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH9;
		aml_chip->user_byte_mode = 1;
		break;
	case NAND_ECC_BCH8_MODE:
		chip->ecc.size = NAND_ECC_UNIT_SIZE;
		chip->ecc.bytes = NAND_BCH8_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH8;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH12_MODE:
		chip->ecc.size = NAND_ECC_UNIT_SIZE;
		chip->ecc.bytes = NAND_BCH12_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH12;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH16_MODE:
		chip->ecc.size = NAND_ECC_UNIT_SIZE;
		chip->ecc.bytes = NAND_BCH16_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH16;
		aml_chip->user_byte_mode = 2;
		break;
	case NAND_ECC_BCH24_1K_MODE:
		chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
		chip->ecc.bytes = NAND_BCH24_1K_ECC_SIZE;
		aml_chip->bch_mode = NAND_ECC_BCH24_1K;
		aml_chip->user_byte_mode = 2;
		break;
	default :
		/* soft/none ECC is only supported by the generic nand base */
		if ((plat->platform_nand_data.chip.options & NAND_ECC_OPTIONS_MASK) != NAND_ECC_SOFT_MODE) {
			aml_nand_debug("soft ecc or none ecc just support in linux self nand base please selected it at platform options\n");
			error = -ENXIO;
		}
		break;
	}
	/* Plane mode: two-plane doubles the effective page/block/oob */
	options_selected = (plat->platform_nand_data.chip.options & NAND_PLANE_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_PLANE_OPTIONS_MASK);
	if (options_selected > options_define) {
		aml_nand_debug("multi plane error for selected plane mode: %s force plane to : %s\n", aml_nand_plane_string[options_selected >> 4], aml_nand_plane_string[options_define >> 4]);
		options_selected = options_define;
	}
	switch (options_selected) {
	case NAND_TWO_PLANE_MODE:
		aml_chip->plane_num = 2;
		mtd->erasesize *= 2;
		mtd->writesize *= 2;
		mtd->oobsize *= 2;
		break;
	default:
		aml_chip->plane_num = 1;
		break;
	}
	/* Interleaving mode: scale sizes by the internal chip count */
	options_selected = (plat->platform_nand_data.chip.options & NAND_INTERLEAVING_OPTIONS_MASK);
	options_define = (aml_chip->options & NAND_INTERLEAVING_OPTIONS_MASK);
	if (options_selected > options_define) {
		aml_nand_debug("internal mode error for selected internal mode: %s force internal mode to : %s\n", aml_nand_internal_string[options_selected >> 16], aml_nand_internal_string[options_define >> 16]);
		options_selected = options_define;
	}
	switch (options_selected) {
	case NAND_INTERLEAVING_MODE:
		aml_chip->ops_mode |= AML_INTERLEAVING_MODE;
		mtd->erasesize *= aml_chip->internal_chipnr;
		mtd->writesize *= aml_chip->internal_chipnr;
		mtd->oobsize *= aml_chip->internal_chipnr;
		break;
	default:
		break;
	}
	return error;
}
/*
 * Read one byte from the selected NAND chip through the NFC.
 * Queues a data-read (DRD) cycle followed by a short IDLE, busy-waits
 * for the command FIFO to drain, then samples the data register.
 */
static uint8_t aml_platform_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	NFC_SEND_CMD(aml_chip->chip_selected | DRD | 0);
	NFC_SEND_CMD(aml_chip->chip_selected | IDLE | 5);
	/* spin until the NFC command FIFO is empty so the read completed */
	while(NFC_CMDFIFO_SIZE()>0);
	return readb(chip->IO_ADDR_R);
}
/*
 * Write one byte to the selected NAND chip through the NFC.
 * Brackets a data-write (DWR) cycle with short IDLE cycles, then
 * busy-waits for the command FIFO to drain before returning.
 */
static void aml_platform_write_byte(struct aml_nand_chip *aml_chip, uint8_t data)
{
	NFC_SEND_CMD(aml_chip->chip_selected | IDLE | 5);
	NFC_SEND_CMD(aml_chip->chip_selected | DWR | data);
	NFC_SEND_CMD(aml_chip->chip_selected | IDLE | 5);
	/* spin until the NFC command FIFO is empty so the write completed */
	while(NFC_CMDFIFO_SIZE()>0);
	return;
}
  2714. static int aml_nand_read_env (struct mtd_info *mtd, size_t offset, u_char * buf)
  2715. {
  2716. struct env_oobinfo_t *env_oobinfo;
  2717. int error = 0, start_blk, total_blk;
  2718. size_t addr = 0;
  2719. size_t amount_loaded = 0;
  2720. size_t len;
  2721. struct mtd_oob_ops aml_oob_ops;
  2722. unsigned char *data_buf;
  2723. unsigned char env_oob_buf[sizeof(struct env_oobinfo_t)];
  2724. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  2725. if (!aml_chip->aml_nandenv_info->env_valid)
  2726. return 1;
  2727. addr = (1024 * mtd->writesize / aml_chip->plane_num);
  2728. #ifdef NEW_NAND_SUPPORT
  2729. if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10))
  2730. addr += RETRY_NAND_BLK_NUM* mtd->erasesize;
  2731. #endif
  2732. start_blk = addr / mtd->erasesize;
  2733. total_blk = mtd->size / mtd->erasesize;
  2734. addr = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  2735. addr *= mtd->erasesize;
  2736. addr += aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr * mtd->writesize;
  2737. data_buf = kzalloc(mtd->writesize, GFP_KERNEL);
  2738. if (data_buf == NULL)
  2739. return -ENOMEM;
  2740. env_oobinfo = (struct env_oobinfo_t *)env_oob_buf;
  2741. while (amount_loaded < CONFIG_ENV_SIZE ) {
  2742. aml_oob_ops.mode = MTD_OOB_AUTO;
  2743. aml_oob_ops.len = mtd->writesize;
  2744. aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t);
  2745. aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset;
  2746. aml_oob_ops.datbuf = data_buf;
  2747. aml_oob_ops.oobbuf = env_oob_buf;
  2748. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  2749. memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
  2750. error = mtd->read_oob(mtd, addr, &aml_oob_ops);
  2751. if ((error != 0) && (error != -EUCLEAN)) {
  2752. printk("blk check good but read failed: %llx, %d\n", (uint64_t)addr, error);
  2753. return 1;
  2754. }
  2755. if (memcmp(env_oobinfo->name, ENV_NAND_MAGIC, 4))
  2756. printk("invalid nand env magic: %llx\n", (uint64_t)addr);
  2757. addr += mtd->writesize;
  2758. len = min(mtd->writesize, CONFIG_ENV_SIZE - amount_loaded);
  2759. memcpy(buf + amount_loaded, data_buf, len);
  2760. amount_loaded += mtd->writesize;
  2761. }
  2762. if (amount_loaded < CONFIG_ENV_SIZE)
  2763. return 1;
  2764. kfree(data_buf);
  2765. return 0;
  2766. }
  2767. static int aml_nand_write_env(struct mtd_info *mtd, loff_t offset, u_char *buf)
  2768. {
  2769. struct env_oobinfo_t *env_oobinfo;
  2770. int error = 0;
  2771. loff_t addr = 0;
  2772. size_t amount_saved = 0;
  2773. size_t len;
  2774. struct mtd_oob_ops aml_oob_ops;
  2775. unsigned char *data_buf;
  2776. unsigned char env_oob_buf[sizeof(struct env_oobinfo_t)];
  2777. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  2778. data_buf = kzalloc(mtd->writesize, GFP_KERNEL);
  2779. if (data_buf == NULL)
  2780. return -ENOMEM;
  2781. addr = offset;
  2782. env_oobinfo = (struct env_oobinfo_t *)env_oob_buf;
  2783. memcpy(env_oobinfo->name, ENV_NAND_MAGIC, 4);
  2784. env_oobinfo->ec = aml_chip->aml_nandenv_info->env_valid_node->ec;
  2785. env_oobinfo->timestamp = aml_chip->aml_nandenv_info->env_valid_node->timestamp;
  2786. env_oobinfo->status_page = 1;
  2787. while (amount_saved < CONFIG_ENV_SIZE ) {
  2788. aml_oob_ops.mode = MTD_OOB_AUTO;
  2789. aml_oob_ops.len = mtd->writesize;
  2790. aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t);
  2791. aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset;
  2792. aml_oob_ops.datbuf = data_buf;
  2793. aml_oob_ops.oobbuf = env_oob_buf;
  2794. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  2795. len = min(mtd->writesize, CONFIG_ENV_SIZE - amount_saved);
  2796. memcpy((unsigned char *)aml_oob_ops.datbuf, buf + amount_saved, len);
  2797. error = mtd->write_oob(mtd, addr, &aml_oob_ops);
  2798. if (error) {
  2799. printk("blk check good but write failed: %llx, %d\n", (uint64_t)addr, error);
  2800. return 1;
  2801. }
  2802. addr += mtd->writesize;;
  2803. amount_saved += mtd->writesize;
  2804. }
  2805. if (amount_saved < CONFIG_ENV_SIZE)
  2806. return 1;
  2807. kfree(data_buf);
  2808. return 0;
  2809. }
/*
 * Save a new environment image, wear-levelling across the env blocks.
 *
 * Advances the valid node's page pointer; when the current block is
 * full, the old block is pushed onto the free list and the head of the
 * free list becomes the new valid block (timestamp bumped).  A fresh
 * block is erased before the first page is written; an erase failure
 * marks the block bad.  The bad-block table is appended to the env
 * payload (and the CRC recomputed) when its magic markers are intact.
 *
 * Returns 0 on success, 1 when env is not initialized or the write
 * fails, -ENOMEM on allocation failure, or a negative erase error.
 */
static int aml_nand_save_env(struct mtd_info *mtd, u_char *buf)
{
	struct aml_nand_bbt_info *nand_bbt_info;
	struct env_free_node_t *env_free_node, *env_tmp_node;
	int error = 0, pages_per_blk, i = 1;
	loff_t addr = 0;
	struct erase_info aml_env_erase_info;
	env_t *env_ptr = (env_t *)buf;
	struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
	if (!aml_chip->aml_nandenv_info->env_init)
		return 1;
	pages_per_blk = mtd->erasesize / mtd->writesize;
	/* i = pages consumed per env copy (>=1 when env spans pages) */
	if ((mtd->writesize < CONFIG_ENV_SIZE) && (aml_chip->aml_nandenv_info->env_valid == 1))
		i = (CONFIG_ENV_SIZE + mtd->writesize - 1) / mtd->writesize;
	if (aml_chip->aml_nandenv_info->env_valid) {
		aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr += i;
		/* current block exhausted: recycle it to the free list and
		 * take the free-list head as the new valid block */
		if ((aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr + i) > pages_per_blk) {
			env_free_node = kzalloc(sizeof(struct env_free_node_t), GFP_KERNEL);
			if (env_free_node == NULL)
				return -ENOMEM;
			env_free_node->phy_blk_addr = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
			env_free_node->ec = aml_chip->aml_nandenv_info->env_valid_node->ec;
			env_tmp_node = aml_chip->aml_nandenv_info->env_free_node;
			while (env_tmp_node->next != NULL) {
				env_tmp_node = env_tmp_node->next;
			}
			env_tmp_node->next = env_free_node;
			env_tmp_node = aml_chip->aml_nandenv_info->env_free_node;
			aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr = env_tmp_node->phy_blk_addr;
			aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr = 0;
			aml_chip->aml_nandenv_info->env_valid_node->ec = env_tmp_node->ec;
			aml_chip->aml_nandenv_info->env_valid_node->timestamp += 1;
			aml_chip->aml_nandenv_info->env_free_node = env_tmp_node->next;
			kfree(env_tmp_node);
		}
	}
	else {
		/* no valid copy yet: claim the first free block */
		env_tmp_node = aml_chip->aml_nandenv_info->env_free_node;
		aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr = env_tmp_node->phy_blk_addr;
		aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr = 0;
		aml_chip->aml_nandenv_info->env_valid_node->ec = env_tmp_node->ec;
		aml_chip->aml_nandenv_info->env_valid_node->timestamp += 1;
		aml_chip->aml_nandenv_info->env_free_node = env_tmp_node->next;
		kfree(env_tmp_node);
	}
	addr = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
	addr *= mtd->erasesize;
	addr += aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr * mtd->writesize;
	/* first write into this block: erase it and bump its erase count */
	if (aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr == 0) {
		memset(&aml_env_erase_info, 0, sizeof(struct erase_info));
		aml_env_erase_info.mtd = mtd;
		aml_env_erase_info.addr = addr;
		aml_env_erase_info.len = mtd->erasesize;
		error = mtd->erase(mtd, &aml_env_erase_info);
		if (error) {
			printk("env free blk erase failed %d\n", error);
			mtd->block_markbad(mtd, addr);
			return error;
		}
		aml_chip->aml_nandenv_info->env_valid_node->ec++;
	}
	/* append the BBT to the env payload when its markers are intact */
	nand_bbt_info = &aml_chip->aml_nandenv_info->nand_bbt_info;
	if ((!memcmp(nand_bbt_info->bbt_head_magic, BBT_HEAD_MAGIC, 4)) && (!memcmp(nand_bbt_info->bbt_tail_magic, BBT_TAIL_MAGIC, 4))) {
		memcpy(env_ptr->data + default_environment_size, aml_chip->aml_nandenv_info->nand_bbt_info.bbt_head_magic, sizeof(struct aml_nand_bbt_info));
		env_ptr->crc = (crc32((0 ^ 0xffffffffL), env_ptr->data, ENV_SIZE) ^ 0xffffffffL);
	}
	if (aml_nand_write_env(mtd, addr, (u_char *) env_ptr)) {
		printk("update nand env FAILED!\n");
		return 1;
	}
	return error;
}
  2882. static int aml_nand_env_init(struct mtd_info *mtd)
  2883. {
  2884. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  2885. struct nand_chip *chip = &aml_chip->chip;
  2886. struct env_oobinfo_t *env_oobinfo;
  2887. struct env_free_node_t *env_free_node, *env_tmp_node, *env_prev_node;
  2888. int error = 0, start_blk, total_blk, env_blk, i, pages_per_blk, bad_blk_cnt = 0, max_env_blk, phys_erase_shift;
  2889. loff_t offset;
  2890. unsigned char *data_buf;
  2891. struct mtd_oob_ops aml_oob_ops;
  2892. unsigned char env_oob_buf[sizeof(struct env_oobinfo_t)];
  2893. data_buf = kzalloc(mtd->writesize, GFP_KERNEL);
  2894. if (data_buf == NULL)
  2895. return -ENOMEM;
  2896. aml_chip->aml_nandenv_info = kzalloc(sizeof(struct aml_nandenv_info_t), GFP_KERNEL);
  2897. if (aml_chip->aml_nandenv_info == NULL)
  2898. return -ENOMEM;
  2899. aml_chip->aml_nandenv_info->mtd = mtd;
  2900. aml_chip->aml_nandenv_info->env_valid_node = kzalloc(sizeof(struct env_valid_node_t), GFP_KERNEL);
  2901. if (aml_chip->aml_nandenv_info->env_valid_node == NULL)
  2902. return -ENOMEM;
  2903. aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr = -1;
  2904. phys_erase_shift = fls(mtd->erasesize) - 1;
  2905. max_env_blk = (NAND_MINI_PART_SIZE >> phys_erase_shift);
  2906. if (max_env_blk < 2)
  2907. max_env_blk = 2;
  2908. if (nand_boot_flag)
  2909. offset = (1024 * mtd->writesize / aml_chip->plane_num);
  2910. else {
  2911. default_environment_size = 0;
  2912. offset = 0;
  2913. }
  2914. #ifdef NEW_NAND_SUPPORT
  2915. if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10))
  2916. offset += RETRY_NAND_BLK_NUM* mtd->erasesize;
  2917. #endif
  2918. start_blk = (int)(offset >> phys_erase_shift);
  2919. total_blk = (int)(mtd->size >> phys_erase_shift);
  2920. pages_per_blk = (1 << (chip->phys_erase_shift - chip->page_shift));
  2921. env_oobinfo = (struct env_oobinfo_t *)env_oob_buf;
  2922. if ((default_environment_size + sizeof(struct aml_nand_bbt_info)) > ENV_SIZE)
  2923. total_blk = start_blk + max_env_blk;
  2924. env_blk = 0;
  2925. do {
  2926. offset = mtd->erasesize;
  2927. offset *= start_blk;
  2928. error = mtd->block_isbad(mtd, offset);
  2929. if (error) {
  2930. aml_chip->aml_nandenv_info->nand_bbt_info.nand_bbt[bad_blk_cnt++] = start_blk;
  2931. continue;
  2932. }
  2933. aml_oob_ops.mode = MTD_OOB_AUTO;
  2934. aml_oob_ops.len = mtd->writesize;
  2935. aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t);
  2936. aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset;
  2937. aml_oob_ops.datbuf = data_buf;
  2938. aml_oob_ops.oobbuf = env_oob_buf;
  2939. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  2940. memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
  2941. error = mtd->read_oob(mtd, offset, &aml_oob_ops);
  2942. if ((error != 0) && (error != -EUCLEAN)) {
  2943. printk("blk check good but read failed: %llx, %d\n", (uint64_t)offset, error);
  2944. continue;
  2945. }
  2946. aml_chip->aml_nandenv_info->env_init = 1;
  2947. if (!memcmp(env_oobinfo->name, ENV_NAND_MAGIC, 4)) {
  2948. aml_chip->aml_nandenv_info->env_valid = 1;
  2949. if (aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr >= 0) {
  2950. env_free_node = kzalloc(sizeof(struct env_free_node_t), GFP_KERNEL);
  2951. if (env_free_node == NULL)
  2952. return -ENOMEM;
  2953. env_free_node->dirty_flag = 1;
  2954. if (env_oobinfo->timestamp > aml_chip->aml_nandenv_info->env_valid_node->timestamp) {
  2955. env_free_node->phy_blk_addr = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  2956. env_free_node->ec = aml_chip->aml_nandenv_info->env_valid_node->ec;
  2957. aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr = start_blk;
  2958. aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr = 0;
  2959. aml_chip->aml_nandenv_info->env_valid_node->ec = env_oobinfo->ec;
  2960. aml_chip->aml_nandenv_info->env_valid_node->timestamp = env_oobinfo->timestamp;
  2961. }
  2962. else {
  2963. env_free_node->phy_blk_addr = start_blk;
  2964. env_free_node->ec = env_oobinfo->ec;
  2965. }
  2966. if (aml_chip->aml_nandenv_info->env_free_node == NULL)
  2967. aml_chip->aml_nandenv_info->env_free_node = env_free_node;
  2968. else {
  2969. env_tmp_node = aml_chip->aml_nandenv_info->env_free_node;
  2970. while (env_tmp_node->next != NULL) {
  2971. env_tmp_node = env_tmp_node->next;
  2972. }
  2973. env_tmp_node->next = env_free_node;
  2974. }
  2975. }
  2976. else {
  2977. aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr = start_blk;
  2978. aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr = 0;
  2979. aml_chip->aml_nandenv_info->env_valid_node->ec = env_oobinfo->ec;
  2980. aml_chip->aml_nandenv_info->env_valid_node->timestamp = env_oobinfo->timestamp;
  2981. }
  2982. }
  2983. else if (env_blk < max_env_blk) {
  2984. env_free_node = kzalloc(sizeof(struct env_free_node_t), GFP_KERNEL);
  2985. if (env_free_node == NULL)
  2986. return -ENOMEM;
  2987. env_free_node->phy_blk_addr = start_blk;
  2988. env_free_node->ec = env_oobinfo->ec;
  2989. if (aml_chip->aml_nandenv_info->env_free_node == NULL)
  2990. aml_chip->aml_nandenv_info->env_free_node = env_free_node;
  2991. else {
  2992. env_tmp_node = aml_chip->aml_nandenv_info->env_free_node;
  2993. env_prev_node = env_tmp_node;
  2994. while (env_tmp_node != NULL) {
  2995. if (env_tmp_node->dirty_flag == 1)
  2996. break;
  2997. env_prev_node = env_tmp_node;
  2998. env_tmp_node = env_tmp_node->next;
  2999. }
  3000. if (env_prev_node == env_tmp_node) {
  3001. env_free_node->next = env_tmp_node;
  3002. aml_chip->aml_nandenv_info->env_free_node = env_free_node;
  3003. }
  3004. else {
  3005. env_prev_node->next = env_free_node;
  3006. env_free_node->next = env_tmp_node;
  3007. }
  3008. }
  3009. }
  3010. env_blk++;
  3011. if ((env_blk >= max_env_blk) && (aml_chip->aml_nandenv_info->env_valid == 1))
  3012. break;
  3013. } while ((++start_blk) < total_blk);
  3014. if (start_blk >= total_blk) {
  3015. memcpy(aml_chip->aml_nandenv_info->nand_bbt_info.bbt_head_magic, BBT_HEAD_MAGIC, 4);
  3016. memcpy(aml_chip->aml_nandenv_info->nand_bbt_info.bbt_tail_magic, BBT_TAIL_MAGIC, 4);
  3017. }
  3018. if (aml_chip->aml_nandenv_info->env_valid == 1) {
  3019. aml_oob_ops.mode = MTD_OOB_AUTO;
  3020. aml_oob_ops.len = mtd->writesize;
  3021. aml_oob_ops.ooblen = sizeof(struct env_oobinfo_t);
  3022. aml_oob_ops.ooboffs = mtd->ecclayout->oobfree[0].offset;
  3023. aml_oob_ops.datbuf = data_buf;
  3024. aml_oob_ops.oobbuf = env_oob_buf;
  3025. for (i=0; i<pages_per_blk; i++) {
  3026. memset((unsigned char *)aml_oob_ops.datbuf, 0x0, mtd->writesize);
  3027. memset((unsigned char *)aml_oob_ops.oobbuf, 0x0, aml_oob_ops.ooblen);
  3028. offset = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  3029. offset *= mtd->erasesize;
  3030. offset += i * mtd->writesize;
  3031. error = mtd->read_oob(mtd, offset, &aml_oob_ops);
  3032. if ((error != 0) && (error != -EUCLEAN)) {
  3033. printk("blk check good but read failed: %llx, %d\n", (uint64_t)offset, error);
  3034. continue;
  3035. }
  3036. if (!memcmp(env_oobinfo->name, ENV_NAND_MAGIC, 4))
  3037. aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr = i;
  3038. else
  3039. break;
  3040. }
  3041. }
  3042. if ((mtd->writesize < CONFIG_ENV_SIZE) && (aml_chip->aml_nandenv_info->env_valid == 1)) {
  3043. i = (CONFIG_ENV_SIZE + mtd->writesize - 1) / mtd->writesize;
  3044. aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr -= (i - 1);
  3045. }
  3046. offset = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  3047. offset *= mtd->erasesize;
  3048. offset += aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr * mtd->writesize;
  3049. printk("aml nand env valid addr: %llx \n", (uint64_t)offset);
  3050. printk(KERN_DEBUG "CONFIG_ENV_SIZE=0x%x; ENV_SIZE=0x%x; bbt=0x%x; default_environment_size=0x%x\n",
  3051. CONFIG_ENV_SIZE, ENV_SIZE, sizeof(struct aml_nand_bbt_info), default_environment_size);
  3052. kfree(data_buf);
  3053. return 0;
  3054. }
  3055. static int aml_nand_update_env(struct mtd_info *mtd)
  3056. {
  3057. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  3058. env_t *env_ptr;
  3059. loff_t offset;
  3060. int error = 0;
  3061. env_ptr = kzalloc(sizeof(env_t), GFP_KERNEL);
  3062. if (env_ptr == NULL)
  3063. return -ENOMEM;
  3064. if (aml_chip->aml_nandenv_info->env_valid == 1) {
  3065. offset = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  3066. offset *= mtd->erasesize;
  3067. offset += aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr * mtd->writesize;
  3068. error = aml_nand_read_env (mtd, offset, (u_char *)env_ptr);
  3069. if (error) {
  3070. printk("nand env read failed: %llx, %d\n", (uint64_t)offset, error);
  3071. return error;
  3072. }
  3073. error = aml_nand_save_env(mtd, (u_char *)env_ptr);
  3074. if (error) {
  3075. printk("update env bbt failed %d \n", error);
  3076. return error;
  3077. }
  3078. }
  3079. return error;
  3080. }
  3081. static int aml_nand_env_check(struct mtd_info *mtd)
  3082. {
  3083. struct aml_nand_chip *aml_chip = mtd_to_nand_chip(mtd);
  3084. struct aml_nand_platform *plat = aml_chip->platform;
  3085. struct platform_nand_chip *chip = &plat->platform_nand_data.chip;
  3086. struct aml_nand_bbt_info *nand_bbt_info;
  3087. struct aml_nand_part_info *aml_nand_part;
  3088. struct mtd_partition *parts;
  3089. env_t *env_ptr;
  3090. int error = 0, start_blk, total_blk, update_env_flag = 0, i, j, nr, phys_erase_shift;
  3091. loff_t offset;
  3092. error = aml_nand_env_init(mtd);
  3093. if (error)
  3094. return error;
  3095. env_ptr = kzalloc(sizeof(env_t), GFP_KERNEL);
  3096. if (env_ptr == NULL)
  3097. return -ENOMEM;
  3098. if (aml_chip->aml_nandenv_info->env_valid == 1) {
  3099. offset = aml_chip->aml_nandenv_info->env_valid_node->phy_blk_addr;
  3100. offset *= mtd->erasesize;
  3101. offset += aml_chip->aml_nandenv_info->env_valid_node->phy_page_addr * mtd->writesize;
  3102. error = aml_nand_read_env (mtd, offset, (u_char *)env_ptr);
  3103. if (error) {
  3104. printk("nand env read failed: %llx, %d\n", (uint64_t)offset, error);
  3105. goto exit;
  3106. }
  3107. phys_erase_shift = fls(mtd->erasesize) - 1;
  3108. offset = (1024 * mtd->writesize / aml_chip->plane_num);
  3109. #ifdef NEW_NAND_SUPPORT
  3110. if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10))
  3111. offset += RETRY_NAND_BLK_NUM* mtd->erasesize;
  3112. #endif
  3113. start_blk = (int)(offset >> phys_erase_shift);
  3114. total_blk = (int)(mtd->size >> phys_erase_shift);
  3115. nand_bbt_info = (struct aml_nand_bbt_info *)(env_ptr->data + default_environment_size);
  3116. if ((!memcmp(nand_bbt_info->bbt_head_magic, BBT_HEAD_MAGIC, 4)) && (!memcmp(nand_bbt_info->bbt_tail_magic, BBT_TAIL_MAGIC, 4))) {
  3117. for (i=start_blk; i<total_blk; i++) {
  3118. aml_chip->block_status[i] = NAND_BLOCK_GOOD;
  3119. for (j=0; j<MAX_BAD_BLK_NUM; j++) {
  3120. if (nand_bbt_info->nand_bbt[j] == i) {
  3121. aml_chip->block_status[i] = NAND_BLOCK_BAD;
  3122. break;
  3123. }
  3124. }
  3125. }
  3126. if (chip->set_parts)
  3127. chip->set_parts(mtd->size, chip);
  3128. aml_nand_part = nand_bbt_info->aml_nand_part;
  3129. if (plat->platform_nand_data.chip.nr_partitions == 0) {
  3130. parts = kzalloc((MAX_MTD_PART_NUM * sizeof(struct mtd_partition)), GFP_KERNEL);
  3131. if (!parts) {
  3132. error = -ENOMEM;
  3133. goto exit;
  3134. }
  3135. plat->platform_nand_data.chip.partitions = parts;
  3136. nr = 0;
  3137. while(memcmp(aml_nand_part->mtd_part_magic, MTD_PART_MAGIC, 4) == 0) {
  3138. parts->name = kzalloc(MAX_MTD_PART_NAME_LEN, GFP_KERNEL);
  3139. if (!parts->name) {
  3140. error = -ENOMEM;
  3141. goto exit;
  3142. }
  3143. strncpy(parts->name, aml_nand_part->mtd_part_name, MAX_MTD_PART_NAME_LEN);
  3144. parts->offset = aml_nand_part->offset;
  3145. parts->size = aml_nand_part->size;
  3146. parts->mask_flags = aml_nand_part->mask_flags;
  3147. nr++;
  3148. parts++;
  3149. aml_nand_part++;
  3150. }
  3151. plat->platform_nand_data.chip.nr_partitions = nr;
  3152. }
  3153. else {
  3154. parts = plat->platform_nand_data.chip.partitions;
  3155. nr = 0;
  3156. if (strlen(parts->name) >= MAX_MTD_PART_NAME_LEN)
  3157. parts->name[MAX_MTD_PART_NAME_LEN - 1] = '\0';
  3158. while(memcmp(aml_nand_part->mtd_part_magic, MTD_PART_MAGIC, 4) == 0) {
  3159. nr++;
  3160. if (nr > plat->platform_nand_data.chip.nr_partitions) {
  3161. update_env_flag = 1;
  3162. memset((unsigned char *)aml_nand_part, 0, sizeof(struct aml_nand_part_info));
  3163. aml_nand_part++;
  3164. continue;
  3165. }
  3166. if (strcmp(parts->name, aml_nand_part->mtd_part_name)) {
  3167. printk("mtd parttion %d name %s changed to %s \n", nr, parts->name, aml_nand_part->mtd_part_name);
  3168. update_env_flag = 1;
  3169. strncpy(aml_nand_part->mtd_part_name, parts->name, MAX_MTD_PART_NAME_LEN);
  3170. }
  3171. if (parts->offset != aml_nand_part->offset) {
  3172. printk("mtd parttion %d offset %llx changed to %llx \n", nr, aml_nand_part->offset, parts->offset);
  3173. update_env_flag = 1;
  3174. aml_nand_part->offset = parts->offset;
  3175. }
  3176. if (parts->size != aml_nand_part->size) {
  3177. printk("mtd parttion %d size %llx changed to %llx \n", nr, aml_nand_part->size, parts->size);
  3178. update_env_flag = 1;
  3179. aml_nand_part->size = parts->size;
  3180. }
  3181. if (parts->mask_flags != aml_nand_part->mask_flags) {
  3182. printk("mtd parttion %d mask_flags %x changed to %x \n", nr, aml_nand_part->mask_flags, parts->mask_flags);
  3183. update_env_flag = 1;
  3184. aml_nand_part->mask_flags = parts->mask_flags;
  3185. }
  3186. parts++;
  3187. aml_nand_part++;
  3188. }
  3189. if (nr < plat->platform_nand_data.chip.nr_partitions) {
  3190. update_env_flag = 1;
  3191. for (i=nr; i<plat->platform_nand_data.chip.nr_partitions; i++) {
  3192. parts = plat->platform_nand_data.chip.partitions + i;
  3193. aml_nand_part = nand_bbt_info->aml_nand_part + i;
  3194. memcpy(aml_nand_part->mtd_part_magic, MTD_PART_MAGIC, 4);
  3195. strncpy(aml_nand_part->mtd_part_name, parts->name, MAX_MTD_PART_NAME_LEN);
  3196. aml_nand_part->offset = parts->offset;
  3197. aml_nand_part->size = parts->size;
  3198. aml_nand_part->mask_flags = parts->mask_flags;
  3199. }
  3200. }
  3201. }
  3202. memcpy((unsigned char *)aml_chip->aml_nandenv_info->nand_bbt_info.bbt_head_magic, (unsigned char *)nand_bbt_info, sizeof(struct aml_nand_bbt_info));
  3203. }
  3204. }
  3205. if (update_env_flag) {
  3206. error = aml_nand_save_env(mtd, (u_char *)env_ptr);
  3207. if (error) {
  3208. printk("nand env save failed: %d\n", error);
  3209. goto exit;
  3210. }
  3211. }
  3212. exit:
  3213. kfree(env_ptr);
  3214. return 0;
  3215. }
/*
 * Intentional no-op BBT scan hook.  The driver manages bad blocks
 * itself (block_status[] rebuilt from the env-embedded BBT), so the
 * generic NAND core scan is suppressed; always reports success.
 */
static int aml_nand_scan_bbt(struct mtd_info *mtd)
{
	return 0;
}
  3220. #ifdef CONFIG_AML_NAND_ENV
  3221. static struct mtd_info *nand_env_mtd = NULL;
  3222. #define NAND_ENV_DEVICE_NAME "nand_env"
/* open() hook for the nand_env char device: no per-open state needed. */
static int nand_env_open(struct inode * inode, struct file * filp)
{
	return 0;
}
  3227. /*
  3228. * This funcion reads the u-boot envionment variables.
  3229. * The f_pos points directly to the env location.
  3230. */
  3231. static ssize_t nand_env_read(struct file *file, char __user *buf,
  3232. size_t count, loff_t *ppos)
  3233. {
  3234. env_t *env_ptr = NULL;
  3235. ssize_t read_size;
  3236. int error = 0;
  3237. if(*ppos == CONFIG_ENV_SIZE)
  3238. {
  3239. return 0;
  3240. }
  3241. if(*ppos >= CONFIG_ENV_SIZE)
  3242. {
  3243. printk(KERN_ERR "nand env: data access violation!\n");
  3244. return -EFAULT;
  3245. }
  3246. env_ptr = kzalloc(sizeof(env_t), GFP_KERNEL);
  3247. if (env_ptr == NULL)
  3248. {
  3249. return -ENOMEM;
  3250. }
  3251. error = aml_nand_read_env (nand_env_mtd, 0, (u_char *)env_ptr);
  3252. if (error)
  3253. {
  3254. printk("nand_env_read: nand env read failed: %llx, %d\n", (uint64_t)*ppos, error);
  3255. kfree(env_ptr);
  3256. return -EFAULT;
  3257. }
  3258. if((*ppos + count) > CONFIG_ENV_SIZE)
  3259. {
  3260. read_size = CONFIG_ENV_SIZE - *ppos;
  3261. }
  3262. else
  3263. {
  3264. read_size = count;
  3265. }
  3266. copy_to_user(buf, (env_ptr + *ppos), read_size);
  3267. *ppos += read_size;
  3268. kfree(env_ptr);
  3269. return read_size;
  3270. }
  3271. static ssize_t nand_env_write(struct file *file, const char __user *buf,
  3272. size_t count, loff_t *ppos)
  3273. {
  3274. u_char *env_ptr = NULL;
  3275. ssize_t write_size;
  3276. int error = 0;
  3277. if(*ppos == CONFIG_ENV_SIZE)
  3278. {
  3279. return 0;
  3280. }
  3281. if(*ppos >= CONFIG_ENV_SIZE)
  3282. {
  3283. printk(KERN_ERR "nand env: data access violation!\n");
  3284. return -EFAULT;
  3285. }
  3286. env_ptr = kzalloc(sizeof(env_t), GFP_KERNEL);
  3287. if (env_ptr == NULL)
  3288. {
  3289. return -ENOMEM;
  3290. }
  3291. error = aml_nand_read_env (nand_env_mtd, 0, (u_char *)env_ptr);
  3292. if (error)
  3293. {
  3294. printk("nand_env_read: nand env read failed: %llx, %d\n", (uint64_t)*ppos, error);
  3295. kfree(env_ptr);
  3296. return -EFAULT;
  3297. }
  3298. if((*ppos + count) > CONFIG_ENV_SIZE)
  3299. {
  3300. write_size = CONFIG_ENV_SIZE - *ppos;
  3301. }
  3302. else
  3303. {
  3304. write_size = count;
  3305. }
  3306. copy_from_user((env_ptr + *ppos), buf, write_size);
  3307. error = aml_nand_save_env(nand_env_mtd, env_ptr);
  3308. if (error)
  3309. {
  3310. printk("nand_env_read: nand env read failed: %llx, %d\n", (uint64_t)*ppos, error);
  3311. kfree(env_ptr);
  3312. return -EFAULT;
  3313. }
  3314. *ppos += write_size;
  3315. kfree(env_ptr);
  3316. return write_size;
  3317. }
/* release() hook for the nand_env char device: nothing to tear down. */
static int nand_env_close(struct inode *inode, struct file *file)
{
	return 0;
}
  3322. //static int nand_env_ioctl(struct inode *inode, struct file *file,
  3323. // u_int cmd, u_long arg)
  3324. //{
  3325. // return 0;
  3326. //}
/* Class suspend hook: the env device holds no state to save; no-op. */
static int nand_env_cls_suspend(struct device *dev, pm_message_t state)
{
	return 0;
}
/* Class resume hook: nothing to restore; no-op. */
static int nand_env_cls_resume(struct device *dev)
{
	return 0;
}
/* Device class backing /dev entry for the u-boot env; PM hooks are no-ops. */
static struct class nand_env_class = {
	.name = "nand_env",
	.owner = THIS_MODULE,
	.suspend = nand_env_cls_suspend,
	.resume = nand_env_cls_resume,
};
/* File operations for the nand_env character device (read/write env image). */
static struct file_operations nand_env_fops = {
	.owner = THIS_MODULE,
	.open = nand_env_open,
	.read = nand_env_read,
	.write = nand_env_write,
	.release = nand_env_close,
//	.ioctl = nand_env_ioctl,
};
  3349. #endif
/*
 * sysfs `info` attribute: dump the chip geometry/configuration fields
 * to the kernel log.  Note output goes to printk, not the sysfs buffer,
 * so reading the attribute returns 0 bytes by design.
 */
static ssize_t show_nand_info(struct class *class,
			struct class_attribute *attr, char *buf)
{
	struct aml_nand_chip *aml_chip = container_of(class, struct aml_nand_chip, cls);
	printk("mfr_type:\t\t0x%x\n", aml_chip->mfr_type);
	printk("onfi_mode:\t\t0x%x\n", aml_chip->onfi_mode);
	printk("T_REA:\t\t0x%x\n", aml_chip->T_REA);
	printk("T_RHOH:\t\t0x%x\n", aml_chip->T_RHOH);
	printk("options:\t\t0x%x\n", aml_chip->options);
	printk("page_size:\t\t0x%x\n", aml_chip->page_size);
	printk("block_size:\t\t0x%x\n", aml_chip->block_size);
	printk("oob_size:\t\t0x%x\n", aml_chip->oob_size);
	printk("virtual_page_size:\t\t0x%x\n", aml_chip->virtual_page_size);
	printk("virtual_block_size:\t\t0x%x\n", aml_chip->virtual_block_size);
	printk("plane_num:\t\t0x%x\n", aml_chip->plane_num);
	printk("chip_num:\t\t0x%x\n", aml_chip->chip_num);
	printk("internal_chipnr:\t\t0x%x\n", aml_chip->internal_chipnr);
	printk("internal_page_nums:\t\t0x%x\n", aml_chip->internal_page_nums);
	printk("internal_chip_shift:\t\t0x%x\n", aml_chip->internal_chip_shift);
	printk("bch_mode:\t\t0x%x\n", aml_chip->bch_mode);
	printk("user_byte_mode:\t\t0x%x\n", aml_chip->user_byte_mode);
	printk("ops_mode:\t\t0x%x\n", aml_chip->ops_mode);
	printk("cached_prog_status:\t\t0x%x\n", aml_chip->cached_prog_status);
	printk("max_bch_mode:\t\t0x%x\n", aml_chip->max_bch_mode);
	return 0;
}
  3376. static ssize_t show_bbt_table(struct class *class,
  3377. struct class_attribute *attr, char *buf)
  3378. {
  3379. struct aml_nand_chip *aml_chip = container_of(class, struct aml_nand_chip, cls);
  3380. struct mtd_info *mtd = &aml_chip->mtd;
  3381. int start_blk, total_blk, i, phys_erase_shift;
  3382. loff_t offset;
  3383. phys_erase_shift = fls(mtd->erasesize) - 1;
  3384. offset = (1024 * mtd->writesize / aml_chip->plane_num);
  3385. start_blk = (int)(offset >> phys_erase_shift);
  3386. total_blk = (int)(mtd->size >> phys_erase_shift);
  3387. for (i = start_blk; i < total_blk; i++) {
  3388. if(NAND_BLOCK_BAD == aml_chip->block_status[i])
  3389. printk("block %d is a bad block\n", i);
  3390. }
  3391. return 0;
  3392. }
  3393. static ssize_t nand_page_dump(struct class *class,
  3394. struct class_attribute *attr, const char *buf, size_t count)
  3395. {
  3396. struct aml_nand_chip *aml_chip = container_of(class, struct aml_nand_chip, cls);
  3397. struct mtd_info *mtd = &aml_chip->mtd;
  3398. struct mtd_oob_ops ops;
  3399. loff_t off;
  3400. loff_t addr;
  3401. u_char *datbuf, *oobbuf, *p;
  3402. int ret, i;
  3403. printk(KERN_DEBUG "enter %s\n", __FUNCTION__);
  3404. ret = sscanf(buf, "%llx", &off);
  3405. datbuf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
  3406. oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
  3407. if (!datbuf || !oobbuf) {
  3408. printk("No memory for page buffer\n");
  3409. return 1;
  3410. }
  3411. addr = ~((loff_t)mtd->writesize - 1);
  3412. addr &= off;
  3413. memset(&ops, 0, sizeof(ops));
  3414. ops.datbuf = datbuf;
  3415. ops.oobbuf = NULL; /* must exist, but oob data will be appended to ops.datbuf */
  3416. ops.len = mtd->writesize;
  3417. ops.ooblen = mtd->oobsize;
  3418. ops.mode = MTD_OOB_RAW;
  3419. i = mtd->read_oob(mtd, addr, &ops);
  3420. if (i < 0) {
  3421. printk("Error (%d) reading page %09llx\n", i, off);
  3422. kfree(datbuf);
  3423. kfree(oobbuf);
  3424. return 1;
  3425. }
  3426. printk("Page %09llx dump,page size %d:\n", off,mtd->writesize);
  3427. i = (mtd->writesize + mtd->oobsize) >> 4;
  3428. p = datbuf;
  3429. while (i--) {
  3430. printk("\t%02x %02x %02x %02x %02x %02x %02x %02x"
  3431. " %02x %02x %02x %02x %02x %02x %02x %02x\n",
  3432. p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
  3433. p[8], p[9], p[10], p[11], p[12], p[13], p[14],
  3434. p[15]);
  3435. p += 16;
  3436. }
  3437. /*printf("OOB oob size %d:\n",nand->oobsize);
  3438. i = nand->oobsize >> 3;
  3439. while (i--) {
  3440. printf("\t%02x %02x %02x %02x %02x %02x %02x %02x\n",
  3441. p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
  3442. p += 8;
  3443. }*/
  3444. kfree(datbuf);
  3445. kfree(oobbuf);
  3446. printk(KERN_DEBUG "exit %s\n", __FUNCTION__);
  3447. return count;
  3448. }
  3449. static ssize_t nand_page_read(struct class *class,
  3450. struct class_attribute *attr, const char *buf, size_t count)
  3451. {
  3452. struct aml_nand_chip *aml_chip = container_of(class, struct aml_nand_chip, cls);
  3453. struct mtd_info *mtd = &aml_chip->mtd;
  3454. struct mtd_oob_ops ops;
  3455. loff_t off;
  3456. loff_t addr;
  3457. u_char *datbuf, *oobbuf, *p;
  3458. size_t ret;
  3459. int i;
  3460. printk(KERN_DEBUG "enter %s\n", __FUNCTION__);
  3461. ret = sscanf(buf, "%llx", &off);
  3462. datbuf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
  3463. oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
  3464. if (!datbuf || !oobbuf) {
  3465. printk("No memory for page buffer\n");
  3466. return 1;
  3467. }
  3468. addr = ~((loff_t)mtd->writesize - 1);
  3469. addr &= off;
  3470. mtd->read(mtd, addr, mtd->writesize, &ret, datbuf);
  3471. if (ret < 0) {
  3472. printk("Error (%d) reading page %09llx\n", i, off);
  3473. kfree(datbuf);
  3474. kfree(oobbuf);
  3475. return 1;
  3476. }
  3477. printk("Page %09llx read,page size %d:\n", off,mtd->writesize);
  3478. i = (mtd->writesize ) >> 4;
  3479. p = datbuf;
  3480. while (i--) {
  3481. printk("\t%02x %02x %02x %02x %02x %02x %02x %02x"
  3482. " %02x %02x %02x %02x %02x %02x %02x %02x\n",
  3483. p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7],
  3484. p[8], p[9], p[10], p[11], p[12], p[13], p[14],
  3485. p[15]);
  3486. p += 16;
  3487. }
  3488. kfree(datbuf);
  3489. kfree(oobbuf);
  3490. printk(KERN_DEBUG "exit %s\n", __FUNCTION__);
  3491. return count;
  3492. }
  3493. static struct class_attribute nand_class_attrs[] = {
  3494. __ATTR(info, S_IRUGO | S_IWUSR, show_nand_info, NULL),
  3495. __ATTR(bbt_table, S_IRUGO | S_IWUSR, show_bbt_table, NULL),
  3496. __ATTR(page_dump, S_IRUGO | S_IWUSR, NULL, nand_page_dump),
  3497. __ATTR(page_read, S_IRUGO | S_IWUSR, NULL, nand_page_read),
  3498. __ATTR_NULL
  3499. };
  3500. int aml_nand_init(struct aml_nand_chip *aml_chip)
  3501. {
  3502. struct aml_nand_platform *plat = aml_chip->platform;
  3503. struct nand_chip *chip = &aml_chip->chip;
  3504. struct mtd_info *mtd = &aml_chip->mtd;
  3505. int err = 0, i = 0, phys_erase_shift;
  3506. int oobmul ;
  3507. unsigned por_cfg, valid_chip_num = 0;
  3508. #ifdef CONFIG_HAS_EARLYSUSPEND
  3509. aml_chip->nand_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
  3510. if (!aml_chip->nand_early_suspend.suspend)
  3511. aml_chip->nand_early_suspend.suspend = aml_platform_nand_suspend;
  3512. if (!aml_chip->nand_early_suspend.resume)
  3513. aml_chip->nand_early_suspend.resume = aml_platform_nand_resume;
  3514. aml_chip->nand_early_suspend.param = aml_chip;
  3515. register_early_suspend(&aml_chip->nand_early_suspend);
  3516. #endif
  3517. switch (plat->platform_nand_data.chip.options & NAND_ECC_OPTIONS_MASK) {
  3518. case NAND_ECC_SOFT_MODE:
  3519. chip->write_buf = aml_nand_dma_write_buf;
  3520. chip->read_buf = aml_nand_dma_read_buf;
  3521. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3522. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3523. chip->ecc.mode = NAND_ECC_SOFT;
  3524. aml_chip->user_byte_mode = 1;
  3525. aml_chip->bch_mode = 0;
  3526. break;
  3527. case NAND_ECC_BCH9_MODE:
  3528. chip->write_buf = aml_nand_dma_write_buf;
  3529. chip->read_buf = aml_nand_dma_read_buf;
  3530. chip->block_bad = aml_nand_block_bad;
  3531. chip->block_markbad = aml_nand_block_markbad;
  3532. chip->ecc.mode = NAND_ECC_HW;
  3533. chip->ecc.size = NAND_ECC_UNIT_SIZE;
  3534. chip->ecc.bytes = NAND_BCH9_ECC_SIZE;
  3535. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3536. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3537. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3538. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3539. chip->ecc.read_oob = aml_nand_read_oob;
  3540. chip->ecc.write_oob = aml_nand_write_oob;
  3541. aml_chip->bch_mode = NAND_ECC_BCH9;
  3542. aml_chip->user_byte_mode = 1;
  3543. break;
  3544. case NAND_ECC_BCH8_MODE:
  3545. chip->write_buf = aml_nand_dma_write_buf;
  3546. chip->read_buf = aml_nand_dma_read_buf;
  3547. chip->block_bad = aml_nand_block_bad;
  3548. chip->block_markbad = aml_nand_block_markbad;
  3549. chip->ecc.mode = NAND_ECC_HW;
  3550. chip->ecc.size = NAND_ECC_UNIT_SIZE;
  3551. chip->ecc.bytes = NAND_BCH8_ECC_SIZE;
  3552. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3553. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3554. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3555. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3556. chip->ecc.read_oob = aml_nand_read_oob;
  3557. chip->ecc.write_oob = aml_nand_write_oob;
  3558. aml_chip->bch_mode = NAND_ECC_BCH8;
  3559. aml_chip->user_byte_mode = 2;
  3560. break;
  3561. case NAND_ECC_BCH12_MODE:
  3562. chip->write_buf = aml_nand_dma_write_buf;
  3563. chip->read_buf = aml_nand_dma_read_buf;
  3564. chip->block_bad = aml_nand_block_bad;
  3565. chip->block_markbad = aml_nand_block_markbad;
  3566. chip->ecc.mode = NAND_ECC_HW;
  3567. chip->ecc.size = NAND_ECC_UNIT_SIZE;
  3568. chip->ecc.bytes = NAND_BCH12_ECC_SIZE;
  3569. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3570. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3571. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3572. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3573. chip->ecc.read_oob = aml_nand_read_oob;
  3574. chip->ecc.write_oob = aml_nand_write_oob;
  3575. aml_chip->bch_mode = NAND_ECC_BCH12;
  3576. aml_chip->user_byte_mode = 2;
  3577. break;
  3578. case NAND_ECC_BCH16_MODE:
  3579. chip->write_buf = aml_nand_dma_write_buf;
  3580. chip->read_buf = aml_nand_dma_read_buf;
  3581. chip->block_bad = aml_nand_block_bad;
  3582. chip->block_markbad = aml_nand_block_markbad;
  3583. chip->ecc.mode = NAND_ECC_HW;
  3584. chip->ecc.size = NAND_ECC_UNIT_SIZE;
  3585. chip->ecc.bytes = NAND_BCH16_ECC_SIZE;
  3586. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3587. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3588. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3589. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3590. chip->ecc.read_oob = aml_nand_read_oob;
  3591. chip->ecc.write_oob = aml_nand_write_oob;
  3592. aml_chip->bch_mode = NAND_ECC_BCH16;
  3593. aml_chip->user_byte_mode = 2;
  3594. break;
  3595. case NAND_ECC_BCH8_1K_MODE:
  3596. chip->write_buf = aml_nand_dma_write_buf;
  3597. chip->read_buf = aml_nand_dma_read_buf;
  3598. chip->block_bad = aml_nand_block_bad;
  3599. chip->block_markbad = aml_nand_block_markbad;
  3600. chip->ecc.mode = NAND_ECC_HW;
  3601. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3602. chip->ecc.bytes = NAND_BCH8_1K_ECC_SIZE;
  3603. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3604. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3605. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3606. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3607. chip->ecc.read_oob = aml_nand_read_oob;
  3608. chip->ecc.write_oob = aml_nand_write_oob;
  3609. aml_chip->bch_mode = NAND_ECC_BCH8_1K;
  3610. aml_chip->user_byte_mode = 2;
  3611. break;
  3612. case NAND_ECC_BCH16_1K_MODE:
  3613. chip->write_buf = aml_nand_dma_write_buf;
  3614. chip->read_buf = aml_nand_dma_read_buf;
  3615. chip->block_bad = aml_nand_block_bad;
  3616. chip->block_markbad = aml_nand_block_markbad;
  3617. chip->ecc.mode = NAND_ECC_HW;
  3618. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3619. chip->ecc.bytes = NAND_BCH16_1K_ECC_SIZE;
  3620. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3621. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3622. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3623. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3624. chip->ecc.read_oob = aml_nand_read_oob;
  3625. chip->ecc.write_oob = aml_nand_write_oob;
  3626. aml_chip->bch_mode = NAND_ECC_BCH16_1K;
  3627. aml_chip->user_byte_mode = 2;
  3628. break;
  3629. case NAND_ECC_BCH24_1K_MODE:
  3630. chip->write_buf = aml_nand_dma_write_buf;
  3631. chip->read_buf = aml_nand_dma_read_buf;
  3632. chip->block_bad = aml_nand_block_bad;
  3633. chip->block_markbad = aml_nand_block_markbad;
  3634. chip->ecc.mode = NAND_ECC_HW;
  3635. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3636. chip->ecc.bytes = NAND_BCH24_1K_ECC_SIZE;
  3637. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3638. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3639. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3640. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3641. chip->ecc.read_oob = aml_nand_read_oob;
  3642. chip->ecc.write_oob = aml_nand_write_oob;
  3643. aml_chip->bch_mode = NAND_ECC_BCH24_1K;
  3644. aml_chip->user_byte_mode = 2;
  3645. break;
  3646. case NAND_ECC_BCH30_1K_MODE:
  3647. chip->write_buf = aml_nand_dma_write_buf;
  3648. chip->read_buf = aml_nand_dma_read_buf;
  3649. chip->block_bad = aml_nand_block_bad;
  3650. chip->block_markbad = aml_nand_block_markbad;
  3651. chip->ecc.mode = NAND_ECC_HW;
  3652. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3653. chip->ecc.bytes = NAND_BCH30_1K_ECC_SIZE;
  3654. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3655. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3656. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3657. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3658. chip->ecc.read_oob = aml_nand_read_oob;
  3659. chip->ecc.write_oob = aml_nand_write_oob;
  3660. aml_chip->bch_mode = NAND_ECC_BCH30_1K;
  3661. aml_chip->user_byte_mode = 2;
  3662. break;
  3663. case NAND_ECC_BCH40_1K_MODE:
  3664. chip->write_buf = aml_nand_dma_write_buf;
  3665. chip->read_buf = aml_nand_dma_read_buf;
  3666. chip->block_bad = aml_nand_block_bad;
  3667. chip->block_markbad = aml_nand_block_markbad;
  3668. chip->ecc.mode = NAND_ECC_HW;
  3669. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3670. chip->ecc.bytes = NAND_BCH40_1K_ECC_SIZE;
  3671. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3672. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3673. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3674. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3675. chip->ecc.read_oob = aml_nand_read_oob;
  3676. chip->ecc.write_oob = aml_nand_write_oob;
  3677. aml_chip->bch_mode = NAND_ECC_BCH40_1K;
  3678. aml_chip->user_byte_mode = 2;
  3679. break;
  3680. case NAND_ECC_BCH60_1K_MODE:
  3681. chip->write_buf = aml_nand_dma_write_buf;
  3682. chip->read_buf = aml_nand_dma_read_buf;
  3683. chip->block_bad = aml_nand_block_bad;
  3684. chip->block_markbad = aml_nand_block_markbad;
  3685. chip->ecc.mode = NAND_ECC_HW;
  3686. chip->ecc.size = NAND_ECC_UNIT_1KSIZE;
  3687. chip->ecc.bytes = NAND_BCH60_1K_ECC_SIZE;
  3688. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3689. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3690. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3691. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3692. chip->ecc.read_oob = aml_nand_read_oob;
  3693. chip->ecc.write_oob = aml_nand_write_oob;
  3694. aml_chip->bch_mode = NAND_ECC_BCH60_1K;
  3695. aml_chip->user_byte_mode = 2;
  3696. break;
  3697. case NAND_ECC_SHORT_MODE:
  3698. chip->write_buf = aml_nand_dma_write_buf;
  3699. chip->read_buf = aml_nand_dma_read_buf;
  3700. chip->block_bad = aml_nand_block_bad;
  3701. chip->block_markbad = aml_nand_block_markbad;
  3702. chip->ecc.mode = NAND_ECC_HW;
  3703. chip->ecc.size = NAND_ECC_UNIT_SHORT;
  3704. chip->ecc.bytes = NAND_BCH60_1K_ECC_SIZE;
  3705. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3706. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3707. chip->ecc.read_page = aml_nand_read_page_hwecc;
  3708. chip->ecc.write_page = aml_nand_write_page_hwecc;
  3709. chip->ecc.read_oob = aml_nand_read_oob;
  3710. chip->ecc.write_oob = aml_nand_write_oob;
  3711. aml_chip->bch_mode = NAND_ECC_BCH_SHORT;
  3712. aml_chip->user_byte_mode = 2;
  3713. break;
  3714. default :
  3715. printk(KERN_WARNING "haven`t found any ecc mode just selected NAND_ECC_NONE\n");
  3716. chip->write_buf = aml_nand_dma_write_buf;
  3717. chip->read_buf = aml_nand_dma_read_buf;
  3718. chip->ecc.read_page_raw = aml_nand_read_page_raw;
  3719. chip->ecc.write_page_raw = aml_nand_write_page_raw;
  3720. chip->ecc.mode = NAND_ECC_NONE;
  3721. aml_chip->user_byte_mode = 1;
  3722. aml_chip->bch_mode = 0;
  3723. break;
  3724. }
  3725. if (!aml_chip->aml_nand_hw_init)
  3726. aml_chip->aml_nand_hw_init = aml_platform_hw_init;
  3727. if (!aml_chip->aml_nand_adjust_timing)
  3728. aml_chip->aml_nand_adjust_timing = aml_platform_adjust_timing;
  3729. if (!aml_chip->aml_nand_options_confirm)
  3730. aml_chip->aml_nand_options_confirm = aml_platform_options_confirm;
  3731. if (!aml_chip->aml_nand_cmd_ctrl)
  3732. aml_chip->aml_nand_cmd_ctrl = aml_platform_cmd_ctrl;
  3733. if (!aml_chip->aml_nand_select_chip)
  3734. aml_chip->aml_nand_select_chip = aml_platform_select_chip;
  3735. if (!aml_chip->aml_nand_write_byte)
  3736. aml_chip->aml_nand_write_byte = aml_platform_write_byte;
  3737. if (!aml_chip->aml_nand_wait_devready)
  3738. aml_chip->aml_nand_wait_devready = aml_platform_wait_devready;
  3739. if (!aml_chip->aml_nand_get_user_byte)
  3740. aml_chip->aml_nand_get_user_byte = aml_platform_get_user_byte;
  3741. if (!aml_chip->aml_nand_set_user_byte)
  3742. aml_chip->aml_nand_set_user_byte = aml_platform_set_user_byte;
  3743. if (!aml_chip->aml_nand_command)
  3744. aml_chip->aml_nand_command = aml_nand_base_command;
  3745. if (!aml_chip->aml_nand_dma_read)
  3746. aml_chip->aml_nand_dma_read = aml_platform_dma_read;
  3747. if (!aml_chip->aml_nand_dma_write)
  3748. aml_chip->aml_nand_dma_write = aml_platform_dma_write;
  3749. if (!aml_chip->aml_nand_hwecc_correct)
  3750. aml_chip->aml_nand_hwecc_correct = aml_platform_hwecc_correct;
  3751. if (!chip->IO_ADDR_R)
  3752. chip->IO_ADDR_R = (void __iomem *) CBUS_REG_ADDR(NAND_BUF);
  3753. if (!chip->IO_ADDR_W)
  3754. chip->IO_ADDR_W = (void __iomem *) CBUS_REG_ADDR(NAND_BUF);
  3755. chip->options |= NAND_SKIP_BBTSCAN;
  3756. chip->options |= NAND_NO_SUBPAGE_WRITE;
  3757. if (chip->ecc.mode != NAND_ECC_SOFT) {
  3758. if (aml_chip->user_byte_mode == 2)
  3759. chip->ecc.layout = &aml_nand_oob_64_2info;
  3760. else
  3761. chip->ecc.layout = &aml_nand_oob_64;
  3762. }
  3763. chip->select_chip = aml_nand_select_chip;
  3764. chip->cmd_ctrl = aml_nand_cmd_ctrl;
  3765. //chip->dev_ready = aml_nand_dev_ready;
  3766. chip->verify_buf = aml_nand_verify_buf;
  3767. chip->read_byte = aml_platform_read_byte;
/*
 * Tail of the controller probe/init routine (the function's signature and
 * opening brace are above this chunk): chip-count detection, RB-pin probing,
 * BCH/ECC limit configuration, OOB layout selection, DMA buffer allocation,
 * optional env char-device registration, class setup and partition creation.
 * Errors unwind through the exit_error goto-cleanup block at the bottom.
 */
	/* Chip count comes from platform data; the per-chip arrays below are
	 * sized MAX_CHIP_NUM, so refuse anything larger. */
	aml_chip->chip_num = plat->platform_nand_data.chip.nr_chips;
	if (aml_chip->chip_num > MAX_CHIP_NUM) {
		dev_err(aml_chip->device, "couldn`t support for so many chips\n");
		err = -ENXIO;
		goto exit_error;
	}
	/* Assume every chip select is populated; detection below may clear
	 * valid_chip[] entries.  Each pad word packs one 4-bit code per chip,
	 * shifted into the controller's CE/RB register field (<< 10). */
	for (i=0; i<aml_chip->chip_num; i++) {
		aml_chip->valid_chip[i] = 1;
		aml_chip->chip_enable[i] = (((plat->chip_enable_pad >> i*4) & 0xf) << 10);
		aml_chip->rb_enable[i] = (((plat->ready_busy_pad >> i*4) & 0xf) << 10);
	}
	//use NO RB mode to detect nand chip num
	aml_chip->ops_mode |= AML_CHIP_NONE_RB;
	chip->chip_delay = 100;
	aml_chip->aml_nand_hw_init(aml_chip);
	/* NOTE(review): only -ENODEV from the generic nand_scan() triggers the
	 * vendor-specific aml_nand_scan() fallback; any other failure code
	 * drops into the "detected" branch below and is treated as success --
	 * confirm that is intended. */
	if (nand_scan(mtd, aml_chip->chip_num) == -ENODEV) {
		chip->options = 0;
		chip->options |= NAND_SKIP_BBTSCAN;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		if (aml_nand_scan(mtd, aml_chip->chip_num)) {
			err = -ENXIO;
			goto exit_error;
		}
	}
	else {
		/* Generic scan handled it: keep only chip 0 valid and take the
		 * geometry straight from mtd; fall back to raw page accessors. */
		for (i=1; i<aml_chip->chip_num; i++) {
			aml_chip->valid_chip[i] = 0;
		}
		aml_chip->options = NAND_DEFAULT_OPTIONS;
		aml_chip->page_size = mtd->writesize;
		aml_chip->block_size = mtd->erasesize;
		aml_chip->oob_size = mtd->oobsize;
		aml_chip->plane_num = 1;
		aml_chip->internal_chipnr = 1;
		chip->ecc.read_page_raw = aml_nand_read_page_raw;
		chip->ecc.write_page_raw = aml_nand_write_page_raw;
	}
	/* Count the chips that survived detection. */
	valid_chip_num = 0;
	for (i=0; i<aml_chip->chip_num; i++) {
		if (aml_chip->valid_chip[i]) {
			valid_chip_num++;
		}
	}
	//due to hardware limit, for chip num over 2, must use NO RB mode.
	if(valid_chip_num > 2){
		printk("dect valid_chip_num:%d over 2, using NO RB mode\n", valid_chip_num);
	}
	else{
		/* Optionally auto-detect whether an RB pin is wired by reading
		 * the power-on-reset config register. */
		if(aml_chip->rbpin_detect){
			por_cfg = READ_CBUS_REG(ASSIST_POR_CONFIG);
			printk("%s auto detect RB pin here and por_cfg:%x\n", __func__, por_cfg);
			if(por_cfg&4){
				if(por_cfg&1){
					printk("%s detect without RB pin here\n", __func__);
					/* NOTE(review): rb_enable[0] looks like an
					 * integer register field (set from pad bits
					 * above); assigning NULL relies on it being
					 * pointer-sized or at least 0 -- confirm the
					 * field type. */
					aml_chip->rb_enable[0] = NULL;
				}
				else{
					printk("%s detect with RB pin here\n", __func__);
				}
			}
			else{
				printk("%s power config ERROR and force using NO RB mode here\n", __func__);
				aml_chip->rb_enable[0] = NULL;
			}
		}
		/* No RB line: stay in NONE_RB mode with a long fixed delay.
		 * RB present: use the ready/busy callback and a short delay. */
		if (!aml_chip->rb_enable[0]) {
			aml_chip->ops_mode |= AML_CHIP_NONE_RB;
			chip->dev_ready = NULL;
			chip->chip_delay = 100;
			printk("#####%s, none RB and chip->chip_delay:%d\n", __func__, chip->chip_delay);
		}
		else{
			chip->chip_delay = 20;
			chip->dev_ready = aml_nand_dev_ready;
			aml_chip->ops_mode &= ~AML_CHIP_NONE_RB;
			printk("#####%s, with RB pins and chip->chip_delay:%d\n", __func__, chip->chip_delay);
		}
	}
	chip->scan_bbt = aml_nand_scan_bbt;
	mtd->suspend = aml_nand_suspend;
	mtd->resume = aml_nand_resume;
	/* Optional per-SoC timing hook supplied by the lower layer. */
	if (aml_chip->aml_nand_adjust_timing)
		aml_chip->aml_nand_adjust_timing(aml_chip);
	if (chip->ecc.mode != NAND_ECC_SOFT) {
		if (aml_chip->aml_nand_options_confirm(aml_chip)) {
			err = -ENXIO;
			goto exit_error;
		}
	}
	mtd->writebufsize = mtd->writesize;
#if ((defined CONFIG_ARCH_MESON3) || (defined CONFIG_ARCH_MESON6))
	/* Map the selected BCH mode to its correctable-bit maximum (ecc_max)
	 * and the bit-flip count at which a block is considered degrading
	 * (ecc_cnt_limit). */
	switch(aml_chip->bch_mode){
		case NAND_ECC_BCH8:
		case NAND_ECC_BCH8_1K:
			aml_chip->ecc_cnt_limit = 6;
			aml_chip->ecc_max = 8;
			break;
		case NAND_ECC_BCH9:
			aml_chip->ecc_cnt_limit = 6;
			aml_chip->ecc_max = 9;
			break;
		case NAND_ECC_BCH12:
			aml_chip->ecc_cnt_limit = 6;
			aml_chip->ecc_max = 12;
			break;
		case NAND_ECC_BCH16:
		case NAND_ECC_BCH16_1K:
			aml_chip->ecc_cnt_limit = 13;
			aml_chip->ecc_max = 16;
			break;
		case NAND_ECC_BCH24_1K:
			aml_chip->ecc_cnt_limit = 20;
			aml_chip->ecc_max = 24;
			break;
		case NAND_ECC_BCH30_1K:
			aml_chip->ecc_cnt_limit = 25;
			aml_chip->ecc_max = 30;
			break;
		case NAND_ECC_BCH40_1K:
			aml_chip->ecc_cnt_limit = 35;
			aml_chip->ecc_max = 40;
			break;
		case NAND_ECC_BCH60_1K:
			aml_chip->ecc_cnt_limit = 50;
			aml_chip->ecc_max = 60;
			break;
		default:
			aml_chip->ecc_cnt_limit = 9;
			aml_chip->ecc_max = 16;
			break;
	}
#endif
	/* OOB layout: platform-supplied layout wins; otherwise pick a static
	 * table by the per-plane oob_size and scale it by oobmul (the ratio of
	 * total mtd oobsize to a single plane's oob_size). */
	if (plat->platform_nand_data.chip.ecclayout) {
		chip->ecc.layout = plat->platform_nand_data.chip.ecclayout;
	}
	else {
		oobmul =mtd->oobsize /aml_chip->oob_size ;
		if (!strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME))) {
			chip->ecc.layout = &aml_nand_uboot_oob;
		}
		else if (chip->ecc.mode != NAND_ECC_SOFT) {
			/* switch (mtd->oobsize) {*/
			switch (aml_chip->oob_size) {
				case 64:
					chip->ecc.layout = &aml_nand_oob_64_2info;
					break;
				case 128:
					chip->ecc.layout = &aml_nand_oob_128;
					break;
				case 218:
					chip->ecc.layout = &aml_nand_oob_218;
					break;
				case 224:
					chip->ecc.layout = &aml_nand_oob_224;
					break;
				case 256:
					chip->ecc.layout = &aml_nand_oob_256;
					break;
				case 376:
					chip->ecc.layout = &aml_nand_oob_376;
					break;
				case 436:
					chip->ecc.layout = &aml_nand_oob_436;
					break;
				case 448:
					chip->ecc.layout = &aml_nand_oob_448;
					break;
				case 640:
					chip->ecc.layout = &aml_nand_oob_640;
					break;
				default:
					/* Unknown size: build a minimal layout on the
					 * heap; fall back to the 64-byte table if the
					 * allocation fails. */
					printk("havn`t found any oob layout use nand base oob layout " "oobsize %d\n", mtd->oobsize);
					chip->ecc.layout = kzalloc(sizeof(struct nand_ecclayout), GFP_KERNEL);
					if (!chip->ecc.layout)
						chip->ecc.layout = &aml_nand_oob_64_2info;
					else
						chip->ecc.layout->oobfree[0].length = ((mtd->writesize / chip->ecc.size) * aml_chip->user_byte_mode);
					break;
			}
			/* NOTE(review): this scales the shared static layout tables
			 * in place; if the driver can probe twice the multiplication
			 * is applied repeatedly -- verify probe is single-shot. */
			chip->ecc.layout->eccbytes *= oobmul;
			chip->ecc.layout->oobfree[0].length *=oobmul;
			printk(" oob layout use nand base oob layout oobsize = %d,oobmul =%d,mtd->oobsize =%d,aml_chip->oob_size =%d\n", chip->ecc.layout->oobfree[0].length,oobmul,mtd->oobsize,aml_chip->oob_size);
		}
	}
	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area
	 */
	chip->ecc.layout->oobavail = 0;
	/* NOTE(review): the loop condition reads oobfree[i].length BEFORE the
	 * bounds check i < ARRAY_SIZE(...); if every entry is non-zero this
	 * reads one element past the array.  The conditions should be swapped. */
	for (i = 0; chip->ecc.layout->oobfree[i].length && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
		chip->ecc.layout->oobavail += chip->ecc.layout->oobfree[i].length;
	mtd->oobavail = chip->ecc.layout->oobavail;
	mtd->ecclayout = chip->ecc.layout;
	aml_chip->virtual_page_size = mtd->writesize;
	aml_chip->virtual_block_size = mtd->erasesize;
	/* DMA-coherent bounce buffers: one page + oob for data, one int per
	 * ECC step for the per-step user/info words. */
	aml_chip->aml_nand_data_buf = dma_alloc_coherent(aml_chip->device, (mtd->writesize + mtd->oobsize), &aml_chip->data_dma_addr, GFP_KERNEL);
	if (aml_chip->aml_nand_data_buf == NULL) {
		printk("no memory for flash data buf\n");
		err = -ENOMEM;
		goto exit_error;
	}
	aml_chip->user_info_buf = dma_alloc_coherent(aml_chip->device, (mtd->writesize / chip->ecc.size)*sizeof(int), &(aml_chip->nand_info_dma_addr), GFP_KERNEL);
	if (aml_chip->user_info_buf == NULL) {
		printk("no memory for flash info buf\n");
		err = -ENOMEM;
		goto exit_error;
	}
	/* Replace the nand-base buffers with our own, sized for the larger of
	 * the real oobsize and NAND_MAX_OOBSIZE (3x for data/ecc/oob_poi). */
	if (chip->buffers)
		kfree(chip->buffers);
	if (mtd->oobsize >= NAND_MAX_OOBSIZE)
		chip->buffers = kzalloc((mtd->writesize + 3*mtd->oobsize), GFP_KERNEL);
	else
		chip->buffers = kzalloc((mtd->writesize + 3*NAND_MAX_OOBSIZE), GFP_KERNEL);
	if (chip->buffers == NULL) {
		printk("no memory for flash data buf\n");
		err = -ENOMEM;
		goto exit_error;
	}
	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
	chip->options |= NAND_OWN_BUFFERS;
#ifdef NEW_NAND_SUPPORT
	/* New-style (type 1..9) flashes carry extra SLC-mode program info. */
	if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10)){
		aml_chip->new_nand_info.slc_program_info.get_default_value(mtd);
	}
#endif
	/* Everything below runs only for the non-boot (normal) device. */
	if (strncmp((char*)plat->name, NAND_BOOT_NAME, strlen((const char*)NAND_BOOT_NAME))) {
#ifdef NEW_NAND_SUPPORT
		if((aml_chip->new_nand_info.type) && (aml_chip->new_nand_info.type < 10)){
			aml_chip->new_nand_info.read_rety_info.get_default_value(mtd);
		}
#endif
		/* One status byte per eraseblock, initialised to 0xff (unknown). */
		phys_erase_shift = fls(mtd->erasesize) - 1;
		aml_chip->block_status = kzalloc((mtd->size >> phys_erase_shift), GFP_KERNEL);
		if (aml_chip->block_status == NULL) {
			printk("no memory for flash block status\n");
			err = -ENOMEM;
			goto exit_error;
		}
		memset(aml_chip->block_status, 0xff, (mtd->size >> phys_erase_shift));
		err = aml_nand_env_check(mtd);
		if (err)
			printk("invalid nand env\n");
#ifdef CONFIG_AML_NAND_ENV
		/* Register a char device exposing the nand env partition.
		 * NOTE(review): failures here `return 0`/`return ret` directly,
		 * skipping the exit_error cleanup path -- buffers allocated
		 * above would leak on the error returns. */
		int ret;
		struct device *devp;
		static dev_t nand_env_devno;
		pr_info("nand env: nand_env_probe. \n");
		nand_env_mtd = mtd;
		ret = alloc_chrdev_region(&nand_env_devno, 0, 1, NAND_ENV_DEVICE_NAME);
		if (ret < 0) {
			pr_err("nand_env: failed to allocate chrdev. \n");
			return 0;
		}
		/* connect the file operations with cdev */
		cdev_init(&aml_chip->nand_env_cdev, &nand_env_fops);
		aml_chip->nand_env_cdev.owner = THIS_MODULE;
		/* connect the major/minor number to the cdev */
		ret = cdev_add(&aml_chip->nand_env_cdev, nand_env_devno, 1);
		if (ret) {
			pr_err("nand env: failed to add device. \n");
			/* @todo do with error */
			return ret;
		}
		ret = class_register(&nand_env_class);
		if (ret < 0) {
			printk(KERN_NOTICE "class_register(&nand_env_class) failed!\n");
		}
		devp = device_create(&nand_env_class, NULL, nand_env_devno, NULL, "nand_env");
		if (IS_ERR(devp)) {
			printk(KERN_ERR "nand_env: failed to create device node\n");
			ret = PTR_ERR(devp);
		}
#endif
		/*setup class*/
		/* NOTE(review): kzalloc result is passed to strcpy without a
		 * NULL check -- an allocation failure here would oops. */
		aml_chip->cls.name = kzalloc(strlen((const char*)NAND_MULTI_NAME)+1, GFP_KERNEL);
		strcpy(aml_chip->cls.name, (const char*)NAND_MULTI_NAME);
		//sprintf(aml_chip->cls.name, NAND_MULTI_NAME);
		aml_chip->cls.class_attrs = nand_class_attrs;
		err = class_register(&aml_chip->cls);
		if(err)
			printk(" class register nand_class fail!\n");
	}
	if (aml_nand_add_partition(aml_chip) != 0) {
		err = -ENXIO;
		goto exit_error;
	}
	dev_dbg(aml_chip->device, "initialized ok\n");
	return 0;
exit_error:
	/* Unwind: release the suspend hook and every buffer allocated above.
	 * dma_free_coherent sizes must mirror the corresponding allocations. */
	unregister_early_suspend(&aml_chip->nand_early_suspend);
	if (aml_chip->user_info_buf) {
		dma_free_coherent(aml_chip->device, (mtd->writesize / chip->ecc.size)*sizeof(int), aml_chip->user_info_buf, (dma_addr_t)aml_chip->nand_info_dma_addr);
		aml_chip->user_info_buf = NULL;
	}
	if (chip->buffers) {
		kfree(chip->buffers);
		chip->buffers = NULL;
	}
	if (aml_chip->aml_nand_data_buf) {
		dma_free_coherent(aml_chip->device, (mtd->writesize + mtd->oobsize), aml_chip->aml_nand_data_buf, (dma_addr_t)aml_chip->data_dma_addr);
		aml_chip->aml_nand_data_buf = NULL;
	}
	if (aml_chip->block_status) {
		kfree(aml_chip->block_status);
		aml_chip->block_status = NULL;
	}
	return err;
}
  4075. #define DRV_NAME "aml-nand"
  4076. #define DRV_VERSION "1.1"
  4077. #define DRV_AUTHOR "xiaojun_yoyo"
  4078. #define DRV_DESC "Amlogic nand flash host controll driver for M1"
  4079. MODULE_LICENSE("GPL");
  4080. MODULE_AUTHOR(DRV_AUTHOR);
  4081. MODULE_DESCRIPTION(DRV_DESC);
  4082. MODULE_ALIAS("platform:" DRV_NAME);