/******************************************************************************
 * mtk_nand.c - MTK NAND Flash Device Driver
 *
 * Copyright 2009-2012 MediaTek Co., Ltd.
 *
 * DESCRIPTION:
 *	This file provides NAND-related functions to the other drivers.
 *
 * modification history
 * ----------------------------------------
 * v3.0, 11 Feb 2010, mtk
 * ----------------------------------------
 ******************************************************************************/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <linux/miscdevice.h>
#include <mach/dma.h>
#include <mach/mt_clkmgr.h>
#include <mach/mtk_nand.h>
#include <mtk_nand_util.h>
#include "bmt.h"
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/rtc.h>
#ifdef CONFIG_PWR_LOSS_MTK_SPOH
#include <mach/power_loss_test.h>
#endif
#include <asm/div64.h>
#include <linux/regulator/consumer.h>

#define VERSION "v2.1 Fix AHB virt2phys error"
#define MODULE_NAME "# MTK NAND #"
#define PROCNAME "driver/nand"
#define PMT 1
/*#define _MTK_NAND_DUMMY_DRIVER_*/
#define __INTERNAL_USE_AHB_MODE__ 1
/*
 * Access Pattern Logger
 *
 * Enable the facility to record the MTD/DRV read/program/erase pattern.
 */
#if (defined(CONFIG_MTK_MLC_NAND_SUPPORT) || defined(CONFIG_MTK_TLC_NAND_SUPPORT))
bool MLC_DEVICE = TRUE;
#endif
static bool DDR_INTERFACE = FALSE;
void __iomem *mtk_nfi_base;
void __iomem *mtk_nfiecc_base;
void __iomem *mtk_io_base;
struct device_node *mtk_nfi_node;
struct device_node *mtk_nfiecc_node;
struct device_node *mtk_io_node;
struct device_node *mtk_pm_node;
struct regulator *nfi_reg_vemc_3v3 = NULL;
unsigned int nfi_irq = 0;
#define MT_NFI_IRQ_ID nfi_irq
struct device *mtk_dev;
struct scatterlist mtk_sg;
enum dma_data_direction mtk_dir;
bool tlc_lg_left_plane = TRUE;
enum NFI_TLC_PG_CYCLE tlc_program_cycle;
bool tlc_not_keep_erase_lvl = FALSE;
u32 slc_ratio = 6;
u32 sys_slc_ratio = 6;
u32 usr_slc_ratio = 6;
u32 force_slc_flag = 0;
#if defined(NAND_OTP_SUPPORT)
#define SAMSUNG_OTP_SUPPORT 1
#define OTP_MAGIC_NUM 0x4E3AF28B
#define SAMSUNG_OTP_PAGE_NUM 6
static const unsigned int Samsung_OTP_Page[SAMSUNG_OTP_PAGE_NUM] = {
	0x15, 0x16, 0x17, 0x18, 0x19, 0x1b };
static struct mtk_otp_config g_mtk_otp_fuc;
static spinlock_t g_OTPLock;
#define OTP_MAGIC 'k'
/* NAND OTP IO control number */
#define OTP_GET_LENGTH _IOW(OTP_MAGIC, 1, int)
#define OTP_READ _IOW(OTP_MAGIC, 2, int)
#define OTP_WRITE _IOW(OTP_MAGIC, 3, int)
#define FS_OTP_READ 0
#define FS_OTP_WRITE 1
/* NAND OTP Error codes */
#define OTP_SUCCESS 0
#define OTP_ERROR_OVERSCOPE -1
#define OTP_ERROR_TIMEOUT -2
#define OTP_ERROR_BUSY -3
#define OTP_ERROR_NOMEM -4
#define OTP_ERROR_RESET -5
struct mtk_otp_config {
	u32 (*OTPRead)(u32 PageAddr, void *BufferPtr, void *SparePtr);
	u32 (*OTPWrite)(u32 PageAddr, void *BufferPtr, void *SparePtr);
	u32 (*OTPQueryLength)(u32 *Length);
};
struct otp_ctl {
	unsigned int QLength;
	unsigned int Offset;
	unsigned int Length;
	char *BufferPtr;
	unsigned int status;
};
#endif
#define ERR_RTN_SUCCESS 1
#define ERR_RTN_FAIL 0
#define ERR_RTN_BCH_FAIL -1
#define NFI_SET_REG32(reg, value) \
	do { \
		g_value = (DRV_Reg32(reg) | (value));\
		DRV_WriteReg32(reg, g_value); \
	} while (0)
#define NFI_SET_REG16(reg, value) \
	do { \
		g_value = (DRV_Reg16(reg) | (value));\
		DRV_WriteReg16(reg, g_value); \
	} while (0)
#define NFI_CLN_REG32(reg, value) \
	do { \
		g_value = (DRV_Reg32(reg) & (~(value)));\
		DRV_WriteReg32(reg, g_value); \
	} while (0)
#define NFI_CLN_REG16(reg, value) \
	do { \
		g_value = (DRV_Reg16(reg) & (~(value)));\
		DRV_WriteReg16(reg, g_value); \
	} while (0)
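/*
 * The four helpers above do a read-modify-write on an NFI register through
 * the shared scratch variable g_value, so they are not atomic with respect
 * to concurrent callers. Illustrative use (a sketch; CNFG_AHB and
 * CNFG_READ_EN are bit masks assumed to come from this driver's headers):
 *
 *	NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);	// set a bit field
 *	NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);	// clear a bit field
 */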
#define NFI_WAIT_STATE_DONE(state) do {; } while (__raw_readl(NFI_STA_REG32) & state)
#define NFI_WAIT_TO_READY() do {; } while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
#define FIFO_PIO_READY(x) (0x1 & x)
/* Spin until the PIO FIFO reports data-ready; the caller's timeout counter
 * is decremented on every iteration so the loop stays bounded. */
#define WAIT_NFI_PIO_READY(timeout) \
	do {\
		while ((!FIFO_PIO_READY(DRV_Reg16(NFI_PIO_DIRDY_REG16))) && \
			(--timeout)) \
			; \
	} while (0)
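/*
 * Usage sketch (illustrative, not from the original source): the argument
 * is a raw spin count, not a time unit, and reaches 0 on timeout.
 *
 *	u32 timeout = TIMEOUT_1;
 *	WAIT_NFI_PIO_READY(timeout);
 *	if (timeout == 0)
 *		return false;	// data never became ready in PIO mode
 */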
#define NAND_SECTOR_SIZE (512)
#define OOB_PER_SECTOR (16)
#define OOB_AVAI_PER_SECTOR (8)
#if defined(CONFIG_MTK_COMBO_NAND_SUPPORT)
#ifndef PART_SIZE_BMTPOOL
#define BMT_POOL_SIZE (80)
#else
#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
#endif
#else
#ifndef PART_SIZE_BMTPOOL
#define BMT_POOL_SIZE (80)
#else
#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
#endif
#endif
u8 ecc_threshold;
#define PMT_POOL_SIZE (2)
bool g_b2Die_CS;
/*******************************************************************************
 * Global Variable Definition
 *******************************************************************************/
struct nand_perf_log {
	unsigned int ReadPageCount;
	suseconds_t ReadPageTotalTime;
	unsigned int ReadBusyCount;
	suseconds_t ReadBusyTotalTime;
	unsigned int ReadDMACount;
	suseconds_t ReadDMATotalTime;
	unsigned int ReadSubPageCount;
	suseconds_t ReadSubPageTotalTime;
	unsigned int WritePageCount;
	suseconds_t WritePageTotalTime;
	unsigned int WriteBusyCount;
	suseconds_t WriteBusyTotalTime;
	unsigned int WriteDMACount;
	suseconds_t WriteDMATotalTime;
	unsigned int EraseBlockCount;
	suseconds_t EraseBlockTotalTime;
};
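/*
 * Each count/total-time pair above is consumed as "total time / count" to
 * report an average latency, as dump_nand_rwcount() does further down.
 * A minimal sketch, assuming the caller timestamps around the operation:
 *
 *	g_NandPerfLog.ReadPageCount++;
 *	g_NandPerfLog.ReadPageTotalTime += Cal_timediff(&end, &start);
 */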
#ifdef PWR_LOSS_SPOH
#define PL_TIME_RAND_PROG(chip, page_addr, time) do { \
	if (host->pl.nand_program_wdt_enable == 1) \
		PL_TIME_RAND(page_addr, time, host->pl.last_prog_time); \
	else \
		time = 0; \
	} while (0)
#define PL_TIME_RAND_ERASE(chip, page_addr, time) do { \
	if (host->pl.nand_erase_wdt_enable == 1) { \
		PL_TIME_RAND(page_addr, time, host->pl.last_erase_time); \
		if (time != 0) \
			pr_err("[MVG_TEST]: Erase reset in %d us\n", time); \
	} else \
		time = 0; \
	} while (0)
#define PL_TIME_PROG(duration) host->pl.last_prog_time = duration
#define PL_TIME_ERASE(duration) host->pl.last_erase_time = duration
#define PL_TIME_PROG_WDT_SET(WDT) host->pl.nand_program_wdt_enable = WDT
#define PL_TIME_ERASE_WDT_SET(WDT) host->pl.nand_erase_wdt_enable = WDT
#define PL_NAND_BEGIN(time) PL_BEGIN(time)
#define PL_NAND_RESET(time) PL_RESET(time)
#define PL_NAND_END(pl_time_write, duration) PL_END(pl_time_write, duration)
#else
#define PL_TIME_RAND_PROG(chip, page_addr, time)
#define PL_TIME_RAND_ERASE(chip, page_addr, time)
#define PL_TIME_PROG(duration)
#define PL_TIME_ERASE(duration)
#define PL_TIME_PROG_WDT_SET(WDT)
#define PL_TIME_ERASE_WDT_SET(WDT)
#define PL_NAND_BEGIN(time)
#define PL_NAND_RESET(time)
#define PL_NAND_END(pl_time_write, duration)
#endif
#ifdef DUMP_PEF
static struct nand_perf_log g_NandPerfLog = {0};
static struct timeval g_NandLogTimer = {0};
#endif
#ifdef MTK_NAND_CMD_DUMP
struct cmd_sequence {
	u8 cmd_array[2];
	u32 address[3];
};
struct write_trace_dbg {
	struct cmd_sequence cmd;
};
#define MAX_CMD_LOG_CNT (20480)
struct write_trace_dbg dbg_inf[MAX_CMD_LOG_CNT];
u32 current_idx = 0;
u32 last_idx = 0;
#define current_idx_add() do {\
	current_idx++;\
	if (current_idx == MAX_CMD_LOG_CNT)\
		current_idx = 0;\
	dbg_inf[current_idx].cmd.cmd_array[0] = 0xFF;\
	dbg_inf[current_idx].cmd.cmd_array[1] = 0xFF;\
	dbg_inf[current_idx].cmd.address[0] = 0xFF;\
	dbg_inf[current_idx].cmd.address[1] = 0xFF;\
	dbg_inf[current_idx].cmd.address[2] = 0xFF;\
	} while (0)
void dump_cmd_log(void)
{
	u32 idx;

	idx = current_idx;
	while (idx != last_idx) {
		MSG(INIT, "dbg_inf[%d].cmd = (0x%x, 0x%x) addr(%d, %d, %d)\n",
			idx, dbg_inf[idx].cmd.cmd_array[0], dbg_inf[idx].cmd.cmd_array[1],
			dbg_inf[idx].cmd.address[0], dbg_inf[idx].cmd.address[1],
			dbg_inf[idx].cmd.address[2]);
		if (idx == 0)
			idx = MAX_CMD_LOG_CNT;
		idx--;
	}
	last_idx = current_idx;
}
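/*
 * Note: dbg_inf[] is a fixed-size ring buffer. current_idx_add() advances
 * the write cursor (wrapping at MAX_CMD_LOG_CNT) and pre-fills the new slot
 * with 0xFF sentinels, while dump_cmd_log() walks backward from the newest
 * entry (current_idx) to the last dumped one (last_idx), so the trace
 * prints newest-first.
 */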
#else
#define dump_cmd_log()
#define current_idx_add()
#endif
#ifdef NAND_PFM
static suseconds_t g_PFM_R;
static suseconds_t g_PFM_W;
static suseconds_t g_PFM_E;
static u32 g_PFM_RNum;
static u32 g_PFM_RD;
static u32 g_PFM_WD;
static struct timeval g_now;
#define PFM_BEGIN(time) \
	do { \
		do_gettimeofday(&g_now); \
		(time) = g_now; \
	} while (0)
#define PFM_END_R(time, n) \
	do { \
		do_gettimeofday(&g_now); \
		g_PFM_R += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
		g_PFM_RNum += 1; \
		g_PFM_RD += n; \
		MSG(PERFORMANCE, "%s - Read PFM: %lu, data: %d, ReadOOB: %d (%d, %d)\n", MODULE_NAME, g_PFM_R, g_PFM_RD, \
			g_kCMD.pureReadOOB, g_kCMD.pureReadOOBNum, g_PFM_RNum); \
	} while (0)
#define PFM_END_W(time, n) \
	do { \
		do_gettimeofday(&g_now); \
		g_PFM_W += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
		g_PFM_WD += n; \
		MSG(PERFORMANCE, "%s - Write PFM: %lu, data: %d\n", MODULE_NAME, g_PFM_W, g_PFM_WD); \
	} while (0)
#define PFM_END_E(time) \
	do { \
		do_gettimeofday(&g_now); \
		g_PFM_E += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
		MSG(PERFORMANCE, "%s - Erase PFM: %lu\n", MODULE_NAME, g_PFM_E); \
	} while (0)
#else
#define PFM_BEGIN(time)
#define PFM_END_R(time, n)
#define PFM_END_W(time, n)
#define PFM_END_E(time)
#endif
#define TIMEOUT_1 0x1fff
#define TIMEOUT_2 0x8ff
#define TIMEOUT_3 0xffff
#define TIMEOUT_4 0xffff
#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
	do { \
		DRV_WriteReg16(NFI_CMD_REG16, cmd);\
		while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE)\
			;\
		DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
		DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
		DRV_WriteReg16(NFI_ADDRNOB_REG16, col_num | (row_num << ADDR_ROW_NOB_SHIFT));\
		while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE)\
			;\
	} while (0)
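/*
 * NFI_ISSUE_COMMAND pushes one command byte, waits for the command state
 * machine to drain, then programs the column/row address registers and the
 * number of address cycles, waiting again until the address phase is done.
 * Illustrative call (a sketch; NAND_CMD_READ0 is the standard 0x00 read
 * opcode, with 2 column + 3 row address cycles assumed):
 *
 *	NFI_ISSUE_COMMAND(NAND_CMD_READ0, column, row_addr, 2, 3);
 */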
/*-------------------------------------------------------------------------------*/
static struct completion g_comp_AHB_Done;
static struct NAND_CMD g_kCMD;
bool g_bInitDone;
static int g_i4Interrupt;
static bool g_bcmdstatus;
static bool g_brandstatus;
static u32 g_value;
static int g_page_size;
static int g_block_size;
static u32 PAGES_PER_BLOCK = 255;
#if __INTERNAL_USE_AHB_MODE__
bool g_bHwEcc = true;
#else
bool g_bHwEcc = false;
#endif
#define LPAGE 16384
#define LSPARE 2048
/* create default to read id */
struct mtk_nand_host_hw mtk_nand_hw = {
	.nfi_bus_width = 8,
	.nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
	.nfi_cs_num = NFI_CS_NUM,
	.nand_sec_size = 512,
	.nand_sec_shift = 9,
	.nand_ecc_size = 2048,
	.nand_ecc_bytes = 32,
	.nand_ecc_mode = NAND_ECC_HW,
};
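/*
 * The defaults above are just enough to issue READ ID and probe the part:
 * an 8-bit bus with 512-byte HW-ECC sectors (nand_sec_shift = 9 because
 * 512 == 1 << 9) and 2K ECC chunks. The real geometry is presumably
 * refined once the attached device has been identified.
 */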
static u8 *local_buffer_16_align;	/* 16-byte aligned buffer, for HW issue */
__aligned(64)
static u8 local_buffer[16384 + 2048];
static u8 *temp_buffer_16_align;	/* 16-byte aligned buffer, for HW issue */
__aligned(64)
static u8 temp_buffer[16384 + 2048];
__aligned(64)
u8 rrtry_buffer[16384 + 2048];
#ifdef NAND_FEATURE_TEST
__aligned(64)
static u8 test_buffer[16384 + 2048];
#endif
__aligned(64)
static u8 local_tlc_wl_buffer[16384 + 2048];
static int mtk_nand_interface_config(struct mtd_info *mtd);
static bmt_struct *g_bmt;
struct mtk_nand_host *host;
static u8 g_running_dma;
#ifdef DUMP_NATIVE_BACKTRACE
static u32 g_dump_count;
#endif
int manu_id;
int dev_id;
static u8 local_oob_buf[2048];
#ifdef _MTK_NAND_DUMMY_DRIVER_
int dummy_driver_debug;
#endif
typedef u32 (*GetLowPageNumber)(u32 pageNo);
typedef u32 (*TransferPageNumber)(u32 pageNo, bool high_to_low);
enum NAND_TYPE_MASK {
	TYPE_ASYNC = 0x0,
	TYPE_TOGGLE = 0x1,
	TYPE_SYNC = 0x2,
	TYPE_RESERVED = 0x3,
	TYPE_MLC = 0x4,
	TYPE_SLC = 0x4,
};
flashdev_info gn_devinfo;
GetLowPageNumber functArray[] = {
	MICRON_TRANSFER,
	HYNIX_TRANSFER,
	SANDISK_TRANSFER,
};
TransferPageNumber fsFuncArray[] = {
	micron_pairpage_mapping,
	hynix_pairpage_mapping,
	sandisk_pairpage_mapping,
};
u32 SANDISK_TRANSFER(u32 pageNo)
{
	if (0 == pageNo)
		return pageNo;
	else
		return pageNo + pageNo - 1;
}

u32 HYNIX_TRANSFER(u32 pageNo)
{
	u32 temp;

	if (pageNo < 4)
		return pageNo;
	temp = pageNo + (pageNo & 0xFFFFFFFE) - 2;
	return temp;
}

u32 MICRON_TRANSFER(u32 pageNo)
{
	u32 temp;

	if (pageNo < 4)
		return pageNo;
	temp = (pageNo - 4) & 0xFFFFFFFE;
	if (pageNo <= 130)
		return pageNo + temp;
	return pageNo + temp - 2;
}
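/*
 * The three functions above map a logical page number to the physical low
 * page of its MLC word line; the layout differs per vendor, which is why
 * they are dispatched through functArray[]. For example, from the formulas
 * here, SANDISK_TRANSFER(3) == 5 and MICRON_TRANSFER(6) == 8.
 */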
u32 sandisk_pairpage_mapping(u32 page, bool high_to_low)
{
	if (TRUE == high_to_low) {
		if (page == 255)
			return page - 2;
		if ((page == 0) || (1 == (page % 2)))
			return page;
		if (page == 2)
			return 0;
		return page - 3;
	}
	if (TRUE != high_to_low) {
		if ((page != 0) && (0 == (page % 2)))
			return page;
		if (page == 255)
			return page;
		if (page == 0 || page == 253)
			return page + 2;
		return page + 3;
	}
	return page + 3;
}
u32 hynix_pairpage_mapping(u32 page, bool high_to_low)
{
	u32 offset;

	if (TRUE == high_to_low) {
		/* Hynix, 256 pages per block */
		if (page < 4)
			return page;
		offset = page % 4;
		if (offset == 2 || offset == 3)
			return page;
		if (page == 4 || page == 5 || page == 254 || page == 255)
			return page - 4;
		return page - 6;
	}
	if (TRUE != high_to_low) {
		if (page > 251)
			return page;
		if (page == 0 || page == 1)
			return page + 4;
		offset = page % 4;
		if (offset == 0 || offset == 1)
			return page;
		return page + 6;
	}
	return page + 6;
}
u32 micron_pairpage_mapping(u32 page, bool high_to_low)
{
	u32 offset;

	if (TRUE == high_to_low) {
		/* Micron, 256 pages per block */
		if ((page < 4) || (page > 251))
			return page;
		offset = page % 4;
		if (offset == 0 || offset == 1)
			return page;
		else
			return page - 6;
	} else {
		if ((page == 2) || (page == 3) || (page > 247))
			return page;
		offset = page % 4;
		if (offset == 0 || offset == 1)
			return page + 6;
		else
			return page;
	}
}
int mtk_nand_paired_page_transfer(u32 pageNo, bool high_to_low)
{
	if (gn_devinfo.vendor != VEND_NONE)
		return fsFuncArray[gn_devinfo.feature_set.ptbl_idx] (pageNo, high_to_low);
	else
		return 0xFFFFFFFF;
}
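/*
 * Usage sketch (illustrative): on MLC parts a power cut while programming
 * a high page can also corrupt its paired low page, so recovery code maps
 * a suspect page to its partner before deciding what to re-check.
 * check_page() below is a hypothetical helper, not part of this driver.
 *
 *	u32 pair = mtk_nand_paired_page_transfer(page, TRUE);
 *	if (pair != 0xFFFFFFFF && pair != page)
 *		check_page(pair);
 */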
void nand_enable_clock(void)
{
#if 1
	enable_clock(MT_CG_NFI_SW_CG, "NFI");
	enable_clock(MT_CG_NFI2X_SW_CG, "NFI");
	enable_clock(MT_CG_NFI_BUS_SW_CG, "NFI");
	enable_clock(MT_CG_NFIECC_SW_CG, "NFI");
#endif /*TODO*/
	return;
}

void nand_disable_clock(void)
{
#if 1
	disable_clock(MT_CG_NFI_SW_CG, "NFI");
	disable_clock(MT_CG_NFI2X_SW_CG, "NFI");
	disable_clock(MT_CG_NFI_BUS_SW_CG, "NFI");
	disable_clock(MT_CG_NFIECC_SW_CG, "NFI");
#endif /*TODO*/
	return;
}
static struct nand_ecclayout nand_oob_16 = {
	.eccbytes = 8,
	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
	.oobfree = {{1, 6}, {0, 0} }
};
struct nand_ecclayout nand_oob_64 = {
	.eccbytes = 32,
	.eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0} }
};
struct nand_ecclayout nand_oob_128 = {
	.eccbytes = 64,
	.eccpos = {
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	.oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6} }
};
static bool use_randomizer = FALSE;
#ifdef DUMP_PEF
static suseconds_t Cal_timediff(struct timeval *end_time, struct timeval *start_time)
{
	struct timeval difference;

	difference.tv_sec = end_time->tv_sec - start_time->tv_sec;
	difference.tv_usec = end_time->tv_usec - start_time->tv_usec;
	/* Using while instead of if below makes the code slightly more robust. */
	while (difference.tv_usec < 0) {
		difference.tv_usec += 1000000;
		difference.tv_sec -= 1;
	}
	return 1000000LL * difference.tv_sec + difference.tv_usec;
} /* Cal_timediff() */
void dump_nand_rwcount(void)
{
#if 0
	struct timeval now_time;

	do_gettimeofday(&now_time);
	if (Cal_timediff(&now_time, &g_NandLogTimer) > (500 * 1000)) {
		MSG(INIT,
			"RP: %d (%lu us) RSC: %d (%lu us) WPC: %d (%lu us) EC: %d mtd(0/512/1K/2K/3K/4K): %d %d %d %d %d %d\n ",
			g_NandPerfLog.ReadPageCount,
			g_NandPerfLog.ReadPageCount ? (g_NandPerfLog.ReadPageTotalTime /
				g_NandPerfLog.ReadPageCount) : 0,
			g_NandPerfLog.ReadSubPageCount,
			g_NandPerfLog.ReadSubPageCount ? (g_NandPerfLog.ReadSubPageTotalTime /
				g_NandPerfLog.ReadSubPageCount) : 0,
			g_NandPerfLog.WritePageCount,
			g_NandPerfLog.WritePageCount ? (g_NandPerfLog.WritePageTotalTime /
				g_NandPerfLog.WritePageCount) : 0,
			g_NandPerfLog.EraseBlockCount, g_MtdPerfLog.read_size_0_512,
			g_MtdPerfLog.read_size_512_1K, g_MtdPerfLog.read_size_1K_2K,
			g_MtdPerfLog.read_size_2K_3K, g_MtdPerfLog.read_size_3K_4K,
			g_MtdPerfLog.read_size_Above_4K);
		memset(&g_NandPerfLog, 0x00, sizeof(g_NandPerfLog));
		memset(&g_MtdPerfLog, 0x00, sizeof(g_MtdPerfLog));
		do_gettimeofday(&g_NandLogTimer);
	}
#endif
}
#endif
void dump_nfi(void)
{
#if __DEBUG_NAND
    pr_err("~~~~Dump NFI Register in Kernel~~~~\n");
    pr_err("NFI_CNFG_REG16: 0x%x\n", DRV_Reg16(NFI_CNFG_REG16));
    pr_err("NFI_PAGEFMT_REG16: 0x%x\n", DRV_Reg16(NFI_PAGEFMT_REG16));
    pr_err("NFI_CON_REG16: 0x%x\n", DRV_Reg16(NFI_CON_REG16));
    pr_err("NFI_ACCCON_REG32: 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
    pr_err("NFI_INTR_EN_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_EN_REG16));
    pr_err("NFI_INTR_REG16: 0x%x\n", DRV_Reg16(NFI_INTR_REG16));
    pr_err("NFI_CMD_REG16: 0x%x\n", DRV_Reg16(NFI_CMD_REG16));
    pr_err("NFI_ADDRNOB_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRNOB_REG16));
    pr_err("NFI_COLADDR_REG32: 0x%x\n", DRV_Reg32(NFI_COLADDR_REG32));
    pr_err("NFI_ROWADDR_REG32: 0x%x\n", DRV_Reg32(NFI_ROWADDR_REG32));
    pr_err("NFI_STRDATA_REG16: 0x%x\n", DRV_Reg16(NFI_STRDATA_REG16));
    pr_err("NFI_DATAW_REG32: 0x%x\n", DRV_Reg32(NFI_DATAW_REG32));
    pr_err("NFI_DATAR_REG32: 0x%x\n", DRV_Reg32(NFI_DATAR_REG32));
    pr_err("NFI_PIO_DIRDY_REG16: 0x%x\n", DRV_Reg16(NFI_PIO_DIRDY_REG16));
    pr_err("NFI_STA_REG32: 0x%x\n", DRV_Reg32(NFI_STA_REG32));
    pr_err("NFI_FIFOSTA_REG16: 0x%x\n", DRV_Reg16(NFI_FIFOSTA_REG16));
    /* pr_err("NFI_LOCKSTA_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKSTA_REG16)); */
    pr_err("NFI_ADDRCNTR_REG16: 0x%x\n", DRV_Reg16(NFI_ADDRCNTR_REG16));
    pr_err("NFI_STRADDR_REG32: 0x%x\n", DRV_Reg32(NFI_STRADDR_REG32));
    pr_err("NFI_BYTELEN_REG16: 0x%x\n", DRV_Reg16(NFI_BYTELEN_REG16));
    pr_err("NFI_CSEL_REG16: 0x%x\n", DRV_Reg16(NFI_CSEL_REG16));
    pr_err("NFI_IOCON_REG16: 0x%x\n", DRV_Reg16(NFI_IOCON_REG16));
    pr_err("NFI_FDM0L_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0L_REG32));
    pr_err("NFI_FDM0M_REG32: 0x%x\n", DRV_Reg32(NFI_FDM0M_REG32));
    pr_err("NFI_LOCK_REG16: 0x%x\n", DRV_Reg16(NFI_LOCK_REG16));
    pr_err("NFI_LOCKCON_REG32: 0x%x\n", DRV_Reg32(NFI_LOCKCON_REG32));
    pr_err("NFI_LOCKANOB_REG16: 0x%x\n", DRV_Reg16(NFI_LOCKANOB_REG16));
    pr_err("NFI_FIFODATA0_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA0_REG32));
    pr_err("NFI_FIFODATA1_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA1_REG32));
    pr_err("NFI_FIFODATA2_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA2_REG32));
    pr_err("NFI_FIFODATA3_REG32: 0x%x\n", DRV_Reg32(NFI_FIFODATA3_REG32));
    pr_err("NFI_MASTERSTA_REG16: 0x%x\n", DRV_Reg16(NFI_MASTERSTA_REG16));
    pr_err("NFI_DEBUG_CON1_REG16: 0x%x\n", DRV_Reg16(NFI_DEBUG_CON1_REG16));
    pr_err("ECC_ENCCON_REG16 : %x\n", *ECC_ENCCON_REG16);
    pr_err("ECC_ENCCNFG_REG32 : %x\n", *ECC_ENCCNFG_REG32);
    pr_err("ECC_ENCDIADDR_REG32 : %x\n", *ECC_ENCDIADDR_REG32);
    pr_err("ECC_ENCIDLE_REG32 : %x\n", *ECC_ENCIDLE_REG32);
    pr_err("ECC_ENCPAR0_REG32 : %x\n", *ECC_ENCPAR0_REG32);
    pr_err("ECC_ENCPAR1_REG32 : %x\n", *ECC_ENCPAR1_REG32);
    pr_err("ECC_ENCPAR2_REG32 : %x\n", *ECC_ENCPAR2_REG32);
    pr_err("ECC_ENCPAR3_REG32 : %x\n", *ECC_ENCPAR3_REG32);
    pr_err("ECC_ENCPAR4_REG32 : %x\n", *ECC_ENCPAR4_REG32);
    pr_err("ECC_ENCPAR5_REG32 : %x\n", *ECC_ENCPAR5_REG32);
    pr_err("ECC_ENCPAR6_REG32 : %x\n", *ECC_ENCPAR6_REG32);
    pr_err("ECC_ENCSTA_REG32 : %x\n", *ECC_ENCSTA_REG32);
    pr_err("ECC_ENCIRQEN_REG16 : %x\n", *ECC_ENCIRQEN_REG16);
    pr_err("ECC_ENCIRQSTA_REG16: %x\n", *ECC_ENCIRQSTA_REG16);
    pr_err("ECC_DECCON_REG16 : %x\n", *ECC_DECCON_REG16);
    pr_err("ECC_DECCNFG_REG32 : %x\n", *ECC_DECCNFG_REG32);
    pr_err("ECC_DECDIADDR_REG32: %x\n", *ECC_DECDIADDR_REG32);
    pr_err("ECC_DECIDLE_REG16 : %x\n", *ECC_DECIDLE_REG16);
    pr_err("ECC_DECFER_REG16 : %x\n", *ECC_DECFER_REG16);
    pr_err("ECC_DECENUM0_REG32 : %x\n", *ECC_DECENUM0_REG32);
    pr_err("ECC_DECENUM1_REG32 : %x\n", *ECC_DECENUM1_REG32);
    pr_err("ECC_DECDONE_REG16 : %x\n", *ECC_DECDONE_REG16);
    pr_err("ECC_DECEL0_REG32 : %x\n", *ECC_DECEL0_REG32);
    pr_err("ECC_DECEL1_REG32 : %x\n", *ECC_DECEL1_REG32);
    pr_err("ECC_DECEL2_REG32 : %x\n", *ECC_DECEL2_REG32);
    pr_err("ECC_DECEL3_REG32 : %x\n", *ECC_DECEL3_REG32);
    pr_err("ECC_DECEL4_REG32 : %x\n", *ECC_DECEL4_REG32);
    pr_err("ECC_DECEL5_REG32 : %x\n", *ECC_DECEL5_REG32);
    pr_err("ECC_DECEL6_REG32 : %x\n", *ECC_DECEL6_REG32);
    pr_err("ECC_DECEL7_REG32 : %x\n", *ECC_DECEL7_REG32);
    pr_err("ECC_DECIRQEN_REG16 : %x\n", *ECC_DECIRQEN_REG16);
    pr_err("ECC_DECIRQSTA_REG16: %x\n", *ECC_DECIRQSTA_REG16);
    pr_err("ECC_DECFSM_REG32 : %x\n", *ECC_DECFSM_REG32);
    pr_err("ECC_BYPASS_REG32 : %x\n", *ECC_BYPASS_REG32);
#endif
}
u8 NFI_DMA_status(void)
{
    return g_running_dma;
}
EXPORT_SYMBOL(NFI_DMA_status);

u32 NFI_DMA_address(void)
{
    return DRV_Reg32(NFI_STRADDR_REG32);
}
EXPORT_SYMBOL(NFI_DMA_address);
u32 nand_virt_to_phys_add(u32 va)
{
    u32 pageOffset = (va & (PAGE_SIZE - 1));
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    u32 pa;

    if (virt_addr_valid(va))
        return __virt_to_phys(va);
    if (NULL == current) {
        pr_err("[nand_virt_to_phys_add] ERROR , current is NULL!\n");
        return 0;
    }
    if (NULL == current->mm) {
        pr_err("[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid = 0x%x, name = %s\n",
               current->tgid, current->comm);
        return 0;
    }
    pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
    if (pgd_none(*pgd) || pgd_bad(*pgd)) {
        pr_err("[nand_virt_to_phys_add] ERROR, va = 0x%x, pgd invalid!\n", va);
        return 0;
    }
    pmd = pmd_offset((pud_t *) pgd, va);
    if (pmd_none(*pmd) || pmd_bad(*pmd)) {
        pr_err("[nand_virt_to_phys_add] ERROR, va = 0x%x, pmd invalid!\n", va);
        return 0;
    }
    pte = pte_offset_map(pmd, va);
    if (pte_present(*pte)) {
        pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
        return pa;
    }
    pr_err("[nand_virt_to_phys_add] ERROR va = 0x%x, pte invalid!\n", va);
    return 0;
}
EXPORT_SYMBOL(nand_virt_to_phys_add);
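/*
 * Note (added commentary, not from the original source): for addresses that
 * fail virt_addr_valid(), this helper manually walks the current task's
 * page tables (pgd -> pmd -> pte) and only succeeds for mappings that are
 * resident. A swapped-out or unmapped page returns 0, so callers such as
 * the DMA setup paths below must treat a 0 result as a failure rather than
 * a valid physical address.
 */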
bool get_device_info(u8 *id, flashdev_info *gn_devinfo)
{
    u32 i, m, n, mismatch;
    int target = -1;
    u8 target_id_len = 0;
    unsigned int flash_number = sizeof(gen_FlashTable) / sizeof(gen_FlashTable[0]);

    for (i = 0; i < flash_number; i++) {
        mismatch = 0;
        for (m = 0; m < gen_FlashTable[i].id_length; m++) {
            if (id[m] != gen_FlashTable[i].id[m]) {
                mismatch = 1;
                break;
            }
        }
        if (mismatch == 0 && gen_FlashTable[i].id_length > target_id_len) {
            target = i;
            target_id_len = gen_FlashTable[i].id_length;
        }
    }
    if (target != -1) {
        MSG(INIT, "Recognize NAND: ID [");
        for (n = 0; n < gen_FlashTable[target].id_length; n++) {
            gn_devinfo->id[n] = gen_FlashTable[target].id[n];
            MSG(INIT, "%x ", gn_devinfo->id[n]);
        }
        MSG(INIT, "], Device Name [%s], Page Size [%d]B Spare Size [%d]B Total Size [%d]MB\n",
            gen_FlashTable[target].devciename, gen_FlashTable[target].pagesize,
            gen_FlashTable[target].sparesize, gen_FlashTable[target].totalsize);
        gn_devinfo->id_length = gen_FlashTable[target].id_length;
        gn_devinfo->blocksize = gen_FlashTable[target].blocksize;
        gn_devinfo->addr_cycle = gen_FlashTable[target].addr_cycle;
        gn_devinfo->iowidth = gen_FlashTable[target].iowidth;
        gn_devinfo->timmingsetting = gen_FlashTable[target].timmingsetting;
        gn_devinfo->advancedmode = gen_FlashTable[target].advancedmode;
        gn_devinfo->pagesize = gen_FlashTable[target].pagesize;
        gn_devinfo->sparesize = gen_FlashTable[target].sparesize;
        gn_devinfo->totalsize = gen_FlashTable[target].totalsize;
        gn_devinfo->sectorsize = gen_FlashTable[target].sectorsize;
        gn_devinfo->s_acccon = gen_FlashTable[target].s_acccon;
        gn_devinfo->s_acccon1 = gen_FlashTable[target].s_acccon1;
        gn_devinfo->freq = gen_FlashTable[target].freq;
        gn_devinfo->vendor = gen_FlashTable[target].vendor;
        gn_devinfo->dqs_delay_ctrl = gen_FlashTable[target].dqs_delay_ctrl;
        memcpy((u8 *) &gn_devinfo->feature_set,
               (u8 *) &gen_FlashTable[target].feature_set, sizeof(struct MLC_feature_set));
        memcpy(gn_devinfo->devciename, gen_FlashTable[target].devciename,
               sizeof(gn_devinfo->devciename));
        gn_devinfo->NAND_FLASH_TYPE = gen_FlashTable[target].NAND_FLASH_TYPE;
        memcpy((u8 *) &gn_devinfo->tlcControl,
               (u8 *) &gen_FlashTable[target].tlcControl, sizeof(struct NFI_TLC_CTRL));
        return true;
    }
    MSG(INIT, "Not Found NAND: ID [");
    for (n = 0; n < NAND_MAX_ID; n++)
        MSG(INIT, "%x ", id[n]);
    MSG(INIT, "]\n");
    return false;
}
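/*
 * Matching note (added commentary): the table scan above keeps the entry
 * with the longest fully matching ID prefix, so a more specific entry wins.
 * For example, with two hypothetical table entries
 *   {0xEC, 0xD7, 0x94, 0x7A}              (4-byte ID) and
 *   {0xEC, 0xD7, 0x94, 0x7A, 0x54, 0x43}  (6-byte ID),
 * a chip returning the longer sequence matches both, but target_id_len
 * selects the 6-byte entry.
 */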
#ifdef DUMP_NATIVE_BACKTRACE
#define NFI_NATIVE_LOG_SD "/sdcard/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
#define NFI_NATIVE_LOG_DATA "/data/NFI_native_log_%s-%02d-%02d-%02d_%02d-%02d-%02d.log"
static int nfi_flush_log(char *s)
{
    mm_segment_t old_fs;
    struct rtc_time tm;
    struct timeval tv = { 0 };
    struct file *filp;
    char name[256];
    int re = 0;
    int data_write = 0;

    do_gettimeofday(&tv);
    rtc_time_to_tm(tv.tv_sec, &tm);
    memset(name, 0, sizeof(name));
    sprintf(name, NFI_NATIVE_LOG_DATA, s, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
            tm.tm_hour, tm.tm_min, tm.tm_sec);
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    filp = filp_open(name, O_WRONLY | O_CREAT, 0777);
    if (IS_ERR(filp)) {
        pr_err("[NFI_flush_log]error create file in %s, IS_ERR: %ld, PTR_ERR: %ld\n", name,
               IS_ERR(filp), PTR_ERR(filp));
        memset(name, 0, sizeof(name));
        sprintf(name, NFI_NATIVE_LOG_SD, s, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                tm.tm_hour, tm.tm_min, tm.tm_sec);
        filp = filp_open(name, O_WRONLY | O_CREAT, 0777);
        if (IS_ERR(filp)) {
            pr_err("[NFI_flush_log]error create file in %s, IS_ERR: %ld, PTR_ERR: %ld\n",
                   name, IS_ERR(filp), PTR_ERR(filp));
            set_fs(old_fs);
            return -1;
        }
    }
    pr_err("[NFI_flush_log]log file: %s\n", name);
    set_fs(old_fs);
    if (!(filp->f_op) || !(filp->f_op->write)) {
        pr_err("[NFI_flush_log] No operation\n");
        re = -1;
        goto CLOSE_FILE;
    }
    DumpNativeInfo();
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    data_write = vfs_write(filp, (char __user *)NativeInfo, strlen(NativeInfo), &filp->f_pos);
    if (!data_write) {
        pr_err("[nfi_flush_log] write fail\n");
        re = -1;
    }
    set_fs(old_fs);
CLOSE_FILE:
    if (filp) {
        filp_close(filp, current->files);
        filp = NULL;
    }
    return re;
}
#endif
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
void NFI_TLC_GetMappedWL(u32 pageidx, struct NFI_TLC_WL_INFO *WL_Info)
{
    WL_Info->word_line_idx = pageidx / 3;
    WL_Info->wl_pre = (enum NFI_TLC_WL_PRE)(pageidx % 3);
}

u32 NFI_TLC_GetRowAddr(u32 rowaddr)
{
    u32 real_row;
    u32 temp = 0xFF;
    int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;

    if (gn_devinfo.tlcControl.normaltlc)
        temp = page_per_block / 3;
    else
        temp = page_per_block;
    real_row = ((rowaddr / temp) << gn_devinfo.tlcControl.block_bit) | (rowaddr % temp);
    return real_row;
}

u32 NFI_TLC_SetpPlaneAddr(u32 rowaddr, bool left_plane)
{
    u32 real_row;

    if (gn_devinfo.tlcControl.pPlaneEn) {
        if (left_plane)
            real_row = (rowaddr & (~(1 << gn_devinfo.tlcControl.pPlane_bit)));
        else
            real_row = (rowaddr | (1 << gn_devinfo.tlcControl.pPlane_bit));
    } else
        real_row = rowaddr;
    return real_row;
}

u32 NFI_TLC_GetMappedPgAddr(u32 rowaddr)
{
    u32 page_idx;
    u32 page_shift = 0;
    u32 real_row;
    int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;

    real_row = rowaddr;
    if (gn_devinfo.tlcControl.normaltlc) {
        page_shift = gn_devinfo.tlcControl.block_bit;
        if (gn_devinfo.tlcControl.pPlaneEn)
            real_row &= (~(1 << gn_devinfo.tlcControl.pPlane_bit));
        page_idx = ((real_row >> page_shift) * page_per_block)
                   + (((real_row << (32 - page_shift)) >> (32 - page_shift)) * 3);
    } else {
        page_shift = gn_devinfo.tlcControl.block_bit;
        page_idx = ((real_row >> page_shift) * page_per_block)
                   + ((real_row << (32 - page_shift)) >> (32 - page_shift));
    }
    return page_idx;
}
#endif
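/*
 * Worked example (added commentary; the numbers are illustrative, not from
 * the original source): each TLC word line stores three pages, so page
 * index 100 maps to word line 100 / 3 = 33 with wl_pre = 100 % 3 = 1 (the
 * middle program pass). NFI_TLC_GetRowAddr() then rebuilds the physical
 * row as (block << block_bit) | word_line; assuming block_bit = 8 and
 * page_per_block = 768 (256 word lines per block), a word-line-based row
 * of 800 becomes ((800 / 256) << 8) | (800 % 256) = (3 << 8) | 32 = 0x320.
 */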
static bool mtk_nand_reset(void);

u32 mtk_nand_page_transform(struct mtd_info *mtd, struct nand_chip *chip, u32 page, u32 *blk,
                            u32 *map_blk)
{
    u32 block_size = (gn_devinfo.blocksize * 1024);
    u32 page_size = (1 << chip->page_shift);
    loff_t start_address = 0;
    u32 idx;
    u32 block;
    u32 page_in_block;
    u32 mapped_block;
    bool raw_part = FALSE;
    loff_t logical_address = (loff_t)page * (1 << chip->page_shift);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
    loff_t temp, temp1;
#endif

    gn_devinfo.tlcControl.slcopmodeEn = FALSE;
    if (MLC_DEVICE && init_pmt_done == TRUE) {
        start_address = part_get_startaddress(logical_address, &idx);
        if (raw_partition(idx))
            raw_part = TRUE;
        else
            raw_part = FALSE;
    }
    if (init_pmt_done != TRUE && gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
        gn_devinfo.tlcControl.slcopmodeEn = TRUE;
    if (raw_part == TRUE) {
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
        if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
            if (gn_devinfo.tlcControl.normaltlc) {
                temp = start_address;
                temp1 = logical_address - start_address;
                do_div(temp, (block_size & 0xFFFFFFFF));
                do_div(temp1, ((block_size / 3) & 0xFFFFFFFF));
                block = (u32)((u32)temp + (u32)temp1);
                /*block = (u32)((u32)(start_address / block_size) +
                   (u32)((logical_address - start_address) / (block_size / 3)));*/
                page_in_block = ((u32)((logical_address - start_address) >> chip->page_shift)
                                 % ((mtd->erasesize / page_size) / 3));
                page_in_block *= 3;
                gn_devinfo.tlcControl.slcopmodeEn = TRUE;
            } else {
                temp = start_address;
                temp1 = logical_address - start_address;
                do_div(temp, (block_size & 0xFFFFFFFF));
                do_div(temp1, ((block_size / 3) & 0xFFFFFFFF));
                block = (u32)((u32)temp + (u32)temp1);
                page_in_block = ((u32)((logical_address - start_address) >> chip->page_shift)
                                 % ((mtd->erasesize / page_size) / 3));
                if (gn_devinfo.vendor != VEND_NONE)
                    page_in_block = functArray[gn_devinfo.feature_set.ptbl_idx](page_in_block);
            }
        } else
#endif
        {
            block = (u32)((u32)(start_address >> chip->phys_erase_shift)
                    + (u32)((logical_address - start_address) >> (chip->phys_erase_shift - 1)));
            page_in_block = ((u32)((logical_address - start_address) >> chip->page_shift)
                             % ((mtd->erasesize / page_size) / 2));
            if (gn_devinfo.vendor != VEND_NONE)
                page_in_block = functArray[gn_devinfo.feature_set.ptbl_idx](page_in_block);
        }
        mapped_block = get_mapping_block_index(block);
        /*MSG(INIT, "[page_in_block]mapped_block = %d, page_in_block = %d\n", mapped_block, page_in_block); */
        *blk = block;
        *map_blk = mapped_block;
    } else {
        if (((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
             || (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER))
            && (!mtk_block_istlc(logical_address))) {
            mtk_slc_blk_addr(logical_address, &block, &page_in_block);
            gn_devinfo.tlcControl.slcopmodeEn = TRUE;
        } else {
            block = page / (block_size / page_size);
            page_in_block = page % (block_size / page_size);
        }
        mapped_block = get_mapping_block_index(block);
        *blk = block;
        *map_blk = mapped_block;
    }
    return page_in_block;
}
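/*
 * Worked example (added commentary; sizes are illustrative): on the plain
 * non-raw path above, with a 2KB page and a 256KB block there are 128
 * pages per block, so logical page 1000 yields block = 1000 / 128 = 7 and
 * page_in_block = 1000 % 128 = 104; get_mapping_block_index() may then
 * remap block 7 to a replacement block if it has been marked bad.
 */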
bool mtk_nand_IsRawPartition(loff_t logical_address)
{
    u32 idx;

    part_get_startaddress(logical_address, &idx);
    if (raw_partition(idx))
        return true;
    else
        return false;
}
u16 randomizer_seed[128] = {
    0x576A, 0x05E8, 0x629D, 0x45A3,
    0x649C, 0x4BF0, 0x2342, 0x272E,
    0x7358, 0x4FF3, 0x73EC, 0x5F70,
    0x7A60, 0x1AD8, 0x3472, 0x3612,
    0x224F, 0x0454, 0x030E, 0x70A5,
    0x7809, 0x2521, 0x48F4, 0x5A2D,
    0x492A, 0x043D, 0x7F61, 0x3969,
    0x517A, 0x3B42, 0x769D, 0x0647,
    0x7E2A, 0x1383, 0x49D9, 0x07B8,
    0x2578, 0x4EEC, 0x4423, 0x352F,
    0x5B22, 0x72B9, 0x367B, 0x24B6,
    0x7E8E, 0x2318, 0x6BD0, 0x5519,
    0x1783, 0x18A7, 0x7B6E, 0x7602,
    0x4B7F, 0x3648, 0x2C53, 0x6B99,
    0x0C23, 0x67CF, 0x7E0E, 0x4D8C,
    0x5079, 0x209D, 0x244A, 0x747B,
    0x350B, 0x0E4D, 0x7004, 0x6AC3,
    0x7F3E, 0x21F5, 0x7A15, 0x2379,
    0x1517, 0x1ABA, 0x4E77, 0x15A1,
    0x04FA, 0x2D61, 0x253A, 0x1302,
    0x1F63, 0x5AB3, 0x049A, 0x5AE8,
    0x1CD7, 0x4A00, 0x30C8, 0x3247,
    0x729C, 0x5034, 0x2B0E, 0x57F2,
    0x00E4, 0x575B, 0x6192, 0x38F8,
    0x2F6A, 0x0C14, 0x45FC, 0x41DF,
    0x38DA, 0x7AE1, 0x7322, 0x62DF,
    0x5E39, 0x0E64, 0x6D85, 0x5951,
    0x5937, 0x6281, 0x33A1, 0x6A32,
    0x3A5A, 0x2BAC, 0x743A, 0x5E74,
    0x3B2E, 0x7EC7, 0x4FD2, 0x5D28,
    0x751F, 0x3EF8, 0x39B1, 0x4E49,
    0x746B, 0x6EF6, 0x44BE, 0x6DB7
};
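/*
 * Seed-selection note (added commentary): mtk_nand_turn_on_randomizer()
 * below indexes this table with the page's position, e.g. page 200 in a
 * 128-page-per-block device uses randomizer_seed[200 % 128] =
 * randomizer_seed[72], so pages at the same offset in different blocks
 * share a seed.
 */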
static int mtk_nand_randomizer_config(struct gRandConfig *conf, u16 seed)
{
#if 1
    if (gn_devinfo.vendor != VEND_NONE) {
        u16 nfi_cnfg = 0;
        u32 nfi_ran_cnfg = 0;
        u8 i;

        /* set up NFI_CNFG */
        nfi_cnfg = DRV_Reg16(NFI_CNFG_REG16);
        nfi_ran_cnfg = DRV_Reg32(NFI_RANDOM_CNFG_REG32);
        if (conf->type == RAND_TYPE_SAMSUNG) {
            nfi_ran_cnfg = 0;
            nfi_ran_cnfg |= seed << EN_SEED_SHIFT;
            nfi_ran_cnfg |= seed << DE_SEED_SHIFT;
            nfi_cnfg |= CNFG_RAN_SEC;
            nfi_cnfg |= CNFG_RAN_SEL;
            use_randomizer = TRUE;
            /*nfi_ran_cnfg |= 0x00010001; */
        } else if (conf->type == RAND_TYPE_TOSHIBA) {
            use_randomizer = TRUE;
            for (i = 0; i < 6; i++) {
                DRV_WriteReg32(NFI_RANDOM_ENSEED01_TS_REG32 + i, conf->seed[i]);
                DRV_WriteReg32(NFI_RANDOM_DESEED01_TS_REG32 + i, conf->seed[i]);
            }
            nfi_cnfg |= CNFG_RAN_SEC;
            nfi_cnfg &= ~CNFG_RAN_SEL;
            /*nfi_ran_cnfg |= 0x00010001; */
        } else {
            nfi_ran_cnfg &= ~0x00010001;
            use_randomizer = FALSE;
            return 0;
        }
        DRV_WriteReg16(NFI_CNFG_REG16, nfi_cnfg);
        DRV_WriteReg32(NFI_RANDOM_CNFG_REG32, nfi_ran_cnfg);
    }
#endif
    return 0;
}
static bool mtk_nand_israndomizeron(void)
{
#if 1
    if (gn_devinfo.vendor != VEND_NONE) {
        u32 nfi_ran_cnfg = 0;

        nfi_ran_cnfg = DRV_Reg32(NFI_RANDOM_CNFG_REG32);
        if (nfi_ran_cnfg & 0x00010001)
            return TRUE;
    }
#endif
    return FALSE;
}
static void mtk_nand_interface_switch(struct mtd_info *mtd)
{
    if (gn_devinfo.iowidth == IO_ONFI || gn_devinfo.iowidth == IO_TOGGLEDDR
        || gn_devinfo.iowidth == IO_TOGGLESDR) {
        if (DDR_INTERFACE == FALSE) {
            if (mtk_nand_interface_config(mtd)) {
                MSG(INIT, "[NFI] interface switch sync!!!!\n");
                DDR_INTERFACE = TRUE;
            } else {
                MSG(INIT, "[NFI] interface switch fail!!!!\n");
                DDR_INTERFACE = FALSE;
            }
        }
    }
}
static void mtk_nand_turn_on_randomizer(struct mtd_info *mtd, struct nand_chip *chip,
                                        u32 page)
{
#if 1
    /*struct gRandConfig *conf = &gn_devinfo.feature_set.randConfig; */
    if (gn_devinfo.vendor != VEND_NONE) {
        u32 nfi_ran_cnfg = 0;
        u16 seed;
        u32 page_size = (1 << chip->page_shift);
        u32 page_per_blk = (mtd->erasesize / page_size);

        if (page_per_blk < 128)
            seed = randomizer_seed[page % page_per_blk];
        else
            seed = randomizer_seed[page % 128];
        mtk_nand_randomizer_config(&gn_devinfo.feature_set.randConfig, seed);
        nfi_ran_cnfg = DRV_Reg32(NFI_RANDOM_CNFG_REG32);
        nfi_ran_cnfg |= 0x00010001;
        DRV_WriteReg32(NFI_RANDOM_CNFG_REG32, nfi_ran_cnfg);
    }
#endif
}

static void mtk_nand_turn_off_randomizer(void)
{
#if 1
    if (gn_devinfo.vendor != VEND_NONE) {
        u32 nfi_ran_cnfg = 0;

        nfi_ran_cnfg = DRV_Reg32(NFI_RANDOM_CNFG_REG32);
        nfi_ran_cnfg &= ~0x00010001;
        DRV_WriteReg32(NFI_RANDOM_CNFG_REG32, nfi_ran_cnfg);
    }
#endif
}
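/*
 * Added commentary (an inference from this code, not documented fact): the
 * 0x00010001 mask toggled above appears to pair an encode-path enable in
 * the low half-word with a decode-path enable in the high half-word,
 * matching the separate EN_SEED_SHIFT/DE_SEED_SHIFT seed fields written in
 * mtk_nand_randomizer_config(), so reads and writes are (de)randomized
 * together.
 */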
/******************************************************************************
 * mtk_nand_irq_handler
 *
 * DESCRIPTION:
 *   NAND interrupt handler!
 *
 * PARAMETERS:
 *   int irq
 *   void *dev_id
 *
 * RETURNS:
 *   IRQ_HANDLED : Successfully handle the IRQ
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
/* Modified for TCM use */
static irqreturn_t mtk_nand_irq_handler(int irqno, void *dev_id)
{
    u16 u16IntStatus = DRV_Reg16(NFI_INTR_REG16);

    (void)irqno;
    if (u16IntStatus & (u16) INTR_AHB_DONE_EN)
        complete(&g_comp_AHB_Done);
    return IRQ_HANDLED;
}
/******************************************************************************
 * ECC_Config
 *
 * DESCRIPTION:
 *   Configure HW ECC!
 *
 * PARAMETERS:
 *   struct mtk_nand_host_hw *hw
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void ECC_Config(struct mtk_nand_host_hw *hw, u32 ecc_bit)
{
    u32 u4ENCODESize;
    u32 u4DECODESize;
    u32 ecc_bit_cfg = ECC_CNFG_ECC4;

    /* Sector + FDM + YAFFS2 meta data bits */
    u4DECODESize = ((hw->nand_sec_size + hw->nand_fdm_size) << 3) + ecc_bit * ECC_PARITY_BIT;
    switch (ecc_bit) {
    case 4:
        ecc_bit_cfg = ECC_CNFG_ECC4;
        break;
    case 8:
        ecc_bit_cfg = ECC_CNFG_ECC8;
        break;
    case 10:
        ecc_bit_cfg = ECC_CNFG_ECC10;
        break;
    case 12:
        ecc_bit_cfg = ECC_CNFG_ECC12;
        break;
    case 14:
        ecc_bit_cfg = ECC_CNFG_ECC14;
        break;
    case 16:
        ecc_bit_cfg = ECC_CNFG_ECC16;
        break;
    case 18:
        ecc_bit_cfg = ECC_CNFG_ECC18;
        break;
    case 20:
        ecc_bit_cfg = ECC_CNFG_ECC20;
        break;
    case 22:
        ecc_bit_cfg = ECC_CNFG_ECC22;
        break;
    case 24:
        ecc_bit_cfg = ECC_CNFG_ECC24;
        break;
    case 28:
        ecc_bit_cfg = ECC_CNFG_ECC28;
        break;
    case 32:
        ecc_bit_cfg = ECC_CNFG_ECC32;
        break;
    case 36:
        ecc_bit_cfg = ECC_CNFG_ECC36;
        break;
    case 40:
        ecc_bit_cfg = ECC_CNFG_ECC40;
        break;
    case 44:
        ecc_bit_cfg = ECC_CNFG_ECC44;
        break;
    case 48:
        ecc_bit_cfg = ECC_CNFG_ECC48;
        break;
    case 52:
        ecc_bit_cfg = ECC_CNFG_ECC52;
        break;
    case 56:
        ecc_bit_cfg = ECC_CNFG_ECC56;
        break;
    case 60:
        ecc_bit_cfg = ECC_CNFG_ECC60;
        break;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
    case 68:
        ecc_bit_cfg = ECC_CNFG_ECC68;
        u4DECODESize -= 7;
        break;
    case 72:
        ecc_bit_cfg = ECC_CNFG_ECC72;
        u4DECODESize -= 7;
        break;
    case 80:
        ecc_bit_cfg = ECC_CNFG_ECC80;
        u4DECODESize -= 7;
        break;
#endif
    default:
        break;
    }
    DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
    do {
        ;
    } while (!DRV_Reg16(ECC_DECIDLE_REG16));
    DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
    do {
        ;
    } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
    /* setup FDM register base */
    /* Sector + FDM */
    u4ENCODESize = (hw->nand_sec_size + hw->nand_fdm_size) << 3;
    /* Sector + FDM + YAFFS2 meta data bits */
    /* configure ECC decoder && encoder */
    DRV_WriteReg32(ECC_DECCNFG_REG32,
                   ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN |
                   (u4DECODESize << DEC_CNFG_CODE_SHIFT));
    DRV_WriteReg32(ECC_ENCCNFG_REG32,
                   ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
#ifndef MANUAL_CORRECT
    NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_CORRECT);
#else
    NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
#endif
}
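/*
 * Sizing note (added commentary; byte counts are illustrative): with a
 * 512-byte sector and an 8-byte FDM region, the encoder message size is
 * (512 + 8) * 8 = 4160 bits, and the decoder code size additionally
 * reserves ecc_bit * ECC_PARITY_BIT parity bits, e.g.
 * 4160 + 24 * ECC_PARITY_BIT for 24-bit correction, where ECC_PARITY_BIT
 * is the per-correctable-bit parity cost of this BCH engine.
 */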
/******************************************************************************
 * ECC_Decode_Start
 *
 * DESCRIPTION:
 *   HW ECC Decode Start !
 *
 * PARAMETERS:
 *   None
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void ECC_Decode_Start(void)
{
    /* wait for device returning idle */
    while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
        ;
    DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
}

/******************************************************************************
 * ECC_Decode_End
 *
 * DESCRIPTION:
 *   HW ECC Decode End !
 *
 * PARAMETERS:
 *   None
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void ECC_Decode_End(void)
{
    /* wait for device returning idle */
    while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
        ;
    DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
}

/******************************************************************************
 * ECC_Encode_Start
 *
 * DESCRIPTION:
 *   HW ECC Encode Start !
 *
 * PARAMETERS:
 *   None
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void ECC_Encode_Start(void)
{
    /* wait for device returning idle */
    while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
        ;
    mb(); /*make sure process order */
    DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
}

/******************************************************************************
 * ECC_Encode_End
 *
 * DESCRIPTION:
 *   HW ECC Encode End !
 *
 * PARAMETERS:
 *   None
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void ECC_Encode_End(void)
{
    /* wait for device returning idle */
    while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
        ;
    mb(); /*make sure process order */
    DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
}
/******************************************************************************
 * mtk_nand_check_bch_error
 *
 * DESCRIPTION:
 *   Check BCH error or not !
 *
 * PARAMETERS:
 *   struct mtd_info *mtd
 *   u8 *pDataBuf
 *   u32 u4SecIndex
 *   u32 u4PageAddr
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_check_bch_error(struct mtd_info *mtd, u8 *pDataBuf, u8 *spareBuf,
                                     u32 u4SecIndex, u32 u4PageAddr, u32 *bitmap)
{
    bool ret = true;
    u16 u2SectorDoneMask = 1 << u4SecIndex;
    u32 u4ErrorNumDebug0, u4ErrorNumDebug1, i, u4ErrNum;
    u32 timeout = 0xFFFF;
    u32 correct_count = 0;
    u32 page_size = (u4SecIndex + 1) * host->hw->nand_sec_size;
    u32 sec_num = u4SecIndex + 1;
    u16 failed_sec = 0;
    u32 maxSectorBitErr = 0;
    u32 uncorrect_sector = 0;
#ifdef MANUAL_CORRECT
    u32 au4ErrBitLoc[6];
    u32 u4ErrByteLoc, u4BitOffset;
    u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
#endif

    while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
        timeout--;
        if (0 == timeout)
            return false;
    }
#ifndef MANUAL_CORRECT
    if (0 == (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY)) {
        u4ErrorNumDebug0 = DRV_Reg32(ECC_DECENUM0_REG32);
        u4ErrorNumDebug1 = DRV_Reg32(ECC_DECENUM1_REG32);
        if (0 != (u4ErrorNumDebug0 & 0xFFFFFFFF) || 0 != (u4ErrorNumDebug1 & 0xFFFFFFFF)) {
            for (i = 0; i <= u4SecIndex; ++i) {
#if 1
                u4ErrNum = (DRV_Reg32((ECC_DECENUM0_REG32 + (i / 4))) >> ((i % 4) * 8)) &
                           ERR_NUM0;
#else
                if (i < 4)
                    u4ErrNum = DRV_Reg32(ECC_DECENUM0_REG32) >> (i * 8);
                else
                    u4ErrNum = DRV_Reg32(ECC_DECENUM1_REG32) >> ((i - 4) * 8);
                u4ErrNum &= ERR_NUM0;
#endif
                if (ERR_NUM0 == u4ErrNum) {
                    failed_sec++;
                    ret = false;
                    uncorrect_sector |= (1 << i);
                } else {
                    if (bitmap)
                        *bitmap |= 1 << i;
                    if (maxSectorBitErr < u4ErrNum)
                        maxSectorBitErr = u4ErrNum;
                    correct_count += u4ErrNum;
                }
            }
            mtd->ecc_stats.failed += failed_sec;
            if ((maxSectorBitErr > ecc_threshold) && (FALSE != ret)) {
                MSG(INIT,
                    "ECC bit flips (0x%x) exceed eccthreshold (0x%x), u4PageAddr 0x%x\n",
                    maxSectorBitErr, ecc_threshold, u4PageAddr);
                mtd->ecc_stats.corrected++;
            }
        }
    }
    if (0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY)) {
        ret = true;
        uncorrect_sector = 0;
        memset(pDataBuf, 0xff, page_size);
        memset(spareBuf, 0xff, sec_num * host->hw->nand_fdm_size);
        maxSectorBitErr = 0;
        failed_sec = 0;
    }
#else
    /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
    memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
    u4ErrorNumDebug0 = DRV_Reg32(ECC_DECENUM0_REG32);
    u4ErrNum = (DRV_Reg32((ECC_DECENUM0_REG32 + (u4SecIndex / 4))) >> ((u4SecIndex % 4) * 8)) &
               ERR_NUM0;
    if (u4ErrNum) {
        if (ERR_NUM0 == u4ErrNum) {
            mtd->ecc_stats.failed++;
            ret = false;
        } else {
            for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
                au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
                u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x3FFF;
                if (u4ErrBitLoc1th < 0x1000) {
                    u4ErrByteLoc = u4ErrBitLoc1th / 8;
                    u4BitOffset = u4ErrBitLoc1th % 8;
                    pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
                    mtd->ecc_stats.corrected++;
                } else {
                    mtd->ecc_stats.failed++;
                    uncorrect_sector |= (1 << i);
                }
                u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x3FFF;
                if (0 != u4ErrBitLoc2nd) {
                    if (u4ErrBitLoc2nd < 0x1000) {
                        u4ErrByteLoc = u4ErrBitLoc2nd / 8;
                        u4BitOffset = u4ErrBitLoc2nd % 8;
                        pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
                        mtd->ecc_stats.corrected++;
                    } else {
                        mtd->ecc_stats.failed++;
                        uncorrect_sector |= (1 << i);
                    }
                }
            }
        }
        if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
            ret = false;
    }
#endif
    if (uncorrect_sector) {
        pr_err("UnCorrectable ECC errors at PageAddr = %d, Sectormap = 0x%x\n",
               u4PageAddr, uncorrect_sector);
    }
    return ret;
}
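/*
 * Register-packing note (added commentary): ECC_DECENUM0/1 pack one 8-bit
 * per-sector error count, four sectors per 32-bit register, which is why
 * the loop above extracts
 *   (DRV_Reg32(ECC_DECENUM0_REG32 + (i / 4)) >> ((i % 4) * 8)) & ERR_NUM0;
 * sector 5, for instance, lives in bits 15:8 of ECC_DECENUM1 (assuming the
 * pointer arithmetic steps one full register, as the #else variant above
 * also implies). A count equal to ERR_NUM0 is the hardware's
 * "uncorrectable" marker rather than a real bit-flip count.
 */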
/******************************************************************************
 * mtk_nand_RFIFOValidSize
 *
 * DESCRIPTION:
 *   Check the Read FIFO data bytes !
 *
 * PARAMETERS:
 *   u16 u2Size
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_RFIFOValidSize(u16 u2Size)
{
    u32 timeout = 0xFFFF;

    while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
        timeout--;
        if (0 == timeout)
            return false;
    }
    return true;
}

/******************************************************************************
 * mtk_nand_WFIFOValidSize
 *
 * DESCRIPTION:
 *   Check the Write FIFO data bytes !
 *
 * PARAMETERS:
 *   u16 u2Size
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_WFIFOValidSize(u16 u2Size)
{
    u32 timeout = 0xFFFF;

    while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
        timeout--;
        if (0 == timeout)
            return false;
    }
    return true;
}
/******************************************************************************
 * mtk_nand_status_ready
 *
 * DESCRIPTION:
 *   Indicate the NAND device is ready or not !
 *
 * PARAMETERS:
 *   u32 u4Status
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_status_ready(u32 u4Status)
{
    u32 timeout = 0xFFFF;

    while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
        timeout--;
        if (0 == timeout)
            return false;
    }
    return true;
}
/******************************************************************************
 * mtk_nand_reset
 *
 * DESCRIPTION:
 *   Reset the NAND device hardware component !
 *
 * PARAMETERS:
 *   struct mtk_nand_host *host (Initial setting data)
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_reset(void)
{
    /* HW recommended reset flow */
    int timeout = 0xFFFF;

    if (DRV_Reg16(NFI_MASTERSTA_REG16) & 0xFFF) {
        mb(); /*make sure process order */
        DRV_WriteReg32(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
        while (DRV_Reg16(NFI_MASTERSTA_REG16) & 0xFFF) {
            timeout--;
            if (!timeout)
                MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
        }
    }
    /* issue reset operation */
    mb(); /*make sure process order */
    DRV_WriteReg32(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
    return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0)
           && mtk_nand_WFIFOValidSize(0);
}
/******************************************************************************
 * mtk_nand_set_mode
 *
 * DESCRIPTION:
 *   Set the operation mode !
 *
 * PARAMETERS:
 *   u16 u2OpMode (read/write)
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void mtk_nand_set_mode(u16 u2OpMode)
{
    u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);

    u2Mode &= ~CNFG_OP_MODE_MASK;
    u2Mode |= u2OpMode;
    DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
}
/******************************************************************************
 * mtk_nand_set_autoformat
 *
 * DESCRIPTION:
 *   Enable/Disable hardware autoformat !
 *
 * PARAMETERS:
 *   bool bEnable (Enable/Disable)
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void mtk_nand_set_autoformat(bool bEnable)
{
    if (bEnable)
        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
    else
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
}

/******************************************************************************
 * mtk_nand_configure_fdm
 *
 * DESCRIPTION:
 *   Configure the FDM data size !
 *
 * PARAMETERS:
 *   u16 u2FDMSize
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static void mtk_nand_configure_fdm(u16 u2FDMSize)
{
    NFI_CLN_REG32(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
    NFI_SET_REG32(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
    NFI_SET_REG32(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
}

static bool mtk_nand_pio_ready(void)
{
    int count = 0;

    while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
        count++;
        if (count > 0xffff) {
            pr_err("PIO_DIRDY timeout\n");
            return false;
        }
    }
    return true;
}
/******************************************************************************
 * mtk_nand_set_command
 *
 * DESCRIPTION:
 *   Send hardware commands to NAND devices !
 *
 * PARAMETERS:
 *   u16 command
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_set_command(u16 command)
{
    /* Write command to device */
#ifdef MTK_NAND_CMD_DUMP
    if ((command == NAND_CMD_READ0) ||
        (command == NAND_CMD_SEQIN) ||
        (command == NAND_CMD_ERASE1) || (command == 0xEE) || (command == 0xEF)) {
        if (dbg_inf[current_idx].cmd.cmd_array[0] != 0xFF) {
            MSG(INIT, "Kuohong help com[0] is not 0xFF, idx: %d value: %x!\n",
                current_idx, dbg_inf[current_idx].cmd.cmd_array[0]);
            dump_stack();
        }
        dbg_inf[current_idx].cmd.cmd_array[0] = command;
    } else if ((command == NAND_CMD_READSTART) ||
               (command == NAND_CMD_PAGEPROG) || (command == NAND_CMD_ERASE2)) {
        if (dbg_inf[current_idx].cmd.cmd_array[1] != 0xFF) {
            MSG(INIT, "Kuohong help com[1] is not 0xFF!\n");
            dump_stack();
        }
        dbg_inf[current_idx].cmd.cmd_array[1] = command;
        current_idx_add();
    } else {
        dbg_inf[current_idx].cmd.cmd_array[0] = command;
        current_idx_add();
    }
#endif
    mb(); /*make sure process order */
    DRV_WriteReg16(NFI_CMD_REG16, command);
    return mtk_nand_status_ready(STA_CMD_STATE);
}
/******************************************************************************
 * mtk_nand_set_address
 *
 * DESCRIPTION:
 *   Set the hardware address register !
 *
 * PARAMETERS:
 *   struct nand_chip *nand, u32 u4RowAddr
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
{
    /* fill cycle addr */
#ifdef MTK_NAND_CMD_DUMP
    u16 command;

    command = dbg_inf[current_idx].cmd.cmd_array[0];
    if ((command == NAND_CMD_READ0) ||
        (command == NAND_CMD_SEQIN) || (command == NAND_CMD_ERASE1)) {
        if (dbg_inf[current_idx].cmd.cmd_array[1] != 0xFF) {
            MSG(INIT, "Kuohong help com[1] is not 0xFF!\n");
            dump_stack();
        }
        dbg_inf[current_idx].cmd.address[0] = u4ColAddr;
        dbg_inf[current_idx].cmd.address[1] = u4RowAddr;
        dbg_inf[current_idx].cmd.address[2] = u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT);
    } else if ((command == 0xEE) || (command == 0xEF)) {
        dbg_inf[current_idx].cmd.address[0] = u4ColAddr;
        dbg_inf[current_idx].cmd.address[1] = u4RowAddr;
        dbg_inf[current_idx].cmd.address[2] = u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT);
        current_idx_add();
    } else {
        dbg_inf[current_idx].cmd.address[0] = u4ColAddr;
        dbg_inf[current_idx].cmd.address[1] = u4RowAddr;
        dbg_inf[current_idx].cmd.address[2] = u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT);
        current_idx_add();
    }
#endif
    mb(); /*make sure process order */
    DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
    DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
    DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
    return mtk_nand_status_ready(STA_ADDR_STATE);
}
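/*
 * Address-cycle note (added commentary): u2ColNOB/u2RowNOB split the
 * address phase between column and row cycles, packed together as
 * u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT). The callers below use
 * colnob = 2 and rownob = gn_devinfo.addr_cycle - 2, so a 5-cycle device
 * issues 2 column cycles followed by 3 row cycles.
 */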
/*-------------------------------------------------------------------------------*/
static bool mtk_nand_device_reset(void)
{
    u32 timeout = 0xFFFF;

    mtk_nand_reset();
    DRV_WriteReg16(NFI_CNFG_REG16, CNFG_OP_RESET);
    mtk_nand_set_command(NAND_CMD_RESET);
    while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--))
        ;
    mtk_nand_interface_async();
    if (!timeout)
        return FALSE;
    else
        return TRUE;
}
/*-------------------------------------------------------------------------------*/
/******************************************************************************
 * mtk_nand_check_RW_count
 *
 * DESCRIPTION:
 *   Check how many sectors have been read/written !
 *
 * PARAMETERS:
 *   u16 u2WriteSize
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_check_RW_count(u16 u2WriteSize)
{
    u32 timeout = 0xFFFF;
    u16 u2SecNum = u2WriteSize >> host->hw->nand_sec_shift;

    while (ADDRCNTR_CNTR(DRV_Reg32(NFI_ADDRCNTR_REG16)) < u2SecNum) {
        timeout--;
        if (0 == timeout) {
            pr_debug("[%s] timeout\n", __func__);
            return false;
        }
    }
    return true;
}
int mtk_nand_interface_async(void)
{
    int retry = 10;

    if (DDR_INTERFACE == TRUE) {
        while ((DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32) != 4) && retry--) {
            DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 0);
            MSG(INIT, "NFI_NAND_TYPE_CNFG_REG32 0x%x\n",
                DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32));
        }
        mb(); /*make sure process order */
        clkmux_sel(MT_CLKMUX_NFI1X_INFRA_SEL, MT_CG_SYS_26M, "NFI");
        mb(); /*make sure process order */
        NFI_SET_REG32(NFI_DEBUG_CON1_REG16, NFI_BYPASS);
        NFI_SET_REG32(ECC_BYPASS_REG32, ECC_BYPASS);
        DRV_WriteReg32(NFI_ACCCON_REG32, gn_devinfo.timmingsetting);
        DDR_INTERFACE = FALSE;
        MSG(INIT, "Disable DDR mode\n");
    } else
        MSG(INIT, "already legacy mode\n");
    return 0;
}
static int mtk_nand_interface_config(struct mtd_info *mtd)
{
#if 1
    u32 timeout;
    u32 val;
    int retry = 10;
    int sretry = 10;
    struct gFeatureSet *feature_set = &(gn_devinfo.feature_set.FeatureSet);
    enum cg_clk_id id = MT_CG_SYS_26M;

    if (gn_devinfo.iowidth == IO_ONFI || gn_devinfo.iowidth == IO_TOGGLEDDR
        || gn_devinfo.iowidth == IO_TOGGLESDR) {
        do {
            if (gn_devinfo.freq == 80) {
                id = MT_CG_MPLL_D14;
                MSG(INIT, "[%s] MT_CG_MPLL_D14\n", __func__);
            } else if (gn_devinfo.freq == 100) {
                id = MT_CG_MPLL_D8;
                MSG(INIT, "[%s] MT_CG_MPLL_D8\n", __func__);
            }
            /*reset*/
            mtk_nand_set_command(NAND_CMD_RESET);
            while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN))
                ;
            mtk_nand_reset();
            /*set feature*/
            mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd,
                                feature_set->Interface.address,
                                (u8 *) &feature_set->Interface.feature,
                                sizeof(feature_set->Interface.feature));
            NFI_CLN_REG32(NFI_DEBUG_CON1_REG16, HWDCM_SWCON_ON);
            /*setup register*/
            NFI_CLN_REG32(NFI_DEBUG_CON1_REG16, NFI_BYPASS);
            /*clear bypass of ecc */
            NFI_CLN_REG32(ECC_BYPASS_REG32, ECC_BYPASS);
            /*set infra_sel */
            disable_clock(MT_CG_NFI_SW_CG, "NFI");
            mb(); /*make sure process order */
            disable_clock(MT_CG_NFI2X_SW_CG, "NFI");
            mb(); /*make sure process order */
            clkmux_sel(MT_CLKMUX_NFI2X_GFMUX_SEL, id, "NFI");
            mb(); /*make sure process order */
            clkmux_sel(MT_CLKMUX_NFI1X_INFRA_SEL, MT_CG_SYS_TEMP, "NFI");
            mb(); /*make sure process order */
            enable_clock(MT_CG_NFI2X_SW_CG, "NFI");
            mb(); /*make sure process order */
            enable_clock(MT_CG_NFI_SW_CG, "NFI");
            /*enable_clock(MT_CG_MPLL_D7, "NFI"); */
            mb(); /*make sure process order */
            DRV_WriteReg32(NFI_DLYCTRL_REG32, 0xA001);
            val = gn_devinfo.dqs_delay_ctrl + (3 << 24);
            DRV_WriteReg32(NFI_DQS_DELAY_CTRL, val);
            if (gn_devinfo.iowidth == IO_ONFI) {
                while ((DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32) != 2) && retry--) {
                    DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 2);
                    MSG(INIT, "NFI_NAND_TYPE_CNFG_REG32 0x%x\n",
                        DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32));
                }
            } else {
                while ((DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32) != 1) && retry--) {
                    DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 1);
                    MSG(INIT, "NFI_NAND_TYPE_CNFG_REG32 0x%x\n",
                        DRV_Reg16(NFI_NAND_TYPE_CNFG_REG32));
                }
            }
            DRV_WriteReg32(NFI_ACCCON1_REG3, gn_devinfo.s_acccon1);
            DRV_WriteReg32(NFI_ACCCON_REG32, gn_devinfo.s_acccon);
            mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd,
                                feature_set->Interface.address, (u8 *) &val, 4);
        } while (((val & 0xFF) != (feature_set->Interface.feature & 0xFF)) && sretry--);
        if ((val & 0xFF) != (feature_set->Interface.feature & 0xFF)) {
            MSG(INIT, "[%s] fail %d\n", __func__, val);
            mtk_nand_set_command(NAND_CMD_RESET);
            timeout = TIMEOUT_4;
            while (timeout)
                timeout--;
            mtk_nand_reset();
            DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 0);
            mb(); /*make sure process order */
            disable_clock(MT_CG_NFI_SW_CG, "NFI");
            mb(); /*make sure process order */
            clkmux_sel(MT_CLKMUX_NFI1X_INFRA_SEL, MT_CG_SYS_26M, "NFI");
            mb(); /*make sure process order */
            enable_clock(MT_CG_NFI_SW_CG, "NFI");
            mb(); /*make sure process order */
            NFI_SET_REG32(NFI_DEBUG_CON1_REG16, NFI_BYPASS);
            NFI_SET_REG32(ECC_BYPASS_REG32, ECC_BYPASS);
            DRV_WriteReg32(NFI_ACCCON_REG32, gn_devinfo.timmingsetting);
            return 0;
        }
        MSG(INIT, "[%s] success\n", __func__);
    } else {
        MSG(INIT, "[%s] legacy interface\n", __func__);
    }
#endif /*TODO*/
    return 1;
}
/******************************************************************************
 * mtk_nand_ready_for_read
 *
 * DESCRIPTION:
 *   Prepare hardware environment for read !
 *
 * PARAMETERS:
 *   struct nand_chip *nand, u32 u4RowAddr
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr,
                                    u16 sec_num, bool full, u8 *buf, enum readCommand cmd)
{
    /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
    bool bRet = false;
    /*u16 sec_num = 1 << (nand->page_shift - host->hw->nand_sec_shift); */
    u32 col_addr = u4ColAddr;
    u32 colnob = 2, rownob = gn_devinfo.addr_cycle - 2;
    /*u32 reg_val = DRV_Reg32(NFI_MASTERRST_REG32); */
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
    u32 phys = 0;
#endif
#ifdef DUMP_PEF
    struct timeval stimer, etimer;

    do_gettimeofday(&stimer);
#endif
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
    if (full) {
        mtk_dir = DMA_FROM_DEVICE;
        sg_init_one(&mtk_sg, buf, (sec_num * (1 << host->hw->nand_sec_shift)));
        dma_map_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
        phys = mtk_sg.dma_address;
    }
#endif
    if (DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32) & 0x3) {
        NFI_SET_REG16(NFI_MASTERRST_REG32, PAD_MACRO_RST); /*reset */
        NFI_CLN_REG16(NFI_MASTERRST_REG32, PAD_MACRO_RST); /*dereset */
    }
    if (nand->options & NAND_BUSWIDTH_16)
        col_addr /= 2;
    if (!mtk_nand_reset())
        goto cleanup;
    if (g_bHwEcc) {
        /* Enable HW ECC */
        NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
    } else
        NFI_CLN_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
    mtk_nand_set_mode(CNFG_OP_READ);
    NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
    DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
#endif
    if (full) {
#if __INTERNAL_USE_AHB_MODE__
        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
        /* phys = nand_virt_to_phys_add((u32) buf); */
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
        if (!phys) {
            pr_err("[mtk_nand_ready_for_read]convert virt addr (%x) to phys add (%x)fail!!!",
                   (u32) buf, phys);
            return false;
        }
        DRV_WriteReg32(NFI_STRADDR_REG32, phys);
#endif
#else
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
#endif
        if (g_bHwEcc)
            NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
        else
            NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
    } else {
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
    }
    mtk_nand_set_autoformat(full);
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
    if (full) {
        if (g_bHwEcc)
            ECC_Decode_Start();
    }
#endif
    if (cmd == AD_CACHE_FINAL) {
        if (!mtk_nand_set_command(0x3F))
            goto cleanup;
        if (!mtk_nand_status_ready(STA_NAND_BUSY))
            goto cleanup;
        return true;
    }
    if (!mtk_nand_set_command(NAND_CMD_READ0))
        goto cleanup;
    if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
        goto cleanup;
    if (cmd == NORMAL_READ) {
        if (!mtk_nand_set_command(NAND_CMD_READSTART))
            goto cleanup;
    } else {
        if (!mtk_nand_set_command(0x31))
            goto cleanup;
    }
    if (!mtk_nand_status_ready(STA_NAND_BUSY))
        goto cleanup;
    bRet = true;

cleanup:
#ifdef DUMP_PEF
    do_gettimeofday(&etimer);
    g_NandPerfLog.ReadBusyTotalTime += Cal_timediff(&etimer, &stimer);
    g_NandPerfLog.ReadBusyCount++;
#endif
    return bRet;
}
/******************************************************************************
 * mtk_nand_ready_for_write
 *
 * DESCRIPTION:
 *   Prepare hardware environment for write !
 *
 * PARAMETERS:
 *   struct nand_chip *nand, u32 u4RowAddr
 *
 * RETURNS:
 *   None
 *
 * NOTES:
 *   None
 *
 ******************************************************************************/
static bool mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full,
                                     u8 *buf)
{
    bool bRet = false;
    u32 sec_num = 1 << (nand->page_shift - host->hw->nand_sec_shift);
    u32 colnob = 2, rownob = gn_devinfo.addr_cycle - 2;
#if __INTERNAL_USE_AHB_MODE__
    u32 phys = 0;
    /*u32 T_phys = 0; */
#endif
    u32 temp_sec_num;

    temp_sec_num = sec_num;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
    if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
        && gn_devinfo.tlcControl.normaltlc
        && gn_devinfo.tlcControl.pPlaneEn) {
        temp_sec_num = sec_num / 2;
    }
#endif
    if (full) {
        mtk_dir = DMA_TO_DEVICE;
        sg_init_one(&mtk_sg, buf, temp_sec_num * (1 << host->hw->nand_sec_shift));
        dma_map_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
        phys = mtk_sg.dma_address;
    }
    if (nand->options & NAND_BUSWIDTH_16)
        col_addr /= 2;
    /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
    if (!mtk_nand_reset())
        return false;
    mtk_nand_set_mode(CNFG_OP_PRGM);
    NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
    DRV_WriteReg32(NFI_CON_REG16, temp_sec_num << CON_NFI_SEC_SHIFT);
    if (full) {
#if __INTERNAL_USE_AHB_MODE__
        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
        NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
        /*phys = nand_virt_to_phys_add((unsigned long) buf); */
        /*T_phys = __virt_to_phys(buf); */
        if (!phys) {
            pr_err("[mtk_nand_ready_for_write]convert virt addr (%x) to phys add fail!!!",
                   (u32) buf);
            return false;
        }
        DRV_WriteReg32(NFI_STRADDR_REG32, phys);
#else
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
#endif
        if (g_bHwEcc)
            NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
        else
            NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
    } else {
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
        NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
    }
    mtk_nand_set_autoformat(full);
    if (full) {
        if (g_bHwEcc)
            ECC_Encode_Start();
    }
    if (!mtk_nand_set_command(NAND_CMD_SEQIN))
        goto cleanup;
    /* FIXME: handle any kind of address cycle */
    if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
        goto cleanup;
    if (!mtk_nand_status_ready(STA_NAND_BUSY))
        goto cleanup;
    bRet = true;

cleanup:
    return bRet;
}
  2145. static bool mtk_nand_check_dececc_done(u32 u4SecNum)
  2146. {
  2147. struct timeval timer_timeout, timer_cur;
  2148. u32 dec_mask;
  2149. do_gettimeofday(&timer_timeout);
  2150. timer_timeout.tv_usec += 800 * 1000; /* 500ms */
  2151. if (timer_timeout.tv_usec >= 1000000) {
  2152. timer_timeout.tv_usec -= 1000000;
  2153. timer_timeout.tv_sec += 1;
  2154. }
  2155. dec_mask = (1 << (u4SecNum - 1));
  2156. while (dec_mask != (DRV_Reg16(ECC_DECDONE_REG16) & dec_mask)) {
  2157. do_gettimeofday(&timer_cur);
  2158. if (timeval_compare(&timer_cur, &timer_timeout) >= 0) {
  2159. MSG(INIT, "ECC_DECDONE: timeout 0x%x %d time sec %d, usec %d\n",
  2160. DRV_Reg16(ECC_DECDONE_REG16), u4SecNum,
  2161. (int)(timer_cur.tv_sec - timer_timeout.tv_sec),
  2162. (int)(timer_cur.tv_usec - timer_timeout.tv_usec));
  2163. dump_nfi();
  2164. if (dec_mask == (DRV_Reg16(ECC_DECDONE_REG16) & dec_mask)) {
  2165. MSG(INIT, "ECC_DECDONE: timeout but finish job\n");
  2166. break;
  2167. }
  2168. return false;
  2169. }
  2170. }
  2171. while (DRV_Reg32(ECC_DECFSM_REG32) != ECC_DECFSM_IDLE) {
  2172. do_gettimeofday(&timer_cur);
  2173. if (timeval_compare(&timer_cur, &timer_timeout) >= 0) {
  2174. MSG(INIT,
  2175. "ECC_DECDONE: timeout ECC_DECFSM_REG32 0x%x 0x%x %d time sec %d, usec %d\n",
  2176. DRV_Reg16(ECC_DECFSM_REG32), DRV_Reg16(ECC_DECDONE_REG16), u4SecNum,
  2177. (int)(timer_cur.tv_sec - timer_timeout.tv_sec),
  2178. (int)(timer_cur.tv_usec - timer_timeout.tv_usec));
  2179. dump_nfi();
  2180. if (DRV_Reg32(ECC_DECFSM_REG32) == ECC_DECFSM_IDLE) {
  2181. MSG(INIT, "ECC_DECDONE: timeout but finish job\n");
  2182. break;
  2183. }
  2184. return false;
  2185. }
  2186. }
  2187. return true;
  2188. }
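/*
 * Note on the polling above: dec_mask selects only the DECDONE bit of the
 * last sector (bit u4SecNum - 1), on the assumption that the engine
 * completes sectors in order, so the last bit implies all are decoded.
 * A minimal sketch of the same timeval deadline arithmetic, assuming the
 * 800ms budget used above:
 */
#if 0 /* illustrative sketch only, not part of the driver build */
static void example_deadline_800ms(struct timeval *deadline)
{
	do_gettimeofday(deadline);
	deadline->tv_usec += 800 * 1000;	/* may push tv_usec past 1s... */
	if (deadline->tv_usec >= 1000000) {	/* ...so carry into tv_sec */
		deadline->tv_usec -= 1000000;
		deadline->tv_sec += 1;
	}
}
#endif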
  2189. /******************************************************************************
  2190. * mtk_nand_read_page_data
  2191. *
  2192. * DESCRIPTION:
2193. *Read the page data into the buffer!
  2194. *
  2195. * PARAMETERS:
  2196. *u8 *pDataBuf, u32 u4Size
  2197. *
  2198. * RETURNS:
  2199. *None
  2200. *
  2201. * NOTES:
  2202. *None
  2203. *
  2204. ******************************************************************************/
  2205. static bool mtk_nand_dma_read_data(struct mtd_info *mtd, u8 *buf, u32 length)
  2206. {
  2207. int interrupt_en = g_i4Interrupt;
  2208. int timeout = 0xfffff;
  2209. struct scatterlist sg;
  2210. enum dma_data_direction dir = DMA_FROM_DEVICE;
  2211. #ifdef DUMP_PEF
  2212. struct timeval stimer, etimer;
  2213. do_gettimeofday(&stimer);
  2214. #endif
  2215. sg_init_one(&sg, buf, length);
  2216. dma_map_sg(&(mtd->dev), &sg, 1, dir);
  2217. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2218. /* DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(pDataBuf)); */
2219. if ((unsigned int)buf % 16) { /* burst DMA needs a 16-byte-aligned buffer */
2220. pr_debug("Un-16-aligned address\n");
2221. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2222. } else {
2223. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2224. }
  2226. DRV_Reg16(NFI_INTR_REG16);
  2227. DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
  2228. if (interrupt_en)
  2229. init_completion(&g_comp_AHB_Done);
  2230. /*dmac_inv_range(pDataBuf, pDataBuf + u4Size); */
  2231. mb(); /*make sure process order */
  2232. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BRD);
  2233. g_running_dma = 1;
  2234. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2235. if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
  2236. && (gn_devinfo.tlcControl.needchangecolumn))
  2237. DRV_WriteReg16(NFI_TLC_RD_WHR2_REG16, (TLC_RD_WHR2_EN | 0x055));
  2238. #endif
  2239. if (interrupt_en) {
2240. /* wait for the AHB-done completion (timeout value is in jiffies) */
  2241. if (!wait_for_completion_timeout(&g_comp_AHB_Done, 0xFFF)) {
  2242. MSG(INIT, "wait for completion timeout happened @ [%s]: %d\n", __func__,
  2243. __LINE__);
  2244. dump_nfi();
  2245. g_running_dma = 0;
  2246. return false;
  2247. }
  2248. g_running_dma = 0;
  2249. while ((length >> host->hw->nand_sec_shift) >
  2250. ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12)) {
  2251. timeout--;
  2252. if (0 == timeout) {
  2253. pr_err("[%s] poll BYTELEN error\n", __func__);
  2254. g_running_dma = 0;
  2255. return false; /*4 AHB Mode Time Out! */
  2256. }
  2257. }
  2258. } else {
  2259. while (!DRV_Reg16(NFI_INTR_REG16)) {
  2260. timeout--;
  2261. if (0 == timeout) {
  2262. pr_err("[%s] poll nfi_intr error\n", __func__);
  2263. dump_nfi();
  2264. g_running_dma = 0;
  2265. return false; /*4 AHB Mode Time Out! */
  2266. }
  2267. }
  2268. g_running_dma = 0;
  2269. while ((length >> host->hw->nand_sec_shift) >
  2270. ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12)) {
  2271. timeout--;
  2272. if (0 == timeout) {
  2273. pr_err("[%s] poll BYTELEN error\n", __func__);
  2274. dump_nfi();
  2275. g_running_dma = 0;
  2276. return false; /*4 AHB Mode Time Out! */
  2277. }
  2278. }
  2279. }
  2280. dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
  2281. #ifdef DUMP_PEF
  2282. do_gettimeofday(&etimer);
  2283. g_NandPerfLog.ReadDMATotalTime += Cal_timediff(&etimer, &stimer);
  2284. g_NandPerfLog.ReadDMACount++;
  2285. #endif
  2286. return true;
  2287. }
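/*
 * Both polling loops above decode NFI_BYTELEN the same way: bits [16:12]
 * count the sectors the DMA engine has transferred so far. A hedged sketch
 * of the helper this could be factored into (name is illustrative):
 */
#if 0 /* illustrative sketch only, not part of the driver build */
static bool example_wait_sectors_done(u32 length, u32 sec_shift)
{
	int timeout = 0xfffff;
	u32 want = length >> sec_shift;	/* sectors expected */

	while (want > ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12)) {
		if (--timeout == 0)
			return false;	/* AHB mode timed out */
	}
	return true;
}
#endif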
  2288. static bool mtk_nand_mcu_read_data(u8 *buf, u32 length)
  2289. {
  2290. int timeout = 0xffff;
  2291. u32 i;
  2292. u32 *buf32 = (u32 *) buf;
  2293. #ifdef TESTTIME
  2294. unsigned long long time1, time2;
  2295. time1 = sched_clock();
  2296. #endif
  2297. if ((u32) buf % 4 || length % 4)
  2298. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2299. else
  2300. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2301. /*DRV_WriteReg32(NFI_STRADDR_REG32, 0); */
  2302. mb(); /*make sure process order */
  2303. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BRD);
  2304. if ((u32) buf % 4 || length % 4) {
  2305. for (i = 0; (i < (length)) && (timeout > 0);) {
  2306. /*if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4) */
  2307. if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2308. *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
  2309. i++;
  2310. } else {
  2311. timeout--;
  2312. }
  2313. if (0 == timeout) {
  2314. pr_err("[%s] timeout\n", __func__);
  2315. dump_nfi();
  2316. return false;
  2317. }
  2318. }
  2319. } else {
  2320. for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
  2321. /*if (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) >= 4) */
  2322. if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2323. *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
  2324. i++;
  2325. } else {
  2326. timeout--;
  2327. }
  2328. if (0 == timeout) {
  2329. pr_err("[%s] timeout\n", __func__);
  2330. dump_nfi();
  2331. return false;
  2332. }
  2333. }
  2334. }
  2335. #ifdef TESTTIME
  2336. time2 = sched_clock() - time1;
  2337. if (!readdatatime)
  2338. readdatatime = (time2);
  2339. #endif
  2340. return true;
  2341. }
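/*
 * Alignment rule used by mtk_nand_mcu_read_data: if either the buffer
 * address or the length is not a multiple of 4, CNFG_BYTE_RW switches the
 * FIFO to single-byte PIO; otherwise it is drained one 32-bit word per
 * DIRDY poll. Worked example: a buf ending in ...2 with length 512 takes
 * the byte path ((2 % 4) != 0), as does an aligned buf with length 510
 * ((510 % 4) != 0).
 */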
  2342. static bool mtk_nand_read_page_data(struct mtd_info *mtd, u8 *pDataBuf, u32 u4Size)
  2343. {
  2344. #if (__INTERNAL_USE_AHB_MODE__)
  2345. return mtk_nand_dma_read_data(mtd, pDataBuf, u4Size);
  2346. #else
2347. return mtk_nand_mcu_read_data(pDataBuf, u4Size);
  2348. #endif
  2349. }
  2350. /******************************************************************************
  2351. * mtk_nand_write_page_data
  2352. *
  2353. * DESCRIPTION:
2354. *Write the page data from the buffer!
  2355. *
  2356. * PARAMETERS:
  2357. *u8 *pDataBuf, u32 u4Size
  2358. *
  2359. * RETURNS:
  2360. *None
  2361. *
  2362. * NOTES:
  2363. *None
  2364. *
  2365. ******************************************************************************/
  2366. static bool mtk_nand_dma_write_data(struct mtd_info *mtd, u8 *pDataBuf, u32 u4Size)
  2367. {
2368. int i4Interrupt = 0; /* interrupt mode disabled for writes; poll instead (was g_i4Interrupt) */
  2369. u32 timeout = 0xFFFF;
  2370. struct scatterlist sg;
  2371. enum dma_data_direction dir = DMA_TO_DEVICE;
  2372. #ifdef DUMP_PEF
  2373. struct timeval stimer, etimer;
  2374. do_gettimeofday(&stimer);
  2375. #endif
  2376. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2377. u32 reg_val;
  2378. #endif
sg_init_one(&sg, pDataBuf, u4Size); /* map for cache maintenance, mirroring mtk_nand_dma_read_data(); balances the unmap below */
dma_map_sg(&(mtd->dev), &sg, 1, dir);
2379. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2380. DRV_Reg16(NFI_INTR_REG16);
  2381. DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
  2382. /* DRV_WriteReg32(NFI_STRADDR_REG32, (u32 *)virt_to_phys(pDataBuf)); */
2383. if ((unsigned int)pDataBuf % 16) { /* TODO: can not use AHB mode here */
2384. pr_debug("Un-16-aligned address\n");
2385. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2386. } else {
2387. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
2388. }
  2390. if (i4Interrupt) {
  2391. init_completion(&g_comp_AHB_Done);
  2392. DRV_Reg16(NFI_INTR_REG16);
  2393. DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_AHB_DONE_EN);
  2394. }
  2395. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2396. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  2397. reg_val = DRV_Reg16(NFI_DEBUG_CON1_REG16);
  2398. reg_val |= 0x4000;
  2399. DRV_WriteReg16(NFI_DEBUG_CON1_REG16, reg_val);
  2400. }
  2401. #endif
  2402. /*dmac_clean_range(pDataBuf, pDataBuf + u4Size); */
  2403. mb(); /*make sure process order */
  2404. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2405. g_running_dma = 3;
  2406. if (i4Interrupt) {
2407. /* wait for the AHB-done completion (10 jiffies) */
  2408. if (!wait_for_completion_timeout(&g_comp_AHB_Done, 10)) {
  2409. MSG(READ, "wait for completion timeout happened @ [%s]: %d\n", __func__,
  2410. __LINE__);
  2411. dump_nfi();
  2412. g_running_dma = 0;
  2413. return false;
  2414. }
  2415. g_running_dma = 0;
  2416. /* wait_for_completion(&g_comp_AHB_Done); */
  2417. } else {
  2418. while ((u4Size >> host->hw->nand_sec_shift) >
  2419. ((DRV_Reg32(NFI_BYTELEN_REG16) & 0x1f000) >> 12)) {
  2420. timeout--;
  2421. if (0 == timeout) {
  2422. pr_err("[%s] poll BYTELEN error\n", __func__);
  2423. g_running_dma = 0;
  2424. return false; /*4 AHB Mode Time Out! */
  2425. }
  2426. }
  2427. g_running_dma = 0;
  2428. }
  2429. dma_unmap_sg(&(mtd->dev), &sg, 1, dir);
  2430. #ifdef DUMP_PEF
  2431. do_gettimeofday(&etimer);
  2432. g_NandPerfLog.WriteDMATotalTime += Cal_timediff(&etimer, &stimer);
  2433. g_NandPerfLog.WriteDMACount++;
  2434. #endif
  2435. return true;
  2436. }
  2437. static bool mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 *buf, u32 length)
  2438. {
  2439. u32 timeout = 0xFFFF;
  2440. u32 i;
  2441. u32 *pBuf32;
  2442. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2443. mb(); /*make sure process order */
  2444. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2445. pBuf32 = (u32 *) buf;
  2446. if ((u32) buf % 4 || length % 4)
  2447. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2448. else
  2449. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2450. if ((u32) buf % 4 || length % 4) {
  2451. for (i = 0; (i < (length)) && (timeout > 0);) {
  2452. if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2453. DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
  2454. i++;
  2455. } else {
  2456. timeout--;
  2457. }
  2458. if (0 == timeout) {
  2459. pr_err("[%s] timeout\n", __func__);
  2460. dump_nfi();
  2461. return false;
  2462. }
  2463. }
  2464. } else {
  2465. for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
  2466. /* if (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) <= 12) */
  2467. if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
  2468. DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
  2469. i++;
  2470. } else {
  2471. timeout--;
  2472. }
  2473. if (0 == timeout) {
  2474. pr_err("[%s] timeout\n", __func__);
  2475. dump_nfi();
  2476. return false;
  2477. }
  2478. }
  2479. }
  2480. return true;
  2481. }
  2482. static bool mtk_nand_write_page_data(struct mtd_info *mtd, u8 *buf, u32 size)
  2483. {
  2484. #if (__INTERNAL_USE_AHB_MODE__)
  2485. return mtk_nand_dma_write_data(mtd, buf, size);
  2486. #else
  2487. return mtk_nand_mcu_write_data(mtd, buf, size);
  2488. #endif
  2489. }
  2490. /******************************************************************************
  2491. * mtk_nand_read_fdm_data
  2492. *
  2493. * DESCRIPTION:
2494. *Read the FDM (spare) data!
  2495. *
  2496. * PARAMETERS:
  2497. *u8 *pDataBuf, u32 u4SecNum
  2498. *
  2499. * RETURNS:
  2500. *None
  2501. *
  2502. * NOTES:
  2503. *None
  2504. *
  2505. ******************************************************************************/
  2506. static void mtk_nand_read_fdm_data(u8 *pDataBuf, u32 u4SecNum)
  2507. {
  2508. #ifndef CONFIG_MTK_TLC_NAND_SUPPORT
  2509. u32 i;
  2510. u32 *pBuf32 = (u32 *) pDataBuf;
  2511. if (pBuf32) {
  2512. for (i = 0; i < u4SecNum; ++i) {
  2513. *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
  2514. *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
  2515. }
  2516. }
  2517. #else
  2518. u32 fdm_temp[2];
  2519. u32 i, j;
  2520. u8 *byte_ptr;
  2521. byte_ptr = (u8 *)fdm_temp;
  2522. if (pDataBuf) {
  2523. for (i = 0; i < u4SecNum; ++i) {
  2524. fdm_temp[0] = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
  2525. fdm_temp[1] = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
  2526. for (j = 0; j < host->hw->nand_fdm_size; j++)
  2527. *(pDataBuf + (i * host->hw->nand_fdm_size) + j) = *(byte_ptr + j);
  2528. }
  2529. }
  2530. #endif
  2531. }
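/*
 * FDM layout note: each sector owns an 8-byte FDM0L/FDM0M register pair,
 * and consecutive pairs sit two 32-bit words apart, which is why the index
 * above advances by (i << 1). The TLC build copies only
 * host->hw->nand_fdm_size of those 8 bytes per sector into the caller's
 * buffer instead of assuming the full pair is meaningful.
 */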
  2532. /******************************************************************************
  2533. * mtk_nand_write_fdm_data
  2534. *
  2535. * DESCRIPTION:
2536. *Write the FDM (spare) data!
  2537. *
  2538. * PARAMETERS:
  2539. *u8 *pDataBuf, u32 u4SecNum
  2540. *
  2541. * RETURNS:
  2542. *None
  2543. *
  2544. * NOTES:
  2545. *None
  2546. *
  2547. ******************************************************************************/
  2548. static u8 fdm_buf[128];
  2549. static void mtk_nand_write_fdm_data(struct nand_chip *chip, u8 *pDataBuf, u32 u4SecNum)
  2550. {
  2551. u32 i, j;
  2552. u8 checksum = 0;
  2553. bool empty = true;
  2554. struct nand_oobfree *free_entry;
  2555. u8 *pBuf;
  2556. u8 *byte_ptr;
  2557. u32 fdm_data[2];
  2558. memcpy(fdm_buf, pDataBuf, u4SecNum * host->hw->nand_fdm_size);
  2559. free_entry = chip->ecc.layout->oobfree;
  2560. for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
  2561. for (j = 0; j < free_entry[i].length; j++) {
  2562. if (pDataBuf[free_entry[i].offset + j] != 0xFF)
  2563. empty = false;
  2564. checksum ^= pDataBuf[free_entry[i].offset + j];
  2565. }
  2566. }
  2567. if (!empty)
  2568. fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
  2569. pBuf = (u8 *)fdm_data;
  2570. byte_ptr = (u8 *)fdm_buf;
  2571. for (i = 0; i < u4SecNum; ++i) {
  2572. fdm_data[0] = 0xFFFFFFFF;
  2573. fdm_data[1] = 0xFFFFFFFF;
  2574. for (j = 0; j < host->hw->nand_fdm_size; j++)
  2575. *(pBuf + j) = *(byte_ptr + j + (i * host->hw->nand_fdm_size));
  2576. DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), fdm_data[0]);
  2577. DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), fdm_data[1]);
  2578. }
  2579. }
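/*
 * The XOR loop above folds every byte of the oobfree regions into a one-byte
 * checksum and, when the OOB is not blank (all 0xFF), stores it right after
 * the last free region. Hedged worked example, assuming a single free region
 * at offset 0 with length 4 holding {0x12, 0x34, 0x56, 0xFF}:
 * checksum = 0x12 ^ 0x34 ^ 0x56 ^ 0xFF = 0x8F, written to fdm_buf[4].
 */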
  2580. /******************************************************************************
  2581. * mtk_nand_stop_read
  2582. *
  2583. * DESCRIPTION:
  2584. *Stop read operation !
  2585. *
  2586. * PARAMETERS:
  2587. *None
  2588. *
  2589. * RETURNS:
  2590. *None
  2591. *
  2592. * NOTES:
  2593. *None
  2594. *
  2595. ******************************************************************************/
  2596. static void mtk_nand_stop_read(void)
  2597. {
  2598. NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BRD);
  2599. mtk_nand_reset();
  2600. if (g_bHwEcc)
  2601. ECC_Decode_End();
  2602. DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
  2603. }
  2604. /******************************************************************************
  2605. * mtk_nand_stop_write
  2606. *
  2607. * DESCRIPTION:
  2608. *Stop write operation !
  2609. *
  2610. * PARAMETERS:
  2611. *None
  2612. *
  2613. * RETURNS:
  2614. *None
  2615. *
  2616. * NOTES:
  2617. *None
  2618. *
  2619. ******************************************************************************/
  2620. static void mtk_nand_stop_write(void)
  2621. {
  2622. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2623. u32 reg_val;
  2624. #endif
  2625. NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2626. if (g_bHwEcc)
  2627. ECC_Encode_End();
  2628. DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
  2629. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2630. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  2631. reg_val = DRV_Reg16(NFI_DEBUG_CON1_REG16);
  2632. reg_val &= (~0x4000);
  2633. DRV_WriteReg16(NFI_DEBUG_CON1_REG16, reg_val);
  2634. }
  2635. #endif
  2636. }
  2637. /*---------------------------------------------------------------------------*/
  2638. #define STATUS_READY (0x40)
  2639. #define STATUS_FAIL (0x01)
  2640. #define STATUS_WR_ALLOW (0x80)
  2641. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2642. static bool mtk_nand_read_status(void)
  2643. {
  2644. int status = 0;
  2645. unsigned int timeout;
  2646. mtk_nand_reset();
  2647. /* Disable HW ECC */
  2648. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  2649. /* Disable 16-bit I/O */
  2650. NFI_CLN_REG32(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
  2651. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_OP_SRD | CNFG_READ_EN | CNFG_BYTE_RW);
  2652. DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
  2653. DRV_WriteReg32(NFI_CON_REG16, 0x3);
  2654. mtk_nand_set_mode(CNFG_OP_SRD);
  2655. DRV_WriteReg16(NFI_CNFG_REG16, 0x2042);
  2656. mtk_nand_set_command(NAND_CMD_STATUS);
  2657. DRV_WriteReg32(NFI_CON_REG16, 0x90);
  2658. timeout = TIMEOUT_4;
  2659. WAIT_NFI_PIO_READY(timeout);
  2660. if (timeout)
  2661. status = (DRV_Reg16(NFI_DATAR_REG32));
  2662. /*~ clear NOB */
  2663. DRV_WriteReg32(NFI_CON_REG16, 0);
  2664. if (gn_devinfo.iowidth == 16) {
  2665. NFI_SET_REG32(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
  2666. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
  2667. }
  2668. /* flash is ready now, check status code */
  2669. if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
  2670. && (gn_devinfo.tlcControl.slcopmodeEn)) {
  2671. /*pr_warn("status 0x%x", status);*/
  2672. if (SLC_MODE_OP_FALI & status) {
  2673. if (!(STATUS_WR_ALLOW & status))
  2674. MSG(INIT, "status locked\n");
  2675. else
  2676. MSG(INIT, "status unknown\n");
  2677. return FALSE;
  2678. }
  2679. return TRUE;
  2680. }
  2681. if (STATUS_FAIL & status)
  2682. return FALSE;
  2683. return TRUE;
  2684. }
  2685. #endif
  2686. bool mtk_nand_SetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes)
  2687. {
  2688. u16 reg_val = 0;
  2689. u8 write_count = 0;
  2690. u32 reg = 0;
  2691. u32 timeout = TIMEOUT_3;
  2692. mtk_nand_reset();
  2693. reg = DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32);
  2694. if (!(reg&TYPE_SLC))
  2695. bytes <<= 1;
  2696. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
  2697. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  2698. mtk_nand_set_command(cmd);
  2699. mtk_nand_set_address(addr, 0, 1, 0);
  2700. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2701. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  2702. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2703. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2704. while ((write_count < bytes) && timeout) {
  2705. WAIT_NFI_PIO_READY(timeout);
  2706. if (timeout == 0)
  2707. break;
  2708. if (reg&TYPE_SLC)
  2709. DRV_WriteReg8(NFI_DATAW_REG32, *value++);
  2710. else if (write_count % 2)
  2711. DRV_WriteReg8(NFI_DATAW_REG32, *value++);
  2712. else
  2713. DRV_WriteReg8(NFI_DATAW_REG32, *value);
  2714. write_count++;
  2715. timeout = TIMEOUT_3;
  2716. }
  2717. *NFI_CNRNB_REG16 = 0x81;
  2718. if (!mtk_nand_status_ready(STA_NAND_BUSY_RETURN))
  2719. return FALSE;
  2720. return TRUE;
  2721. }
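/*
 * Usage sketch (hedged): the SET FEATURES opcode arrives through
 * deviceinfo.feature_set.FeatureSet.sfeatureCmd (0xEF for ONFI parts), so a
 * 4-byte feature write looks like the call the read-retry code issues later:
 */
#if 0 /* illustrative only; 'feature' carries the P1..P4 parameter bytes */
u32 feature = 0x00000001;
mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd,
		deviceinfo.feature_set.FeatureSet.readRetryAddress,
		(u8 *)&feature, 4);
#endif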
  2722. bool mtk_nand_GetFeature(struct mtd_info *mtd, u16 cmd, u32 addr, u8 *value, u8 bytes)
  2723. {
  2724. u16 reg_val = 0;
  2725. u8 read_count = 0;
  2726. u32 timeout = TIMEOUT_3;
  2727. mtk_nand_reset();
  2728. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN);
  2729. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  2730. mtk_nand_set_command(cmd);
  2731. mtk_nand_set_address(addr, 0, 1, 0);
  2732. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2733. *NFI_CNRNB_REG16 = 0x81;
  2734. mtk_nand_status_ready(STA_NAND_BUSY_RETURN);
  2735. reg_val = DRV_Reg32(NFI_CON_REG16);
  2736. reg_val &= ~CON_NFI_NOB_MASK;
  2737. reg_val |= ((4 << CON_NFI_NOB_SHIFT)|CON_NFI_SRD);
  2738. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  2739. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2740. while ((read_count < bytes) && timeout) {
  2741. WAIT_NFI_PIO_READY(timeout);
  2742. if (timeout == 0)
  2743. break;
  2744. *value++ = DRV_Reg8(NFI_DATAR_REG32);
  2745. read_count++;
  2746. timeout = TIMEOUT_3;
  2747. }
  2748. if (timeout != 0)
  2749. return TRUE;
  2750. else
  2751. return FALSE;
  2752. }
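/*
 * mtk_nand_GetFeature is the symmetric read path: the caller passes the GET
 * FEATURES opcode (0xEE for ONFI parts) and the same feature address, and
 * the PIO loop above pulls the parameter bytes back one at a time.
 */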
  2753. #if 1
  2754. const u8 data_tbl[8][5] = {
  2755. {0x04, 0x04, 0x7C, 0x7E, 0x00},
  2756. {0x00, 0x7C, 0x78, 0x78, 0x00},
  2757. {0x7C, 0x76, 0x74, 0x72, 0x00},
  2758. {0x08, 0x08, 0x00, 0x00, 0x00},
  2759. {0x0B, 0x7E, 0x76, 0x74, 0x00},
  2760. {0x10, 0x76, 0x72, 0x70, 0x00},
  2761. {0x02, 0x7C, 0x7E, 0x70, 0x00},
  2762. {0x00, 0x00, 0x00, 0x00, 0x00}
  2763. };
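/*
 * data_tbl is indexed by retryCount; the five columns are the values that
 * mtk_nand_toshiba_rrtry() below writes to parameter registers
 * 0x04/0x05/0x06/0x07/0x0D. The entries look like two's-complement read
 * threshold offsets (0x7C = -4, 0x08 = +8), an assumption based on the
 * usual Toshiba retry scheme rather than anything stated in this file.
 */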
  2764. static void mtk_nand_modeentry_rrtry(void)
  2765. {
  2766. mtk_nand_reset();
  2767. mtk_nand_set_mode(CNFG_OP_CUST);
  2768. mtk_nand_set_command(0x5C);
  2769. mtk_nand_set_command(0xC5);
  2770. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2771. }
  2772. static void mtk_nand_rren_rrtry(bool needB3)
  2773. {
  2774. mtk_nand_reset();
  2775. mtk_nand_set_mode(CNFG_OP_CUST);
  2776. if (needB3)
  2777. mtk_nand_set_command(0xB3);
  2778. mtk_nand_set_command(0x26);
  2779. mtk_nand_set_command(0x5D);
  2780. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2781. }
  2782. static void mtk_nand_sprmset_rrtry(u32 addr, u32 data)
  2783. {
  2784. u16 reg_val = 0;
  2785. u32 timeout = TIMEOUT_3; /*0xffff; */
  2786. mtk_nand_reset();
  2787. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
  2788. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  2789. mtk_nand_set_command(0x55);
  2790. mtk_nand_set_address(addr, 0, 1, 0);
  2791. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2792. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  2793. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2794. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2795. WAIT_NFI_PIO_READY(timeout);
  2796. timeout = TIMEOUT_3;
  2797. DRV_WriteReg8(NFI_DATAW_REG32, data);
  2798. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--))
  2799. ;
  2800. }
  2801. static void mtk_nand_toshiba_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo, u32 retryCount,
  2802. bool defValue)
  2803. {
  2804. u32 acccon;
  2805. u8 cnt = 0;
2806. u8 add_reg[5] = {0x04, 0x05, 0x06, 0x07, 0x0D};
  2807. acccon = DRV_Reg32(NFI_ACCCON_REG32);
  2808. if (0 == retryCount)
  2809. mtk_nand_modeentry_rrtry();
  2810. for (cnt = 0; cnt < 5; cnt++)
  2811. mtk_nand_sprmset_rrtry(add_reg[cnt], data_tbl[retryCount][cnt]);
  2812. if (3 == retryCount)
  2813. mtk_nand_rren_rrtry(TRUE);
  2814. else if (6 > retryCount)
  2815. mtk_nand_rren_rrtry(FALSE);
  2816. if (7 == retryCount) {
  2817. mtk_nand_device_reset();
  2818. mtk_nand_reset();
  2819. }
  2820. }
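/*
 * Toshiba retry sequencing, as implemented above: step 0 enters the vendor
 * mode (5Ch/C5h), every step rewrites the five parameter registers from
 * data_tbl, steps 0..5 re-arm the read with 26h/5Dh (prefixed by B3h on
 * step 3), and step 7 resets the device to leave retry mode.
 */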
  2821. const u8 tb_slc_1y[8][2] = {
  2822. {0xF0, 0x00},
  2823. {0xE0, 0x00},
  2824. {0xD0, 0x00},
  2825. {0xC0, 0x00},
  2826. {0x20, 0x00},
  2827. {0x30, 0x00},
  2828. {0x40, 0x00},
  2829. {0x00, 0x00}
  2830. };
  2831. const u8 tb_tlc_1y[31][7] = {
  2832. {0xFE, 0x03, 0x02, 0x02, 0xFF, 0xFC, 0xFD},
  2833. {0xFE, 0x02, 0x01, 0x01, 0xFE, 0xFA, 0xFB},
  2834. {0xFE, 0x00, 0x00, 0xFF, 0xFC, 0xF8, 0xF9},
  2835. {0xFD, 0xFF, 0xFE, 0xFE, 0xFA, 0xF6, 0xF7},
  2836. {0xFD, 0xFE, 0xFD, 0xFC, 0xF8, 0xF4, 0xF5},
  2837. {0xFD, 0xFD, 0xFC, 0xFB, 0xF6, 0xF2, 0xF2},
  2838. {0xFD, 0xFB, 0xFB, 0xF9, 0xF5, 0xF0, 0xF0},
  2839. {0xFD, 0xFA, 0xF9, 0xF8, 0xF3, 0xEE, 0xEE},
  2840. {0xFD, 0xF9, 0xF8, 0xF6, 0xF1, 0xEC, 0xEC},
  2841. {0xFD, 0xF8, 0xF7, 0xF5, 0xEF, 0xEA, 0xE9},
  2842. {0xFC, 0xF6, 0xF6, 0xF3, 0xEE, 0xE8, 0xE7},
  2843. {0xFA, 0xFA, 0xFB, 0xFA, 0xFB, 0xFA, 0xFA},
  2844. {0xFA, 0xFA, 0xFA, 0xF9, 0xFA, 0xF8, 0xF8},
  2845. {0xFA, 0xFA, 0xFA, 0xF8, 0xF9, 0xF6, 0xF5},
  2846. {0xFB, 0xFA, 0xF9, 0xF7, 0xF7, 0xF4, 0xF3},
  2847. {0xFB, 0xFB, 0xF9, 0xF6, 0xF6, 0xF2, 0xF0},
  2848. {0xFB, 0xFB, 0xF8, 0xF5, 0xF5, 0xF0, 0xEE},
  2849. {0xFB, 0xFB, 0xF8, 0xF5, 0xF4, 0xEE, 0xEB},
  2850. {0xFC, 0xFB, 0xF7, 0xF4, 0xF2, 0xEC, 0xE9},
  2851. {0xFC, 0xFE, 0xFE, 0xF9, 0xFA, 0xF8, 0xF8},
  2852. {0xFD, 0xFE, 0xFD, 0xF7, 0xF7, 0xF4, 0xF3},
  2853. {0xFD, 0xFF, 0xFC, 0xF5, 0xF5, 0xF0, 0xEE},
  2854. {0xFE, 0x03, 0x03, 0x04, 0x01, 0xFF, 0x01},
  2855. {0xFC, 0x00, 0x00, 0x01, 0xFE, 0xFC, 0xFE},
  2856. {0xFA, 0xFA, 0xFC, 0xFC, 0xFA, 0xF7, 0xFA},
  2857. {0x00, 0x03, 0x02, 0x03, 0xFF, 0xFC, 0xFE},
  2858. {0x04, 0x03, 0x03, 0x03, 0x00, 0xFC, 0xFD},
  2859. {0x08, 0x04, 0x03, 0x04, 0x00, 0xFC, 0xFC},
  2860. {0xFC, 0x00, 0x00, 0x00, 0x04, 0x04, 0x08},
  2861. {0xF8, 0x00, 0x00, 0x00, 0x08, 0x08, 0x10},
  2862. {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
  2863. };
  2864. static void mtk_nand_modeentry_tlc_rrtry(void)
  2865. {
  2866. u32 reg_val = 0;
  2867. u32 timeout = TIMEOUT_3;
  2868. mtk_nand_reset();
  2869. mtk_nand_set_mode(CNFG_OP_CUST);
  2870. mtk_nand_set_command(0x5C);
  2871. mtk_nand_set_command(0xC5);
  2872. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2873. mtk_nand_reset();
  2874. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
  2875. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  2876. mtk_nand_set_command(0x55);
  2877. mtk_nand_set_address(0, 0, 1, 0);
  2878. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2879. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  2880. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2881. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2882. WAIT_NFI_PIO_READY(timeout);
  2883. timeout = TIMEOUT_3;
  2884. DRV_WriteReg8(NFI_DATAW_REG32, 0x01);
  2885. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--))
  2886. ;
  2887. }
  2888. static void mtk_nand_toshiba_tlc_1y_rrtry(flashdev_info deviceinfo, u32 retryCount, bool defValue)
  2889. {
  2890. u8 add_reg[7] = {0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A};
  2891. u8 cnt = 0;
  2892. if (TRUE == defValue) {
  2893. for (cnt = 0; cnt < 7; cnt++)
  2894. mtk_nand_sprmset_rrtry(add_reg[cnt], tb_tlc_1y[30][cnt]);
  2895. mtk_nand_set_mode(CNFG_OP_RESET);
  2896. NFI_ISSUE_COMMAND(NAND_CMD_RESET, 0, 0, 0, 0);
  2897. mtk_nand_reset();
  2898. return;
  2899. }
  2900. if (0 == retryCount)
  2901. mtk_nand_modeentry_tlc_rrtry();
  2902. for (cnt = 0; cnt < 7; cnt++)
  2903. mtk_nand_sprmset_rrtry(add_reg[cnt], tb_tlc_1y[retryCount][cnt]);
  2904. mtk_nand_reset();
  2905. mtk_nand_set_mode(CNFG_OP_CUST);
  2906. if (31 == retryCount)
  2907. mtk_nand_set_command(0xB3);
  2908. mtk_nand_set_command(0x5D);
  2909. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2910. }
  2911. static void mtk_nand_toshiba_slc_1y_rrtry(flashdev_info deviceinfo, u32 retryCount, bool defValue)
  2912. {
  2913. u8 add_reg[2] = {0x0B, 0x0D};
  2914. u8 cnt = 0;
2915. if (TRUE == defValue) {
2916. for (cnt = 0; cnt < 2; cnt++)
2917. mtk_nand_sprmset_rrtry(add_reg[cnt], tb_slc_1y[7][cnt]);
2918. mtk_nand_set_mode(CNFG_OP_RESET);
2919. NFI_ISSUE_COMMAND(NAND_CMD_RESET, 0, 0, 0, 0);
2920. mtk_nand_reset();
return; /* match the TLC variant: don't fall through after restoring defaults */
2921. }
  2922. if (0 == retryCount)
  2923. mtk_nand_modeentry_tlc_rrtry();
  2924. for (cnt = 0; cnt < 2; cnt++)
  2925. mtk_nand_sprmset_rrtry(add_reg[cnt], tb_slc_1y[retryCount][cnt]);
  2926. mtk_nand_reset();
  2927. mtk_nand_set_mode(CNFG_OP_CUST);
  2928. mtk_nand_set_command(0x5D);
  2929. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2930. }
  2931. static void mtk_nand_toshiba_tlc_rrtry(struct mtd_info *mtd, struct flashdev_info_t deviceinfo,
  2932. u32 retryCount, bool defValue)
  2933. {
  2934. if (gn_devinfo.tlcControl.slcopmodeEn)
  2935. mtk_nand_toshiba_slc_1y_rrtry(deviceinfo, retryCount, defValue);
  2936. else
  2937. mtk_nand_toshiba_tlc_1y_rrtry(deviceinfo, retryCount, defValue);
  2938. }
  2939. #endif
  2940. static void mtk_nand_micron_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo, u32 feature,
  2941. bool defValue)
  2942. {
  2943. mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd,
  2944. deviceinfo.feature_set.FeatureSet.readRetryAddress,
  2945. (u8 *)&feature, 4);
  2946. }
  2947. static int g_sandisk_retry_case; /*for new read retry table case 1, 2, 3, 4 */
  2948. static void mtk_nand_sandisk_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo, u32 feature,
  2949. bool defValue)
  2950. {
  2951. if (FALSE == defValue)
  2952. mtk_nand_reset();
  2953. else {
  2954. mtk_nand_device_reset();
  2955. mtk_nand_reset();
  2956. /*should do NAND DEVICE interface change under sync mode */
  2957. }
  2958. mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd,
  2959. deviceinfo.feature_set.FeatureSet.readRetryAddress,
  2960. (u8 *) &feature, 4);
  2961. if (FALSE == defValue) {
  2962. if (g_sandisk_retry_case > 1) {
  2963. if (g_sandisk_retry_case == 3) {
  2964. u32 timeout = TIMEOUT_3;
  2965. mtk_nand_reset();
  2966. DRV_WriteReg16(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
  2967. mtk_nand_set_command(0x5C);
  2968. mtk_nand_set_command(0xC5);
  2969. mtk_nand_set_command(0x55);
  2970. mtk_nand_set_address(0x00, 0, 1, 0); /* test mode entry */
  2971. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2972. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  2973. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2974. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2975. WAIT_NFI_PIO_READY(timeout);
  2976. DRV_WriteReg8(NFI_DATAW_REG32, 0x01);
  2977. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN)
  2978. && (timeout--))
  2979. ;
  2980. mtk_nand_reset();
  2981. timeout = TIMEOUT_3;
  2982. mtk_nand_set_command(0x55);
  2983. mtk_nand_set_address(0x23, 0, 1, 0);
  2984. /*changing parameter LMFLGFIX_NEXT = 1 to all die */
  2985. mtk_nand_status_ready(STA_NFI_OP_MASK);
  2986. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  2987. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  2988. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  2989. WAIT_NFI_PIO_READY(timeout);
  2990. DRV_WriteReg8(NFI_DATAW_REG32, 0xC0);
  2991. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN)
  2992. && (timeout--))
  2993. ;
  2994. mtk_nand_reset();
  2995. /*pr_err("Case3# Set LMFLGFIX_NEXT = 1\n"); */
  2996. }
  2997. mtk_nand_set_command(0x25);
  2998. /*pr_err("Case2#3# Set cmd 25\n"); */
  2999. }
  3000. mtk_nand_set_command(deviceinfo.feature_set.FeatureSet.readRetryPreCmd);
  3001. }
  3002. }
  3003. /*sandisk 19nm read retry*/
  3004. u16 sandisk_19nm_rr_table[18] = {
  3005. 0x0000,
  3006. 0xFF0F, 0xEEFE, 0xDDFD, 0x11EE,
  3007. 0x22ED, 0x33DF, 0xCDDE, 0x01DD,
  3008. 0x0211, 0x1222, 0xBD21, 0xAD32,
  3009. 0x9DF0, 0xBCEF, 0xACDC, 0x9CFF,
  3010. 0x0000
  3011. };
  3012. static void sandisk_19nm_rr_init(void)
  3013. {
  3014. u32 reg_val = 0;
  3015. u32 count = 0;
  3016. u32 timeout = 0xffff;
  3017. u32 acccon;
  3018. acccon = DRV_Reg32(NFI_ACCCON_REG32);
  3019. mtk_nand_reset();
  3020. reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
  3021. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3022. mtk_nand_set_command(0x3B);
  3023. mtk_nand_set_command(0xB9);
  3024. for (count = 0; count < 9; count++) {
  3025. mtk_nand_set_command(0x53);
  3026. mtk_nand_set_address((0x04 + count), 0, 1, 0);
  3027. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
  3028. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3029. timeout = 0xffff;
  3030. WAIT_NFI_PIO_READY(timeout);
  3031. DRV_WriteReg32(NFI_DATAW_REG32, 0x00);
  3032. mtk_nand_reset();
  3033. }
  3034. }
  3035. static void sandisk_19nm_rr_loading(u32 retryCount, bool defValue)
  3036. {
  3037. u32 reg_val = 0;
  3038. u32 timeout = 0xffff;
  3039. u32 acccon;
  3040. u8 count;
3041. u8 cmd_reg[3] = { 0x4, 0x5, 0x7 };
  3042. acccon = DRV_Reg32(NFI_ACCCON_REG32);
  3043. mtk_nand_reset();
  3044. reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
  3045. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3046. if ((0 != retryCount) || defValue)
  3047. mtk_nand_set_command(0xD6);
  3048. mtk_nand_set_command(0x3B);
  3049. mtk_nand_set_command(0xB9);
  3050. for (count = 0; count < 3; count++) {
  3051. mtk_nand_set_command(0x53);
  3052. mtk_nand_set_address(cmd_reg[count], 0, 1, 0);
  3053. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
  3054. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3055. timeout = 0xffff;
  3056. WAIT_NFI_PIO_READY(timeout);
  3057. if (count == 0)
  3058. DRV_WriteReg32(NFI_DATAW_REG32,
  3059. (((sandisk_19nm_rr_table[retryCount] & 0xF000) >> 8) |
  3060. ((sandisk_19nm_rr_table[retryCount] & 0x00F0) >> 4)));
  3061. else if (count == 1)
  3062. DRV_WriteReg32(NFI_DATAW_REG32,
  3063. ((sandisk_19nm_rr_table[retryCount] & 0x000F) << 4));
  3064. else if (count == 2)
  3065. DRV_WriteReg32(NFI_DATAW_REG32,
  3066. ((sandisk_19nm_rr_table[retryCount] & 0x0F00) >> 4));
  3067. mtk_nand_reset();
  3068. }
  3069. if (!defValue)
  3070. mtk_nand_set_command(0xB6);
  3071. }
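/*
 * Nibble packing in sandisk_19nm_rr_table, as consumed above. Worked example
 * for entry 0xEEFE: register 0x04 gets ((0xE000 >> 8) | (0x00F0 >> 4)) =
 * 0xE0 | 0x0F = 0xEF, register 0x05 gets ((0x000E) << 4) = 0xE0, and
 * register 0x07 gets ((0x0E00) >> 4) = 0xE0.
 */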
  3072. static void mtk_nand_sandisk_19nm_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo,
  3073. u32 retryCount, bool defValue)
  3074. {
  3075. if ((retryCount == 0) && (!defValue))
  3076. sandisk_19nm_rr_init();
  3077. sandisk_19nm_rr_loading(retryCount, defValue);
  3078. }
  3079. /*hynix 16nm read retry table read*/
  3080. #define HYNIX_RR_TABLE_SIZE (1026) /*hynix read retry table size */
  3081. #define SINGLE_RR_TABLE_SIZE (64)
  3082. #define READ_RETRY_STEP (gn_devinfo.feature_set.FeatureSet.readRetryCnt + \
  3083. gn_devinfo.feature_set.FeatureSet.readRetryStart)
  3084. #define HYNIX_16NM_RR_TABLE_SIZE ((READ_RETRY_STEP == 12) ? (784) : (528)) /*hynix read retry table size */
  3085. #define SINGLE_RR_TABLE_16NM_SIZE ((READ_RETRY_STEP == 12)?(48):(32))
3086. u8 nand_hynix_rr_table[(HYNIX_RR_TABLE_SIZE + 16) / 16 * 16]; /* aligned to 16 bytes */
  3087. #define NAND_HYX_RR_TBL_BUF nand_hynix_rr_table
  3088. static u8 real_hynix_rr_table_idx;
  3089. static u32 g_hynix_retry_count;
  3090. static bool hynix_rr_table_select(u8 table_index, flashdev_info *deviceinfo)
  3091. {
  3092. u32 i;
  3093. u32 table_size =
  3094. (deviceinfo->feature_set.FeatureSet.rtype ==
  3095. RTYPE_HYNIX_16NM) ? SINGLE_RR_TABLE_16NM_SIZE : SINGLE_RR_TABLE_SIZE;
  3096. for (i = 0; i < table_size; i++) {
  3097. u8 *temp_rr_table = (u8 *) NAND_HYX_RR_TBL_BUF + table_size * table_index * 2 + 2;
  3098. u8 *temp_inversed_rr_table =
  3099. (u8 *) NAND_HYX_RR_TBL_BUF + table_size * table_index * 2 + table_size + 2;
  3100. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3101. temp_rr_table += 14;
  3102. temp_inversed_rr_table += 14;
  3103. }
  3104. if (0xFF != (temp_rr_table[i] ^ temp_inversed_rr_table[i]))
  3105. return FALSE; /* error table */
  3106. }
  3107. /* print table*/
  3108. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
  3109. table_size += 16;
  3110. else
  3111. table_size += 2;
  3112. return TRUE; /* correct table */
  3113. }
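/*
 * Validation rule used above: Hynix stores each retry table immediately
 * followed by its bitwise inverse, so entry[i] ^ inverse[i] must be 0xFF for
 * every byte (e.g. 0x3C ^ 0xC3 = 0xFF); one mismatch rejects the whole
 * candidate copy and the caller tries the next of the eight copies.
 */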
  3114. static void HYNIX_RR_TABLE_READ(flashdev_info *deviceinfo)
  3115. {
  3116. u32 reg_val = 0;
  3117. u32 read_count = 0, max_count = HYNIX_RR_TABLE_SIZE;
  3118. u32 timeout = 0xffff;
  3119. u8 *rr_table = (u8 *) (NAND_HYX_RR_TBL_BUF);
  3120. u8 table_index = 0;
3121. u8 add_reg1[2] = { 0xFF, 0xCC };
3122. u8 data_reg1[2] = { 0x40, 0x4D };
3123. u8 cmd_reg[5] = { 0x16, 0x17, 0x04, 0x19, 0x00 };
3124. u8 add_reg2[5] = { 0x00, 0x00, 0x00, 0x02, 0x00 };
  3125. bool RR_TABLE_EXIST = TRUE;
  3126. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3127. read_count = 1;
  3128. add_reg1[1] = 0x38;
  3129. data_reg1[1] = 0x52;
  3130. max_count = HYNIX_16NM_RR_TABLE_SIZE;
  3131. if (READ_RETRY_STEP == 12)
  3132. add_reg2[2] = 0x1F;
  3133. }
  3134. mtk_nand_device_reset();
  3135. mtk_nand_reset();
  3136. DRV_WriteReg16(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
  3137. mtk_nand_set_command(0x36);
  3138. for (; read_count < 2; read_count++) {
  3139. mtk_nand_set_address(add_reg1[read_count], 0, 1, 0);
  3140. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
  3141. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3142. timeout = 0xffff;
  3143. WAIT_NFI_PIO_READY(timeout);
  3144. DRV_WriteReg32(NFI_DATAW_REG32, data_reg1[read_count]);
  3145. mtk_nand_reset();
  3146. }
  3147. for (read_count = 0; read_count < 5; read_count++)
  3148. mtk_nand_set_command(cmd_reg[read_count]);
  3149. for (read_count = 0; read_count < 5; read_count++)
  3150. mtk_nand_set_address(add_reg2[read_count], 0, 1, 0);
  3151. mtk_nand_set_command(0x30);
  3152. DRV_WriteReg16(NFI_CNRNB_REG16, 0xF1);
  3153. timeout = 0xffff;
  3154. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--))
  3155. ;
  3156. reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN);
  3157. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3158. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BRD | (2 << CON_NFI_SEC_SHIFT)));
  3159. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  3160. timeout = 0xffff;
  3161. read_count = 0;
  3162. while ((read_count < max_count) && timeout) {
  3163. WAIT_NFI_PIO_READY(timeout);
  3164. *rr_table++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
  3165. read_count++;
  3166. timeout = 0xFFFF;
  3167. }
  3168. mtk_nand_device_reset();
  3169. reg_val = (CNFG_OP_CUST | CNFG_BYTE_RW);
  3170. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3171. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3172. mtk_nand_set_command(0x36);
  3173. mtk_nand_set_address(0x38, 0, 1, 0);
  3174. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
  3175. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3176. WAIT_NFI_PIO_READY(timeout);
  3177. DRV_WriteReg32(NFI_DATAW_REG32, 0x00);
  3178. mtk_nand_reset();
  3179. mtk_nand_set_command(0x16);
  3180. mtk_nand_set_command(0x00);
  3181. mtk_nand_set_address(0x00, 0, 1, 0);
  3182. mtk_nand_set_command(0x30);
  3183. } else {
  3184. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3185. mtk_nand_set_command(0x38);
  3186. }
  3187. timeout = 0xffff;
  3188. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN) && (timeout--))
  3189. ;
  3190. rr_table = (u8 *) (NAND_HYX_RR_TBL_BUF);
  3191. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX) {
  3192. if ((rr_table[0] != 8) || (rr_table[1] != 8)) {
  3193. RR_TABLE_EXIST = FALSE;
  3194. ASSERT(0);
  3195. }
  3196. } else if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3197. for (read_count = 0; read_count < 8; read_count++) {
  3198. if ((rr_table[read_count] != 8) || (rr_table[read_count + 8] != 4)) {
  3199. RR_TABLE_EXIST = FALSE;
  3200. break;
  3201. }
  3202. }
  3203. }
  3204. if (RR_TABLE_EXIST) {
  3205. for (table_index = 0; table_index < 8; table_index++) {
  3206. if (hynix_rr_table_select(table_index, deviceinfo)) {
  3207. real_hynix_rr_table_idx = table_index;
  3208. MSG(INIT, "Hynix rr_tbl_id %d\n", real_hynix_rr_table_idx);
  3209. break;
  3210. }
  3211. }
  3212. if (table_index == 8)
  3213. ASSERT(0);
  3214. } else {
  3215. MSG(INIT, "Hynix RR table index error!\n");
  3216. }
  3217. }
  3218. static void HYNIX_Set_RR_Para(u32 rr_index, flashdev_info *deviceinfo)
  3219. {
  3220. u32 timeout = 0xffff;
  3221. u8 count, max_count = 8;
  3222. u8 add_reg[9] = { 0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF };
  3223. u8 *hynix_rr_table =
  3224. (u8 *) NAND_HYX_RR_TBL_BUF + SINGLE_RR_TABLE_SIZE * real_hynix_rr_table_idx * 2 + 2;
  3225. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3226. add_reg[0] = 0x38;
  3227. for (count = 1; count < 4; count++)
  3228. add_reg[count] = add_reg[0] + count;
  3229. hynix_rr_table += 14;
  3230. max_count = 4;
  3231. }
  3232. mtk_nand_reset();
  3233. DRV_WriteReg16(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW));
  3234. mtk_nand_set_command(0x36);
  3235. for (count = 0; count < max_count; count++) {
  3236. mtk_nand_set_address(add_reg[count], 0, 1, 0);
  3237. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_BWR | (1 << CON_NFI_SEC_SHIFT)));
  3238. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3239. timeout = 0xffff;
  3240. WAIT_NFI_PIO_READY(timeout);
  3241. if (timeout == 0) {
  3242. pr_err("HYNIX_Set_RR_Para timeout\n");
  3243. break;
  3244. }
  3245. DRV_WriteReg32(NFI_DATAW_REG32, hynix_rr_table[rr_index * max_count + count]);
  3246. }
  3247. mtk_nand_set_command(0x16);
  3248. mtk_nand_reset();
  3249. }
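/*
 * Hedged note: 0x36 opens and 0x16 closes what appears to be Hynix's vendor
 * set-parameter window; in between, each register in add_reg[] receives one
 * byte of retry step rr_index from the verified table copy selected by
 * real_hynix_rr_table_idx.
 */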
  3250. #if 0
  3251. static void HYNIX_Get_RR_Para(u32 rr_index, flashdev_info *deviceinfo)
  3252. {
  3253. u32 reg_val = 0;
  3254. u32 timeout = 0xffff;
  3255. u8 count, max_count = 8;
  3256. u8 add_reg[9] = { 0xCC, 0xBF, 0xAA, 0xAB, 0xCD, 0xAD, 0xAE, 0xAF };
  3257. u8 *hynix_rr_table =
  3258. (u8 *) NAND_HYX_RR_TBL_BUF + SINGLE_RR_TABLE_SIZE * real_hynix_rr_table_idx * 2 + 2;
  3259. if (deviceinfo->feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM) {
  3260. add_reg[0] = 0x38;
  3261. for (count = 1; count < 4; count++)
  3262. add_reg[count] = add_reg[0] + count;
  3263. hynix_rr_table += 14;
  3264. max_count = 4;
  3265. }
  3266. mtk_nand_reset();
  3267. DRV_WriteReg16(NFI_CNFG_REG16, (CNFG_OP_CUST | CNFG_BYTE_RW | CNFG_READ_EN));
  3268. for (count = 0; count < max_count; count++) {
  3269. mtk_nand_set_command(0x37);
  3270. mtk_nand_set_address(add_reg[count], 0, 1, 0);
  3271. DRV_WriteReg16(NFI_CON_REG16, (CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT)));
  3272. DRV_WriteReg16(NFI_STRDATA_REG16, 1);
  3273. timeout = 0xffff;
  3274. WAIT_NFI_PIO_READY(timeout);
  3275. if (timeout == 0)
  3276. pr_err("HYNIX_Get_RR_Para timeout\n");
  3277. pr_err("Get[%02X]%02X\n", add_reg[count], DRV_Reg8(NFI_DATAR_REG32));
  3278. mtk_nand_reset();
  3279. }
  3280. }
  3281. #endif
  3282. static void mtk_nand_hynix_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo, u32 retryCount,
  3283. bool defValue)
  3284. {
  3285. if (defValue == FALSE) {
  3286. if (g_hynix_retry_count == READ_RETRY_STEP)
  3287. g_hynix_retry_count = 0;
  3288. pr_err("Hynix Retry %d\n", g_hynix_retry_count);
  3289. HYNIX_Set_RR_Para(g_hynix_retry_count, &deviceinfo);
  3290. g_hynix_retry_count++;
  3291. }
  3292. }
  3293. static void mtk_nand_hynix_16nm_rrtry(struct mtd_info *mtd, flashdev_info deviceinfo,
  3294. u32 retryCount, bool defValue)
  3295. {
  3296. if (defValue == FALSE) {
  3297. if (g_hynix_retry_count == READ_RETRY_STEP)
  3298. g_hynix_retry_count = 0;
  3299. pr_err("Hynix 16nm Retry %d\n", g_hynix_retry_count);
  3300. HYNIX_Set_RR_Para(g_hynix_retry_count, &deviceinfo);
  3301. g_hynix_retry_count++;
  3302. }
  3303. }
  3304. u32 special_rrtry_setting[37] = {
  3305. 0x00000000, 0x7C00007C, 0x787C0004, 0x74780078,
  3306. 0x7C007C08, 0x787C7C00, 0x74787C7C, 0x70747C00,
  3307. 0x7C007800, 0x787C7800, 0x74787800, 0x70747800,
  3308. 0x6C707800, 0x00040400, 0x7C000400, 0x787C040C,
  3309. 0x7478040C, 0x7C000810, 0x00040810, 0x04040C0C,
  3310. 0x00040C10, 0x00081014, 0x000C1418, 0x7C040C0C,
  3311. 0x74787478, 0x70747478, 0x6C707478, 0x686C7478,
  3312. 0x74787078, 0x70747078, 0x686C7078, 0x6C707078,
  3313. 0x6C706C78, 0x686C6C78, 0x64686C78, 0x686C6874,
  3314. 0x64686874,
  3315. };
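/*
 * Each special_rrtry_setting word packs four per-step offset bytes that are
 * handed to SetFeature as the P1..P4 parameter bytes via (u8 *)&feature.
 * Worked example for step 1 (0x7C00007C): on a little-endian CPU the bytes
 * sent are {0x7C, 0x00, 0x00, 0x7C}, 0x7C being -4 in two's complement.
 */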
3316. u32 special_mlcslc_rrtry_setting[23] = {
3317. 0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x7C, 0x78,
3318. 0x74, 0x18, 0x1C, 0x20, 0x70, 0x6C, 0x68, 0x24,
3319. 0x28, 0x2C, 0x64, 0x60, 0x5C, 0x58, 0x54,
3320. };
  3321. u32 sandisk_tlc_rrtbl_12h[40] = {
  3322. 0x00000000, 0x08000004, 0x00000404, 0x04040408,
  3323. 0x08040408, 0x0004080C, 0x04040810, 0x0C0C0C00,
  3324. 0x0E0E0E00, 0x10101000, 0x12121200, 0x080808FC,
  3325. 0xFC08FCF8, 0x0000FBF6, 0x0408FBF4, 0xFEFCF8FA,
  3326. 0xFCF8F4EC, 0xF8F8F8EC, 0x0002FCE4, 0xFCFEFEFE,
  3327. 0xFFFC00FD, 0xFEFB00FC, 0xFEFAFEFA, 0xFDF9FDFA,
  3328. 0xFBF8FBFA, 0xF9F7FAF8, 0xF8F6F9F4, 0xF5F4F8F2,
  3329. 0xF4F2F6EE, 0xF0F0F4E8, 0xECECF0E6, 0x020400FA,
  3330. 0x00FEFFF8, 0xFEFEFDF6, 0xFDFDFCF4, 0xFBFCFCF2,
  3331. 0xF9FBFBF0, 0xF8F9F9EE, 0xF6F8F8ED, 0xF4F7F6EA,
  3332. };
  3333. u32 sandisk_tlc_rrtbl_13h[40] = {
  3334. 0x00000000, 0x00040800, 0x00080004, 0x00020404,
  3335. 0x00040800, 0x00080000, 0x00FC0000, 0x000C0C0C,
  3336. 0x000E0E0E, 0x00101010, 0x00141414, 0x000008FC,
  3337. 0x0004FCF8, 0x00FC00F6, 0x00FC0404, 0x00FCFE08,
  3338. 0x00FCFC00, 0x00F8F8FA, 0x000000F4, 0x00FAFC02,
  3339. 0x00F8FF00, 0x00F6FDFE, 0x00F4FBFC, 0x00F2F9FA,
  3340. 0x00F0F7F8, 0x00EEF5F6, 0x00ECF3F4, 0x00EAF1F2,
  3341. 0x00E8ECEE, 0x00E0E4E8, 0x00DAE0E2, 0x00000000,
  3342. 0x00FEFEFE, 0x00FBFCFC, 0x00F9FAFA, 0x00F7F8F8,
  3343. 0x00F5F6F6, 0x00F3F4F4, 0x00F1F2F2, 0x00EFF0EF,
  3344. };
  3345. u32 sandisk_tlc_rrtbl_14h[11] = {
  3346. 0x00000000, 0x00000010, 0x00000020, 0x00000030,
  3347. 0x00000040, 0x00000050, 0x00000060, 0x000000F0,
  3348. 0x000000E0, 0x000000D0, 0x000000C0,
  3349. };
  3350. static void mtk_nand_sandisk_tlc_1ynm_rrtry(struct mtd_info *mtd,
  3351. struct flashdev_info_t deviceinfo, u32 feature, bool defValue)
  3352. {
  3353. u16 reg_val = 0;
  3354. u32 timeout = TIMEOUT_3;
  3355. u32 value1, value2, value3;
  3356. if ((feature > 1) || defValue) {
  3357. mtk_nand_reset();
  3358. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
  3359. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3360. mtk_nand_set_command(0x55);
  3361. mtk_nand_set_address(0, 0, 1, 0);
  3362. DRV_WriteReg32(NFI_CON_REG16, 1 << CON_NFI_SEC_SHIFT);
  3363. NFI_SET_REG32(NFI_CON_REG16, CON_NFI_BWR);
  3364. DRV_WriteReg16(NFI_STRDATA_REG16, 0x1);
  3365. WAIT_NFI_PIO_READY(timeout);
  3366. if (timeout == 0)
  3367. MSG(INIT, "mtk_nand_sandisk_tlc_1ynm_rrtry: timeout\n");
  3368. DRV_WriteReg32(NFI_DATAW_REG32, 0);
  3369. mtk_nand_device_reset();
  3370. }
  3371. if (gn_devinfo.tlcControl.slcopmodeEn) {
  3372. value3 = sandisk_tlc_rrtbl_14h[feature];
  3373. mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd, 0x14, (u8 *)&value3, 4);
  3374. } else {
  3375. value1 = sandisk_tlc_rrtbl_12h[feature];
  3376. value2 = sandisk_tlc_rrtbl_13h[feature];
  3377. mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd, 0x12, (u8 *)&value1, 4);
  3378. mtk_nand_SetFeature(mtd, deviceinfo.feature_set.FeatureSet.sfeatureCmd, 0x13, (u8 *)&value2, 4);
  3379. }
  3380. if (FALSE == defValue) {
  3381. mtk_nand_reset();
  3382. reg_val |= (CNFG_OP_CUST | CNFG_BYTE_RW);
  3383. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3384. mtk_nand_set_command(0x5D);
  3385. mtk_nand_reset();
  3386. }
  3387. }
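/*
 * mtk_nand_sandisk_tlc_1ynm_rrtry selects between the tables above by mode:
 * SLC operation writes sandisk_tlc_rrtbl_14h[step] to feature address 0x14,
 * TLC operation writes rrtbl_12h/rrtbl_13h[step] to 0x12/0x13; the matching
 * retry budgets (10 SLC steps vs. the full 40-entry tables) are applied in
 * mtk_nand_exec_read_page below.
 */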
  3388. static u32 mtk_nand_rrtry_setting(flashdev_info deviceinfo, enum readRetryType type, u32 retryStart,
  3389. u32 loopNo)
  3390. {
  3391. u32 value;
3393. if (retryStart != 0xFFFFFFFF)
3394. value = retryStart + loopNo;
3395. else if (gn_devinfo.tlcControl.slcopmodeEn)
3397. value = special_mlcslc_rrtry_setting[loopNo];
3398. else
3399. value = special_rrtry_setting[loopNo];
  3402. return value;
  3403. }
  3404. typedef void(*rrtryFunctionType) (struct mtd_info *mtd, flashdev_info deviceinfo, u32 feature,
  3405. bool defValue);
  3406. static rrtryFunctionType rtyFuncArray[] = {
  3407. mtk_nand_micron_rrtry,
  3408. mtk_nand_sandisk_rrtry,
  3409. mtk_nand_sandisk_19nm_rrtry,
  3410. mtk_nand_toshiba_rrtry,
  3411. mtk_nand_hynix_rrtry,
  3412. mtk_nand_hynix_16nm_rrtry,
  3413. mtk_nand_sandisk_tlc_1ynm_rrtry,
  3414. mtk_nand_toshiba_tlc_rrtry
  3415. };
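/*
 * rtyFuncArray is dispatched on deviceinfo.feature_set.FeatureSet.rtype in
 * mtk_nand_rrtry_func() below, so the RTYPE_* enum order must match the
 * array order: Micron, SanDisk, SanDisk 19nm, Toshiba, Hynix, Hynix 16nm,
 * SanDisk TLC 1Ynm, Toshiba TLC.
 */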
  3416. static void mtk_nand_rrtry_func(struct mtd_info *mtd, flashdev_info deviceinfo, u32 feature,
  3417. bool defValue)
  3418. {
  3419. if (MLC_DEVICE)
  3420. rtyFuncArray[deviceinfo.feature_set.FeatureSet.rtype] (mtd, deviceinfo, feature,
  3421. defValue);
  3422. }
  3423. /******************************************************************************
  3424. * mtk_nand_exec_read_page
  3425. *
  3426. * DESCRIPTION:
  3427. *Read a page data !
  3428. *
  3429. * PARAMETERS:
  3430. *struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
  3431. *u8 *pPageBuf, u8 *pFDMBuf
  3432. *
  3433. * RETURNS:
  3434. *None
  3435. *
  3436. * NOTES:
  3437. *None
  3438. *
  3439. ******************************************************************************/
  3440. int mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 *pPageBuf,
  3441. u8 *pFDMBuf)
  3442. {
  3443. u8 *buf;
  3444. int bRet = ERR_RTN_SUCCESS;
  3445. struct nand_chip *nand = mtd->priv;
  3446. u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;
  3447. u32 backup_corrected, backup_failed;
  3448. bool readRetry = FALSE;
  3449. int retryCount = 0;
  3450. u32 retrytotalcnt = gn_devinfo.feature_set.FeatureSet.readRetryCnt;
  3451. u32 tempBitMap, bitMap;
  3452. #ifdef NAND_PFM
  3453. struct timeval pfm_time_read;
  3454. #endif
  3455. struct NFI_TLC_WL_INFO tlc_wl_info;
  3456. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3457. bool tlc_left_plane = TRUE;
  3458. unsigned int phys = 0;
  3459. #endif
  3460. u32 reg_val = 0;
  3461. u32 real_row_addr = 0;
  3462. u32 logical_plane_num = 1;
  3463. u32 data_sector_num = 0;
  3464. u8 *temp_byte_ptr = NULL;
  3465. u8 *spare_ptr = NULL;
  3466. #if 0
  3467. unsigned short PageFmt_Reg = 0;
  3468. unsigned int NAND_ECC_Enc_Reg = 0;
  3469. unsigned int NAND_ECC_Dec_Reg = 0;
  3470. #endif
3471. u32 block_addr = 0;
3472. u32 page_in_block = 0;
3473. u32 page_per_block = 0;
  3474. /*MSG(INIT, "mtk_nand_exec_read_page, u4RowAddr: %x\n", u4RowAddr);*/
  3475. page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
  3476. PFM_BEGIN(pfm_time_read);
  3477. tempBitMap = 0;
  3478. if (((u32) pPageBuf % 16) && local_buffer_16_align)
  3479. buf = local_buffer_16_align;
  3480. else {
  3481. if (virt_addr_valid(pPageBuf) == 0)
  3482. buf = local_buffer_16_align;
  3483. else
  3484. buf = pPageBuf;
  3485. }
  3486. backup_corrected = mtd->ecc_stats.corrected;
  3487. backup_failed = mtd->ecc_stats.failed;
  3488. bitMap = 0;
  3489. do {
  3490. mtk_nand_interface_switch(mtd);
  3491. data_sector_num = u4SecNum;
  3492. temp_byte_ptr = buf;
  3493. spare_ptr = pFDMBuf;
  3494. logical_plane_num = 1;
3495. tlc_wl_info.wl_pre = WL_LOW_PAGE; /* initialized to silence a build warning */
  3496. tlc_wl_info.word_line_idx = u4RowAddr;
  3497. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3498. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  3499. if (gn_devinfo.tlcControl.normaltlc) {
  3500. NFI_TLC_GetMappedWL(u4RowAddr, &tlc_wl_info);
  3501. real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
  3502. if (gn_devinfo.tlcControl.pPlaneEn) {
  3503. tlc_left_plane = TRUE;
  3504. logical_plane_num = 2;
  3505. data_sector_num /= 2;
  3506. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
  3507. }
  3508. /* MSG(INIT, "mtk_nand_exec_read_page, u4RowAddr: 0x%x real_row_addr 0x%x %d\n",
  3509. u4RowAddr, real_row_addr, gn_devinfo.tlcControl.slcopmodeEn); */
  3510. } else {
  3511. real_row_addr = NFI_TLC_GetRowAddr(u4RowAddr);
  3512. }
  3513. if (gn_devinfo.tlcControl.slcopmodeEn) {
  3514. if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
  3515. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3516. reg_val &= ~CNFG_READ_EN;
  3517. reg_val &= ~CNFG_OP_MODE_MASK;
  3518. reg_val |= CNFG_OP_CUST;
  3519. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3520. mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
  3521. reg_val = DRV_Reg32(NFI_CON_REG16);
  3522. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  3523. /* issue reset operation */
  3524. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  3525. }
  3526. } else {
  3527. if (gn_devinfo.tlcControl.normaltlc) {
  3528. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3529. reg_val &= ~CNFG_READ_EN;
  3530. reg_val &= ~CNFG_OP_MODE_MASK;
  3531. reg_val |= CNFG_OP_CUST;
  3532. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3533. if (tlc_wl_info.wl_pre == WL_LOW_PAGE)
  3534. mtk_nand_set_command(LOW_PG_SELECT_CMD);
  3535. else if (tlc_wl_info.wl_pre == WL_MID_PAGE)
  3536. mtk_nand_set_command(MID_PG_SELECT_CMD);
  3537. else if (tlc_wl_info.wl_pre == WL_HIGH_PAGE)
  3538. mtk_nand_set_command(HIGH_PG_SELECT_CMD);
  3539. reg_val = DRV_Reg32(NFI_CON_REG16);
  3540. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  3541. /* issue reset operation */
  3542. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  3543. }
  3544. }
  3545. reg_val = 0;
  3546. } else
  3547. #endif
  3548. real_row_addr = u4RowAddr;
  3549. /* if(force_slc_flag == 1)
  3550. gn_devinfo.tlcControl.slcopmodeEn = true; */
  3551. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
  3552. if (gn_devinfo.tlcControl.slcopmodeEn) {
  3553. if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
  3554. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3555. reg_val &= ~CNFG_READ_EN;
  3556. reg_val &= ~CNFG_OP_MODE_MASK;
  3557. reg_val |= CNFG_OP_CUST;
  3558. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3559. mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
  3560. reg_val = DRV_Reg32(NFI_CON_REG16);
  3561. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  3562. /* issue reset operation */
  3563. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  3564. if (gn_devinfo.vendor == VEND_SANDISK) {
  3565. block_addr = real_row_addr/page_per_block;
  3566. page_in_block = real_row_addr % page_per_block;
  3567. page_in_block <<= 1;
  3568. real_row_addr = page_in_block + block_addr * page_per_block;
  3569. /* pr_err("mtk_nand_exec_read_sector SLC Mode real_row_addr:%d, u4RowAddr:%d\n",
  3570. real_row_addr, u4RowAddr); */
  3571. }
  3572. }
  3573. } else {
  3574. if (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd) {
  3575. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3576. reg_val &= ~CNFG_READ_EN;
  3577. reg_val &= ~CNFG_OP_MODE_MASK;
  3578. reg_val |= CNFG_OP_CUST;
  3579. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3580. mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
  3581. reg_val = DRV_Reg32(NFI_CON_REG16);
  3582. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  3583. /* issue reset operation */
  3584. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  3585. }
  3586. }
  3587. }
  3588. if (use_randomizer && u4RowAddr >= RAND_START_ADDR) {
  3589. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  3590. if (gn_devinfo.tlcControl.slcopmodeEn)
  3591. mtk_nand_turn_on_randomizer(mtd, nand, tlc_wl_info.word_line_idx);
  3592. else
  3593. mtk_nand_turn_on_randomizer(mtd, nand,
  3594. (tlc_wl_info.word_line_idx*3+tlc_wl_info.wl_pre));
  3595. } else
  3596. mtk_nand_turn_on_randomizer(mtd, nand, u4RowAddr);
  3597. }
  3598. if (mtk_nand_ready_for_read(nand, real_row_addr, 0, data_sector_num, true, buf, NORMAL_READ)) {
  3599. while (logical_plane_num) {
  3600. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3601. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  3602. if (gn_devinfo.tlcControl.needchangecolumn) {
  3603. if (gn_devinfo.tlcControl.pPlaneEn)
  3604. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
  3605. #if 1
  3606. reg_val = DRV_Reg32(NFI_CON_REG16);
  3607. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  3608. /* issue reset operation */
  3609. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  3610. #endif
  3611. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3612. reg_val &= ~CNFG_READ_EN;
  3613. reg_val &= ~CNFG_OP_MODE_MASK;
  3614. reg_val |= CNFG_OP_CUST;
  3615. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3616. mtk_nand_set_command(CHANGE_COLUNM_ADDR_1ST_CMD);
  3617. mtk_nand_set_address(0, real_row_addr, 2, gn_devinfo.addr_cycle - 2);
  3618. mtk_nand_set_command(CHANGE_COLUNM_ADDR_2ND_CMD);
  3619. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  3620. reg_val |= CNFG_READ_EN;
  3621. reg_val &= ~CNFG_OP_MODE_MASK;
  3622. reg_val |= CNFG_OP_READ;
  3623. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  3624. }
  3625. }
  3626. mtk_dir = DMA_FROM_DEVICE;
  3627. sg_init_one(&mtk_sg, temp_byte_ptr, (data_sector_num * (1 << host->hw->nand_sec_shift)));
  3628. dma_map_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
  3629. phys = mtk_sg.dma_address;
  3630. #if __INTERNAL_USE_AHB_MODE__
  3631. if (!phys)
  3632. pr_warn("[%s]convert virt addr (%lx) to phys add (%x)fail!!!",
  3633. __func__, (unsigned long) temp_byte_ptr, phys);
  3634. else
  3635. DRV_WriteReg32(NFI_STRADDR_REG32, phys);
  3636. #endif
  3637. DRV_WriteReg32(NFI_CON_REG16, data_sector_num << CON_NFI_SEC_SHIFT);
  3638. if (g_bHwEcc)
  3639. ECC_Decode_Start();
  3640. #endif
  3641. if (!mtk_nand_read_page_data(mtd, temp_byte_ptr,
  3642. data_sector_num * (1 << host->hw->nand_sec_shift))) {
  3643. MSG(INIT, "mtk_nand_read_page_data fail\n");
  3644. bRet = ERR_RTN_FAIL;
  3645. }
  3646. dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
  3647. if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
  3648. MSG(INIT, "mtk_nand_status_ready fail\n");
  3649. bRet = ERR_RTN_FAIL;
  3650. }
  3651. if (g_bHwEcc) {
  3652. if (!mtk_nand_check_dececc_done(data_sector_num)) {
  3653. MSG(INIT, "mtk_nand_check_dececc_done fail\n");
  3654. bRet = ERR_RTN_FAIL;
  3655. }
  3656. }
  3657. mtk_nand_read_fdm_data(spare_ptr, data_sector_num);
  3658. if (g_bHwEcc) {
  3659. if (!mtk_nand_check_bch_error
  3660. (mtd, temp_byte_ptr, spare_ptr, data_sector_num - 1, u4RowAddr, &tempBitMap)) {
  3661. if (gn_devinfo.vendor != VEND_NONE)
  3662. readRetry = TRUE;
  3663. MSG(INIT, "mtk_nand_check_bch_error fail, retryCount: %d\n",
  3664. retryCount);
  3665. bRet = ERR_RTN_BCH_FAIL;
  3666. } else {
  3667. if ((0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY)) &&
  3668. (retryCount != 0)) {
  3669. MSG(INIT,
  3670. "read retry read empty page, return as uncorrectable\n");
  3671. mtd->ecc_stats.failed += data_sector_num;
  3672. bRet = ERR_RTN_BCH_FAIL;
  3673. }
  3674. }
  3675. }
  3676. mtk_nand_stop_read();
  3677. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3678. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  3679. if (gn_devinfo.tlcControl.needchangecolumn)
  3680. DRV_WriteReg16(NFI_TLC_RD_WHR2_REG16, 0x055);
  3681. if (2 == logical_plane_num) {
  3682. tlc_left_plane = FALSE;
  3683. spare_ptr += (host->hw->nand_fdm_size * data_sector_num);
  3684. #if __INTERNAL_USE_AHB_MODE__
  3685. temp_byte_ptr += (data_sector_num * (1 << host->hw->nand_sec_shift));
  3686. #else
3687. temp_byte_ptr +=
3688. (data_sector_num * ((1 << host->hw->nand_sec_shift) + spare_per_sector));
  3689. #endif
  3690. }
  3691. }
  3692. #endif
  3693. logical_plane_num--;
  3694. if (bRet == ERR_RTN_BCH_FAIL)
  3695. break;
  3696. }
  3697. }
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
		else
			dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
#endif
		if (use_randomizer && u4RowAddr >= RAND_START_ADDR)
			mtk_nand_turn_off_randomizer();
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC
		    || gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
			if ((gn_devinfo.tlcControl.slcopmodeEn)
			    && (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd)) {
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
			}
		}
#endif
		if (bRet == ERR_RTN_BCH_FAIL) {
			u32 feature;

			tempBitMap = 0;
			feature = mtk_nand_rrtry_setting(gn_devinfo,
					gn_devinfo.feature_set.FeatureSet.rtype,
					gn_devinfo.feature_set.FeatureSet.readRetryStart,
					retryCount);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
			if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK_TLC_1YNM)
			    && (gn_devinfo.tlcControl.slcopmodeEn))
				retrytotalcnt = 10;
			if (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_TOSHIBA_TLC) {
				if (gn_devinfo.tlcControl.slcopmodeEn)
					retrytotalcnt = 8;
				else
					retrytotalcnt = 31;
			}
#endif
			if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
				if ((gn_devinfo.tlcControl.slcopmodeEn)
				    && (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK)) {
					retrytotalcnt = 22;
				}
			}
			if (retryCount < retrytotalcnt) {
				mtd->ecc_stats.corrected = backup_corrected;
				mtd->ecc_stats.failed = backup_failed;
				mtk_nand_rrtry_func(mtd, gn_devinfo, feature, FALSE);
				retryCount++;
			} else {
				feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;
				if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK)
				    && (g_sandisk_retry_case < 3)) {
					g_sandisk_retry_case++;
					/* pr_err("Sandisk read retry case#%d\n", g_sandisk_retry_case); */
					tempBitMap = 0;
					mtd->ecc_stats.corrected = backup_corrected;
					mtd->ecc_stats.failed = backup_failed;
					mtk_nand_rrtry_func(mtd, gn_devinfo, feature, FALSE);
					retryCount = 0;
				} else {
					mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
					readRetry = FALSE;
					g_sandisk_retry_case = 0;
				}
			}
			if ((g_sandisk_retry_case == 1) || (g_sandisk_retry_case == 3)) {
				mtk_nand_set_command(0x26);
				/* pr_err("Case1#3# Set cmd 26\n"); */
			}
		} else {
			if ((retryCount != 0) && MLC_DEVICE) {
				u32 feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;

				mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
			}
			readRetry = FALSE;
			g_sandisk_retry_case = 0;
		}
		if (TRUE == readRetry)
			bRet = ERR_RTN_SUCCESS;
	} while (readRetry);
	if (retryCount != 0) {
		u32 feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;

		if (bRet == ERR_RTN_SUCCESS) {
			MSG(INIT, "OK Read retry Buf: %x %x %x %x\n", pPageBuf[0], pPageBuf[1],
			    pPageBuf[2], pPageBuf[3]);
			MSG(INIT,
			    "u4RowAddr: 0x%x read retry pass, retrycnt: %d ENUM0: %x, ENUM1: %x, mtd_ecc(A): %x, mtd_ecc(B): %x\n",
			    u4RowAddr, retryCount, DRV_Reg32(ECC_DECENUM1_REG32),
			    DRV_Reg32(ECC_DECENUM0_REG32), mtd->ecc_stats.failed, backup_failed);
			MSG(INIT, "Read retry over %d times, trigger re-write\n", retryCount);
			mtd->ecc_stats.corrected++;
			if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
			    || (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX))
				g_hynix_retry_count--;
		} else {
			MSG(INIT,
			    "u4RowAddr: 0x%x read retry fail, mtd_ecc(A): %x, mtd_ecc(B): %x\n",
			    u4RowAddr, mtd->ecc_stats.failed, backup_failed);
		}
		mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
		g_sandisk_retry_case = 0;
	}
	if (buf == local_buffer_16_align)
		memcpy(pPageBuf, buf, u4PageSize);
	if (bRet != ERR_RTN_SUCCESS) {
		MSG(INIT, "ECC uncorrectable, fake buffer returned\n");
		memset(pPageBuf, 0xff, u4PageSize);
		memset(pFDMBuf, 0xff, u4SecNum * host->hw->nand_fdm_size);
	}
	PFM_END_R(pfm_time_read, u4PageSize + 32);
	return bRet;
}
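/*
 * Illustrative note (not driver code): the pPlane column split in
 * mtk_nand_exec_read_sector() below works on raw column addresses that
 * include the per-sector spare area. Assuming a hypothetical geometry of
 * sector_per_page = 16, nand_sec_size = 1024 and spare_per_sector = 64,
 * one raw sector is 1088 bytes and the left plane covers columns
 * [0, 8 * 1088) = [0, 8704). A request with u4ColAddr = 7616 (sector 7)
 * and subpageno = 4 ends at 7616 + 4 * 1088 = 11968 > 8704, so it crosses
 * the plane boundary: logical_plane_num becomes 2, data_sector_num[1] =
 * 8 - 7 = 1 sector is read from the left plane at the original column,
 * then data_sector_num[0] = 3 sectors from column 0 of the right plane.
 */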
bool mtk_nand_exec_read_sector(struct mtd_info *mtd, u32 u4RowAddr, u32 u4ColAddr, u32 u4PageSize,
			       u8 *pPageBuf, u8 *pFDMBuf, int subpageno)
{
	u8 *buf;
	int bRet = ERR_RTN_SUCCESS;
	struct nand_chip *nand = mtd->priv;
	u32 u4SecNum = subpageno;
	u32 backup_corrected, backup_failed;
	bool readRetry = FALSE;
	int retryCount = 0;
	u32 retrytotalcnt = gn_devinfo.feature_set.FeatureSet.readRetryCnt;
	u32 tempBitMap;
#ifdef NAND_PFM
	struct timeval pfm_time_read;
#endif
	struct NFI_TLC_WL_INFO tlc_wl_info;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	bool tlc_left_plane = TRUE;
	unsigned int phys = 0;
	u32 sector_per_page = mtd->writesize >> host->hw->nand_sec_shift;
	int spare_per_sector = mtd->oobsize / sector_per_page;
#endif
	u32 reg_val = 0;
	u32 real_row_addr = 0;
	u32 logical_plane_num = 1;
	u32 temp_col_addr[2] = {0, 0};
	u32 data_sector_num[2] = {0, 0};
	u8 *temp_byte_ptr = NULL;
	u8 *spare_ptr = NULL;
	u32 block_addr = 0;
	u32 page_in_block = 0;
	u32 page_per_block = 0;

	/* MSG(INIT, "mtk_nand_exec_read_sector, host->hw->nand_sec_shift: %d\n", host->hw->nand_sec_shift); */
	page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	PFM_BEGIN(pfm_time_read);
	if (((unsigned long) pPageBuf % 16) && local_buffer_16_align)
		buf = local_buffer_16_align;
	else {
		if (virt_addr_valid(pPageBuf) == 0)
			buf = local_buffer_16_align;
		else
			buf = pPageBuf;
	}
	backup_corrected = mtd->ecc_stats.corrected;
	backup_failed = mtd->ecc_stats.failed;
	do {
		mtk_nand_interface_switch(mtd);
		temp_byte_ptr = buf;
		spare_ptr = pFDMBuf;
		temp_col_addr[0] = u4ColAddr;
		temp_col_addr[1] = 0;
		data_sector_num[0] = u4SecNum;
		data_sector_num[1] = 0;
		logical_plane_num = 1;
		tlc_wl_info.word_line_idx = u4RowAddr;
		tlc_wl_info.wl_pre = WL_LOW_PAGE;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
			if (gn_devinfo.tlcControl.normaltlc) {
				NFI_TLC_GetMappedWL(u4RowAddr, &tlc_wl_info);
				real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
				if ((gn_devinfo.tlcControl.pPlaneEn)
				    && (u4ColAddr < ((sector_per_page / 2)
					* (host->hw->nand_sec_size + spare_per_sector)))) {
					tlc_left_plane = TRUE;
					if ((u4ColAddr + (u4SecNum * (host->hw->nand_sec_size + spare_per_sector)))
					    > ((sector_per_page / 2) * (host->hw->nand_sec_size + spare_per_sector))) {
						logical_plane_num = 2;
						data_sector_num[1] =
							(sector_per_page / 2)
							- (u4ColAddr / (host->hw->nand_sec_size + spare_per_sector));
						data_sector_num[0] = u4SecNum - data_sector_num[1];
						temp_col_addr[0] = 0;
						temp_col_addr[1] = u4ColAddr;
					}
					real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
				}
				if (gn_devinfo.tlcControl.pPlaneEn && (u4ColAddr >= ((sector_per_page / 2)
				    * (host->hw->nand_sec_size + spare_per_sector)))) {
					temp_col_addr[0] =
						u4ColAddr - ((sector_per_page / 2)
						* (host->hw->nand_sec_size + spare_per_sector));
					tlc_left_plane = FALSE;
					real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
				}
			} else {
				real_row_addr = NFI_TLC_GetRowAddr(u4RowAddr);
			}
			if (gn_devinfo.tlcControl.slcopmodeEn) {
				if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
					reg_val = DRV_Reg16(NFI_CNFG_REG16);
					reg_val &= ~CNFG_READ_EN;
					reg_val &= ~CNFG_OP_MODE_MASK;
					reg_val |= CNFG_OP_CUST;
					DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
					reg_val = DRV_Reg32(NFI_CON_REG16);
					reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
					/* issue reset operation */
					DRV_WriteReg32(NFI_CON_REG16, reg_val);
				}
			} else {
				if (gn_devinfo.tlcControl.normaltlc) {
					reg_val = DRV_Reg16(NFI_CNFG_REG16);
					reg_val &= ~CNFG_READ_EN;
					reg_val &= ~CNFG_OP_MODE_MASK;
					reg_val |= CNFG_OP_CUST;
					DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					if (tlc_wl_info.wl_pre == WL_LOW_PAGE)
						mtk_nand_set_command(LOW_PG_SELECT_CMD);
					else if (tlc_wl_info.wl_pre == WL_MID_PAGE)
						mtk_nand_set_command(MID_PG_SELECT_CMD);
					else if (tlc_wl_info.wl_pre == WL_HIGH_PAGE)
						mtk_nand_set_command(HIGH_PG_SELECT_CMD);
					reg_val = DRV_Reg32(NFI_CON_REG16);
					reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
					/* issue reset operation */
					DRV_WriteReg32(NFI_CON_REG16, reg_val);
				}
			}
			reg_val = 0;
		} else
#endif
		{
			real_row_addr = u4RowAddr;
		}
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
			if (gn_devinfo.tlcControl.slcopmodeEn) {
				if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
					reg_val = DRV_Reg16(NFI_CNFG_REG16);
					reg_val &= ~CNFG_READ_EN;
					reg_val &= ~CNFG_OP_MODE_MASK;
					reg_val |= CNFG_OP_CUST;
					DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
					reg_val = DRV_Reg32(NFI_CON_REG16);
					reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
					/* issue reset operation */
					DRV_WriteReg32(NFI_CON_REG16, reg_val);
					if (gn_devinfo.vendor == VEND_SANDISK) {
						block_addr = real_row_addr / page_per_block;
						page_in_block = real_row_addr % page_per_block;
						page_in_block <<= 1;
						real_row_addr = page_in_block + block_addr * page_per_block;
						/* pr_err("mtk_nand_exec_read_sector SLC Mode real_row_addr:%d, u4RowAddr:%d\n",
						   real_row_addr, u4RowAddr); */
					}
				}
			} else {
				if (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd) {
					reg_val = DRV_Reg16(NFI_CNFG_REG16);
					reg_val &= ~CNFG_READ_EN;
					reg_val &= ~CNFG_OP_MODE_MASK;
					reg_val |= CNFG_OP_CUST;
					DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
					reg_val = DRV_Reg32(NFI_CON_REG16);
					reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
					/* issue reset operation */
					DRV_WriteReg32(NFI_CON_REG16, reg_val);
				}
			}
		}
		if (use_randomizer && u4RowAddr >= RAND_START_ADDR) {
			if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
				if (gn_devinfo.tlcControl.slcopmodeEn)
					mtk_nand_turn_on_randomizer(mtd, nand, tlc_wl_info.word_line_idx);
				else
					mtk_nand_turn_on_randomizer(mtd, nand,
						(tlc_wl_info.word_line_idx * 3 + tlc_wl_info.wl_pre));
			} else
				mtk_nand_turn_on_randomizer(mtd, nand, u4RowAddr);
		}
		if (mtk_nand_ready_for_read
		    (nand, real_row_addr, temp_col_addr[logical_plane_num - 1],
		     data_sector_num[logical_plane_num - 1], true, buf, NORMAL_READ)) {
			while (logical_plane_num) {
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
				if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
					if (gn_devinfo.tlcControl.needchangecolumn) {
						if (gn_devinfo.tlcControl.pPlaneEn)
							real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
						reg_val = DRV_Reg32(NFI_CON_REG16);
						reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
						/* issue reset operation */
						DRV_WriteReg32(NFI_CON_REG16, reg_val);
						reg_val = DRV_Reg16(NFI_CNFG_REG16);
						reg_val &= ~CNFG_READ_EN;
						reg_val &= ~CNFG_OP_MODE_MASK;
						reg_val |= CNFG_OP_CUST;
						DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
						mtk_nand_set_command(CHANGE_COLUNM_ADDR_1ST_CMD);
						mtk_nand_set_address
							(temp_col_addr[logical_plane_num - 1], real_row_addr,
							 2, gn_devinfo.addr_cycle - 2);
						mtk_nand_set_command(CHANGE_COLUNM_ADDR_2ND_CMD);
						reg_val = DRV_Reg16(NFI_CNFG_REG16);
						reg_val |= CNFG_READ_EN;
						reg_val &= ~CNFG_OP_MODE_MASK;
						reg_val |= CNFG_OP_READ;
						DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					}
				}
				mtk_dir = DMA_FROM_DEVICE;
				sg_init_one(&mtk_sg, temp_byte_ptr, (data_sector_num[logical_plane_num - 1]
					    * (1 << host->hw->nand_sec_shift)));
				dma_map_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
				phys = mtk_sg.dma_address;
#if __INTERNAL_USE_AHB_MODE__
				if (!phys)
					pr_warn("[%s]convert virt addr (%lx) to phys addr (%x) fail!!!",
						__func__, (unsigned long) temp_byte_ptr, phys);
				else
					DRV_WriteReg32(NFI_STRADDR_REG32, phys);
#endif
				DRV_WriteReg32(NFI_CON_REG16,
					       data_sector_num[logical_plane_num - 1] << CON_NFI_SEC_SHIFT);
				if (g_bHwEcc)
					ECC_Decode_Start();
#endif
				if (!mtk_nand_read_page_data
				    (mtd, temp_byte_ptr, data_sector_num[logical_plane_num - 1]
				     * (1 << host->hw->nand_sec_shift))) {
					MSG(INIT, "mtk_nand_read_page_data fail\n");
					bRet = ERR_RTN_FAIL;
				}
				dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
				if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
					MSG(INIT, "mtk_nand_status_ready fail\n");
					bRet = ERR_RTN_FAIL;
				}
				if (g_bHwEcc) {
					if (!mtk_nand_check_dececc_done(data_sector_num[logical_plane_num - 1])) {
						MSG(INIT, "mtk_nand_check_dececc_done fail\n");
						bRet = ERR_RTN_FAIL;
					}
				}
				mtk_nand_read_fdm_data(spare_ptr, data_sector_num[logical_plane_num - 1]);
				if (g_bHwEcc) {
					if (!mtk_nand_check_bch_error
					    (mtd, temp_byte_ptr, spare_ptr, data_sector_num[logical_plane_num - 1] - 1,
					     u4RowAddr, NULL)) {
						if (gn_devinfo.vendor != VEND_NONE)
							readRetry = TRUE;
						MSG(INIT, "mtk_nand_check_bch_error fail, retryCount:%d\n", retryCount);
						bRet = ERR_RTN_BCH_FAIL;
					} else {
						if (0 != (DRV_Reg32(NFI_STA_REG32) & STA_READ_EMPTY) &&
						    (retryCount != 0)) {
							MSG(INIT, "NFI read retry read empty page, return as uecc\n");
							mtd->ecc_stats.failed += data_sector_num[logical_plane_num - 1];
							bRet = ERR_RTN_BCH_FAIL;
						}
					}
				}
				mtk_nand_stop_read();
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
				if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
					if (gn_devinfo.tlcControl.needchangecolumn)
						DRV_WriteReg16(NFI_TLC_RD_WHR2_REG16, 0x055);
					if (2 == logical_plane_num) {
						tlc_left_plane = FALSE;
						spare_ptr += (host->hw->nand_fdm_size
							      * data_sector_num[logical_plane_num - 1]);
#if __INTERNAL_USE_AHB_MODE__
						temp_byte_ptr += (data_sector_num[logical_plane_num - 1]
								  * (1 << host->hw->nand_sec_shift));
#else
						temp_byte_ptr += (data_sector_num[logical_plane_num - 1]
								  * ((1 << host->hw->nand_sec_shift)
								     + spare_per_sector));
#endif
					}
				}
#endif
				logical_plane_num--;
				if (bRet == ERR_RTN_BCH_FAIL)
					break;
			}
		}
#ifndef CONFIG_MTK_TLC_NAND_SUPPORT
		else
			dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
#endif
		if (use_randomizer && u4RowAddr >= RAND_START_ADDR)
			mtk_nand_turn_off_randomizer();
		/* if (force_slc_flag == 1)
		   gn_devinfo.tlcControl.slcopmodeEn = true; */
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC
		    || gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
			if ((gn_devinfo.tlcControl.slcopmodeEn)
			    && (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd)) {
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
			}
		}
#endif
		if (bRet == ERR_RTN_BCH_FAIL) {
			u32 feature = mtk_nand_rrtry_setting(gn_devinfo,
					gn_devinfo.feature_set.FeatureSet.rtype,
					gn_devinfo.feature_set.FeatureSet.readRetryStart, retryCount);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
			if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK_TLC_1YNM)
			    && (gn_devinfo.tlcControl.slcopmodeEn))
				retrytotalcnt = 10;
			if (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_TOSHIBA_TLC) {
				if (gn_devinfo.tlcControl.slcopmodeEn)
					retrytotalcnt = 8;
				else
					retrytotalcnt = 31;
			}
#endif
			if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
				if ((gn_devinfo.tlcControl.slcopmodeEn)
				    && (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK)) {
					retrytotalcnt = 22;
				}
			}
			if (retryCount < retrytotalcnt) {
				mtd->ecc_stats.corrected = backup_corrected;
				mtd->ecc_stats.failed = backup_failed;
				mtk_nand_rrtry_func(mtd, gn_devinfo, feature, FALSE);
				retryCount++;
			} else {
				feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;
				if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_SANDISK)
				    && (g_sandisk_retry_case < 3)) {
					g_sandisk_retry_case++;
					pr_warn("Sandisk read retry case#%d\n", g_sandisk_retry_case);
					tempBitMap = 0;
					mtd->ecc_stats.corrected = backup_corrected;
					mtd->ecc_stats.failed = backup_failed;
					mtk_nand_rrtry_func(mtd, gn_devinfo, feature, FALSE);
					retryCount = 0;
				} else {
					mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
					readRetry = FALSE;
					g_sandisk_retry_case = 0;
				}
			}
			if ((g_sandisk_retry_case == 1) || (g_sandisk_retry_case == 3))
				mtk_nand_set_command(0x26);
		} else {
			if ((retryCount != 0) && MLC_DEVICE) {
				u32 feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;

				mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
			}
			readRetry = FALSE;
			g_sandisk_retry_case = 0;
		}
		if (TRUE == readRetry)
			bRet = ERR_RTN_SUCCESS;
	} while (readRetry);
	if (retryCount != 0) {
		u32 feature = gn_devinfo.feature_set.FeatureSet.readRetryDefault;

		if (bRet == ERR_RTN_SUCCESS) {
			MSG(INIT, "[Sector RD]u4RowAddr:0x%x read retry pass, retrycnt:%d ENUM0:%x, ENUM1:%x\n",
			    u4RowAddr, retryCount, DRV_Reg32(ECC_DECENUM1_REG32), DRV_Reg32(ECC_DECENUM0_REG32));
			mtd->ecc_stats.corrected++;
			if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
			    || (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX)) {
				g_hynix_retry_count--;
			}
		} else {
			MSG(INIT, "[Sector RD]u4RowAddr:0x%x read retry fail, mtd_ecc(A):%x, mtd_ecc(B):%x\n",
			    u4RowAddr, mtd->ecc_stats.failed, backup_failed);
		}
		mtk_nand_rrtry_func(mtd, gn_devinfo, feature, TRUE);
		g_sandisk_retry_case = 0;
	}
	if (buf == local_buffer_16_align)
		memcpy(pPageBuf, buf, u4PageSize);
	PFM_END_R(pfm_time_read, u4PageSize + 32);
	if (use_randomizer && u4RowAddr >= RAND_START_ADDR)
		mtk_nand_turn_off_randomizer();
	if (bRet != ERR_RTN_SUCCESS) {
		MSG(INIT, "ECC uncorrectable, fake buffer returned\n");
		memset(pPageBuf, 0xff, u4PageSize);
		memset(pFDMBuf, 0xff, u4SecNum * host->hw->nand_fdm_size);
	}
	return bRet;
}
/******************************************************************************
 * mtk_nand_exec_write_page
 *
 * DESCRIPTION:
 *	Write a page of data!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize,
 *	u8 *pPageBuf, u8 *pFDMBuf
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
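/*
 * Usage sketch (illustrative only, hypothetical buffer sizes): a caller
 * that has already mapped a logical page to a physical row would program
 * one page plus its FDM/OOB bytes roughly like this:
 *
 *	u8 fdm[128];
 *	memset(fdm, 0xFF, sizeof(fdm));
 *	if (mtk_nand_exec_write_page(mtd, row, mtd->writesize, data_buf, fdm))
 *		pr_warn("program failed at row 0x%x\n", row);
 *
 * pPageBuf must hold mtd->writesize bytes and pFDMBuf must provide
 * host->hw->nand_fdm_size bytes per sector; 128 above is only a
 * hypothetical upper bound.
 */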
int mtk_nand_exec_write_page_hw(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 *pPageBuf,
				u8 *pFDMBuf)
{
	struct nand_chip *chip = mtd->priv;
	u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;
	u8 *buf;
	u8 status;
#ifdef PWR_LOSS_SPOH
	u32 time;
	struct timeval pl_time_write;
	suseconds_t duration;
#endif
#ifdef NAND_PFM
	struct timeval pfm_time_write;
#endif
	struct NFI_TLC_WL_INFO tlc_wl_info;
	u32 reg_val;
	u32 real_row_addr = 0;
	u32 block_addr = 0;
	u32 page_in_block = 0;
	u32 page_per_block = 0;

	page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	mtk_nand_interface_switch(mtd);
#ifdef _MTK_NAND_DUMMY_DRIVER_
	if (dummy_driver_debug) {
		unsigned long long time = sched_clock();

		if (!((time * 123 + 59) % 32768)) {
			pr_debug("[NAND_DUMMY_DRIVER] Simulate write error at page: 0x%x\n",
				 u4RowAddr);
			return -EIO;
		}
	}
#endif
	if (((unsigned long) pPageBuf % 16) && local_buffer_16_align) {
		pr_debug("Data buffer not 16 bytes aligned: %p\n", pPageBuf);
		memcpy(local_buffer_16_align, pPageBuf, u4PageSize);
		buf = local_buffer_16_align;
	} else {
		if (virt_addr_valid(pPageBuf) == 0) {
			memcpy(local_buffer_16_align, pPageBuf, u4PageSize);
			buf = local_buffer_16_align;
		} else {
			buf = pPageBuf;
		}
	}
	PFM_BEGIN(pfm_time_write);
	tlc_wl_info.wl_pre = WL_LOW_PAGE;	/* avoid compile warning */
	tlc_wl_info.word_line_idx = u4RowAddr;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	mtk_nand_reset();
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
		if (gn_devinfo.tlcControl.normaltlc) {
			NFI_TLC_GetMappedWL(u4RowAddr, &tlc_wl_info);
			real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
			if (gn_devinfo.tlcControl.pPlaneEn)
				real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_lg_left_plane);
		} else
			real_row_addr = NFI_TLC_GetRowAddr(u4RowAddr);
		if (gn_devinfo.tlcControl.slcopmodeEn) {
			if ((!gn_devinfo.tlcControl.pPlaneEn) || tlc_lg_left_plane) {
				if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
					reg_val = DRV_Reg16(NFI_CNFG_REG16);
					reg_val &= ~CNFG_READ_EN;
					reg_val &= ~CNFG_OP_MODE_MASK;
					reg_val |= CNFG_OP_CUST;
					DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
					mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
					reg_val = DRV_Reg32(NFI_CON_REG16);
					reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
					/* issue reset operation */
					DRV_WriteReg32(NFI_CON_REG16, reg_val);
				}
			}
		} else {
			if (gn_devinfo.tlcControl.normaltlc) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				if (PROGRAM_1ST_CYCLE == tlc_program_cycle)
					mtk_nand_set_command(PROGRAM_1ST_CYCLE_CMD);
				else if (PROGRAM_2ND_CYCLE == tlc_program_cycle)
					mtk_nand_set_command(PROGRAM_2ND_CYCLE_CMD);
				if (tlc_wl_info.wl_pre == WL_LOW_PAGE)
					mtk_nand_set_command(LOW_PG_SELECT_CMD);
				else if (tlc_wl_info.wl_pre == WL_MID_PAGE)
					mtk_nand_set_command(MID_PG_SELECT_CMD);
				else if (tlc_wl_info.wl_pre == WL_HIGH_PAGE)
					mtk_nand_set_command(HIGH_PG_SELECT_CMD);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
			}
		}
	} else
#endif
	{
		real_row_addr = u4RowAddr;
	}
	/* if (force_slc_flag == 1)
	   gn_devinfo.tlcControl.slcopmodeEn = true; */
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) {
		if (gn_devinfo.tlcControl.slcopmodeEn) {
			if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
				if (gn_devinfo.vendor == VEND_SANDISK) {
					block_addr = real_row_addr / page_per_block;
					page_in_block = real_row_addr % page_per_block;
					page_in_block <<= 1;
					real_row_addr = page_in_block + block_addr * page_per_block;
					/* pr_err("mtk_nand_exec_write_page_hw SLC Mode real_row_addr:%d, u4RowAddr:%d\n",
					   real_row_addr, u4RowAddr); */
				}
			}
		} else {
			if (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
			}
		}
	}
	if (use_randomizer && u4RowAddr >= RAND_START_ADDR) {
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
			if (gn_devinfo.tlcControl.slcopmodeEn)
				mtk_nand_turn_on_randomizer(mtd, chip, tlc_wl_info.word_line_idx);
			else
				mtk_nand_turn_on_randomizer(mtd, chip,
					(tlc_wl_info.word_line_idx * 3 + tlc_wl_info.wl_pre));
		} else
			mtk_nand_turn_on_randomizer(mtd, chip, u4RowAddr);
	}
	if (mtk_nand_ready_for_write(chip, real_row_addr, 0, true, buf)) {
		mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
		(void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
		dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
		(void)mtk_nand_check_RW_count(u4PageSize);
		mtk_nand_stop_write();
		PL_NAND_BEGIN(pl_time_write);
		PL_TIME_RAND_PROG(chip, u4RowAddr, time);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
			if (gn_devinfo.tlcControl.normaltlc) {
				if ((gn_devinfo.tlcControl.pPlaneEn) && tlc_lg_left_plane) {
					mtk_nand_set_command(PROGRAM_LEFT_PLANE_CMD);
				} else {
					if ((tlc_wl_info.wl_pre == WL_HIGH_PAGE) || gn_devinfo.tlcControl.slcopmodeEn)
						mtk_nand_set_command(NAND_CMD_PAGEPROG);
					else
						mtk_nand_set_command(PROGRAM_RIGHT_PLANE_CMD);
				}
			} else
				mtk_nand_set_command(NAND_CMD_PAGEPROG);
		} else
#endif
			(void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
		PL_NAND_RESET(time);
		{
#ifdef DUMP_PEF
			struct timeval stimer, etimer;

			do_gettimeofday(&stimer);
#endif
			while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
				;
#ifdef DUMP_PEF
			do_gettimeofday(&etimer);
			g_NandPerfLog.WriteBusyTotalTime += Cal_timediff(&etimer, &stimer);
			g_NandPerfLog.WriteBusyCount++;
#endif
		}
		PL_NAND_END(pl_time_write, duration);
		PL_TIME_PROG(duration);
		PFM_END_W(pfm_time_write, u4PageSize + 32);
		if (use_randomizer && u4RowAddr >= RAND_START_ADDR)
			mtk_nand_turn_off_randomizer();
		status = chip->waitfunc(mtd, chip);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
			if ((gn_devinfo.tlcControl.slcopmodeEn)
			    && (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd)) {
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
			}
		}
		if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
		    && (gn_devinfo.tlcControl.slcopmodeEn)) {
			if (status & SLC_MODE_OP_FALI)
				return -EIO;
			else
				return 0;
		} else
#endif
		{
			if (status & NAND_STATUS_FAIL)
				return -EIO;
			else
				return 0;
		}
	} else {
		dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
		pr_warn("[Bean]mtk_nand_ready_for_write fail!\n");
		if (use_randomizer && u4RowAddr >= RAND_START_ADDR)
			mtk_nand_turn_off_randomizer();
		return -EIO;
	}
}
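/*
 * Note on the wrapper below (illustrative, assuming the TLC pPlane layout
 * used elsewhere in this file): with pPlaneEn set, one logical page spans
 * two physical planes, so mtk_nand_exec_write_page() programs it as two
 * half-page calls. For a hypothetical 16KiB page with 16 sectors and
 * nand_fdm_size = 8, each mtk_nand_exec_write_page_hw() call covers
 * u4PageSize / 2 = 8192 data bytes and (u4SecNum / 2) * 8 = 64 FDM bytes,
 * left plane first.
 */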
int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 *pPageBuf, u8 *pFDMBuf)
{
	int bRet;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	u8 *temp_page_buf = NULL;
	u8 *temp_fdm_buf = NULL;
	u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;

	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
		if ((gn_devinfo.tlcControl.normaltlc) && (gn_devinfo.tlcControl.pPlaneEn)) {
			tlc_lg_left_plane = TRUE;
			temp_page_buf = pPageBuf;
			temp_fdm_buf = pFDMBuf;
			bRet = mtk_nand_exec_write_page_hw(mtd, u4RowAddr, u4PageSize / 2,
							   temp_page_buf, temp_fdm_buf);
			if (bRet != 0)
				return bRet;
			tlc_lg_left_plane = FALSE;
			temp_page_buf += (u4PageSize / 2);
			temp_fdm_buf += ((u4SecNum / 2) * host->hw->nand_fdm_size);
			bRet = mtk_nand_exec_write_page_hw(mtd, u4RowAddr, u4PageSize / 2,
							   temp_page_buf, temp_fdm_buf);
		} else {
			bRet = mtk_nand_exec_write_page_hw(mtd, u4RowAddr, u4PageSize, pPageBuf, pFDMBuf);
		}
	} else
#endif
	{
		bRet = mtk_nand_exec_write_page_hw(mtd, u4RowAddr, u4PageSize, pPageBuf, pFDMBuf);
	}
	return bRet;
}
/******************************************************************************
 *
 * Write a page to a logical address
 *
 *****************************************************************************/
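/*
 * Mapping sketch (illustrative, hypothetical geometry): with
 * blocksize = 2048 (KiB) and pagesize = 16384, page_per_block is
 * 2048 * 1024 / 16384 = 128. mtk_nand_page_transform() yields the page
 * offset inside the block plus the BMT-remapped block, and the physical
 * row programmed below is page_in_block + mapped_block * page_per_block.
 */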
static int mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			       uint32_t offset, int data_len, const uint8_t *buf,
			       int oob_required, int page, int cached, int raw)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;
	u32 page_in_block;
	u32 mapped_block;
#ifdef DUMP_PEF
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	/* pr_err("[WRITE] %d, %d, %d %d\n", mapped_block, block, page_in_block, page_per_block); */
	if (mapped_block != block)
		set_bad_index_to_oob(chip->oob_poi, block);
	else
		set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
	if (mtk_nand_exec_write_page
	    (mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *) buf,
	     chip->oob_poi)) {
		MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
		if (update_bmt
		    ((u64) ((u64) page_in_block + (u64) mapped_block * page_per_block) << chip->page_shift,
		     UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
			MSG(INIT, "Update BMT success\n");
		} else {
			MSG(INIT, "Update BMT fail\n");
			return -EIO;
		}
		return 0;
	}
#ifdef DUMP_PEF
	do_gettimeofday(&etimer);
	g_NandPerfLog.WritePageTotalTime += Cal_timediff(&etimer, &stimer);
	g_NandPerfLog.WritePageCount++;
	dump_nand_rwcount();
#endif
	return 0;
}
/*-------------------------------------------------------------------------------*/
/*
static void mtk_nand_command_sp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
{
	g_u4ColAddr = column;
	g_u4RowAddr = page_addr;
	switch (command) {
	case NAND_CMD_STATUS:
		break;
	case NAND_CMD_READID:
		break;
	case NAND_CMD_RESET:
		break;
	case NAND_CMD_RNDOUT:
	case NAND_CMD_RNDOUTSTART:
	case NAND_CMD_RNDIN:
	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_STATUS_MULTI:
	default:
		break;
	}
}
*/
/******************************************************************************
 * mtk_nand_command_bp
 *
 * DESCRIPTION:
 *	Handle the commands from MTD!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, unsigned int command, int column, int page_addr
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
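/*
 * Call-sequence sketch (illustrative only): the MTD core drives this
 * dispatcher in pairs, e.g. a page program is
 *
 *	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
 *	chip->write_buf(mtd, data, mtd->writesize);
 *	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 *
 * and an erase is NAND_CMD_ERASE1 (row address) followed by
 * NAND_CMD_ERASE2. SEQIN/READ0/READOOB only latch the address into
 * g_kCMD; the actual transfer happens in the buf/page callbacks.
 */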
static void mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column,
				int page_addr)
{
	struct nand_chip *nand = mtd->priv;
#ifdef NAND_PFM
	struct timeval pfm_time_erase;
#endif
	switch (command) {
	case NAND_CMD_SEQIN:
		memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
		g_kCMD.pDataBuf = NULL;
		g_kCMD.u4RowAddr = page_addr;
		g_kCMD.u4ColAddr = column;
		break;
	case NAND_CMD_PAGEPROG:
		if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[0])) {
			u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;

			if (((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
			     || (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER))
			    && (gn_devinfo.tlcControl.normaltlc)
			    && (!mtk_block_istlc(g_kCMD.u4RowAddr * mtd->writesize)))
				gn_devinfo.tlcControl.slcopmodeEn = TRUE;
			else
				gn_devinfo.tlcControl.slcopmodeEn = FALSE;
			mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf,
						 g_kCMD.au1OOB);
			g_kCMD.u4RowAddr = (u32) -1;
			g_kCMD.u4OOBRowAddr = (u32) -1;
		}
		break;
	case NAND_CMD_READOOB:
		g_kCMD.u4RowAddr = page_addr;
		g_kCMD.u4ColAddr = column + mtd->writesize;
#ifdef NAND_PFM
		g_kCMD.pureReadOOB = 1;
		g_kCMD.pureReadOOBNum += 1;
#endif
		break;
	case NAND_CMD_READ0:
		g_kCMD.u4RowAddr = page_addr;
		g_kCMD.u4ColAddr = column;
#ifdef NAND_PFM
		g_kCMD.pureReadOOB = 0;
#endif
		break;
	case NAND_CMD_ERASE1:
		PFM_BEGIN(pfm_time_erase);
		(void)mtk_nand_reset();
		mtk_nand_set_mode(CNFG_OP_ERASE);
		(void)mtk_nand_set_command(NAND_CMD_ERASE1);
		(void)mtk_nand_set_address(0, page_addr, 0, gn_devinfo.addr_cycle - 2);
		break;
	case NAND_CMD_ERASE2:
		(void)mtk_nand_set_command(NAND_CMD_ERASE2);
		while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
			;
		PFM_END_E(pfm_time_erase);
		break;
	case NAND_CMD_STATUS:
		mtk_nand_interface_switch(mtd);
		(void)mtk_nand_reset();
		if (mtk_nand_israndomizeron()) {
			g_brandstatus = TRUE;
			mtk_nand_turn_off_randomizer();
		}
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
		mtk_nand_set_mode(CNFG_OP_SRD);
		mtk_nand_set_mode(CNFG_READ_EN);
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		(void)mtk_nand_set_command(NAND_CMD_STATUS);
		NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_NOB_MASK);
		mb();	/* make sure process order */
		DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
		g_bcmdstatus = true;
		break;
	case NAND_CMD_RESET:
		(void)mtk_nand_reset();
		break;
	case NAND_CMD_READID:
		/* Issue NAND chip reset command */
		mtk_nand_reset();
		/* Disable HW ECC */
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
		/* Disable 16-bit I/O */
		/* NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN); */
		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
		(void)mtk_nand_reset();
		mb();	/* make sure process order */
		mtk_nand_set_mode(CNFG_OP_SRD);
		(void)mtk_nand_set_command(NAND_CMD_READID);
		(void)mtk_nand_set_address(0, 0, 1, 0);
		DRV_WriteReg32(NFI_CON_REG16, CON_NFI_SRD);
		while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
			;
		break;
	default:
		BUG();
		break;
	}
}
/******************************************************************************
 * mtk_nand_select_chip
 *
 * DESCRIPTION:
 *	Select a chip!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, int chip
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
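/*
 * Sizing sketch (illustrative, hypothetical geometry): for a 16KiB page
 * with 1KiB sectors and mtd->oobsize = 1952, the per-sector spare is
 * 1952 / (16384 / 1024) = 122 bytes, which selects ecc_bit = 60 in the
 * switch below and an ecc_threshold of 60 * 4 / 5 = 48, the correction
 * count the driver's bitflip accounting uses elsewhere.
 */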
static void mtk_nand_select_chip(struct mtd_info *mtd, int chip)
{
	if (chip == -1 && false == g_bInitDone) {
		struct nand_chip *nand = mtd->priv;
		struct mtk_nand_host *host = nand->priv;
		struct mtk_nand_host_hw *hw = host->hw;
		u32 spare_per_sector = mtd->oobsize / (mtd->writesize / hw->nand_sec_size);
		u32 ecc_bit = 4;
		u32 spare_bit = PAGEFMT_SPARE_16;

		hw->nand_fdm_size = 8;
		switch (spare_per_sector) {
		case 16:
			spare_bit = PAGEFMT_SPARE_16;
			ecc_bit = 4;
			spare_per_sector = 16;
			break;
		case 26:
		case 27:
		case 28:
			spare_bit = PAGEFMT_SPARE_26;
			ecc_bit = 10;
			spare_per_sector = 26;
			break;
		case 32:
			ecc_bit = 12;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_32_1KS;
			else
				spare_bit = PAGEFMT_SPARE_32;
			spare_per_sector = 32;
			break;
		case 40:
			ecc_bit = 18;
			spare_bit = PAGEFMT_SPARE_40;
			spare_per_sector = 40;
			break;
		case 44:
			ecc_bit = 20;
			spare_bit = PAGEFMT_SPARE_44;
			spare_per_sector = 44;
			break;
		case 48:
		case 49:
			ecc_bit = 22;
			spare_bit = PAGEFMT_SPARE_48;
			spare_per_sector = 48;
			break;
		case 50:
		case 51:
			ecc_bit = 24;
			spare_bit = PAGEFMT_SPARE_50;
			spare_per_sector = 50;
			break;
		case 52:
		case 54:
		case 56:
			ecc_bit = 24;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_52_1KS;
			else
				spare_bit = PAGEFMT_SPARE_52;
			spare_per_sector = 52;
			break;
		case 62:
		case 63:
			ecc_bit = 28;
			spare_bit = PAGEFMT_SPARE_62;
			spare_per_sector = 62;
			break;
		case 64:
			ecc_bit = 32;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_64_1KS;
			else
				spare_bit = PAGEFMT_SPARE_64;
			spare_per_sector = 64;
			break;
		case 72:
			ecc_bit = 36;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_72_1KS;
			spare_per_sector = 72;
			break;
		case 80:
			ecc_bit = 40;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_80_1KS;
			spare_per_sector = 80;
			break;
		case 88:
			ecc_bit = 44;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_88_1KS;
			spare_per_sector = 88;
			break;
		case 96:
		case 98:
			ecc_bit = 48;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_96_1KS;
			spare_per_sector = 96;
			break;
		case 100:
		case 102:
		case 104:
			ecc_bit = 52;
			if (MLC_DEVICE == TRUE)
				spare_bit = PAGEFMT_SPARE_100_1KS;
			spare_per_sector = 100;
			break;
		case 122:
		case 124:
		case 126:
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
			if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
			    && gn_devinfo.tlcControl.ecc_recalculate_en) {
				if (60 < gn_devinfo.tlcControl.ecc_required) {
					hw->nand_fdm_size = 3;
					ecc_bit = 68;
				} else
					ecc_bit = 60;
			} else
#endif
				ecc_bit = 60;
			if (hw->nand_sec_size == 1024)
				spare_bit = PAGEFMT_SPARE_122_1KS;
			spare_per_sector = 122;
			break;
		case 128:
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
			if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
			    && gn_devinfo.tlcControl.ecc_recalculate_en) {
				if (68 < gn_devinfo.tlcControl.ecc_required) {
					hw->nand_fdm_size = 2;
					ecc_bit = 72;
				} else
					ecc_bit = 68;
			} else
#endif
				ecc_bit = 68;
			if (hw->nand_sec_size == 1024)
				spare_bit = PAGEFMT_SPARE_128_1KS;
			spare_per_sector = 128;
			break;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		case 134:
			ecc_bit = 72;
			if (hw->nand_sec_size == 1024)
				spare_bit = PAGEFMT_SPARE_134_1KS;
			spare_per_sector = 134;
			break;
		case 148:
			ecc_bit = 80;
			if (hw->nand_sec_size == 1024)
				spare_bit = PAGEFMT_SPARE_148_1KS;
			spare_per_sector = 148;
			break;
#endif
		default:
			MSG(INIT, "[NAND]: NFI does not support oobsize: %x\n", spare_per_sector);
			ASSERT(0);
		}
		mtd->oobsize = spare_per_sector * (mtd->writesize / hw->nand_sec_size);
		pr_err("[NAND]select ecc bit: %d, sparesize: %d\n", ecc_bit, mtd->oobsize);
		/* Setup PageFormat */
		if (16384 == mtd->writesize) {
			NFI_SET_REG32(NFI_PAGEFMT_REG16,
				      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_16K_1KS);
			nand->cmdfunc = mtk_nand_command_bp;
		} else if (8192 == mtd->writesize) {
			NFI_SET_REG32(NFI_PAGEFMT_REG16,
				      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_8K_1KS);
			nand->cmdfunc = mtk_nand_command_bp;
		} else if (4096 == mtd->writesize) {
			if (MLC_DEVICE == FALSE)
				NFI_SET_REG32(NFI_PAGEFMT_REG16,
					      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
			else
				NFI_SET_REG32(NFI_PAGEFMT_REG16,
					      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K_1KS);
			nand->cmdfunc = mtk_nand_command_bp;
		} else if (2048 == mtd->writesize) {
			if (MLC_DEVICE == FALSE)
				NFI_SET_REG32(NFI_PAGEFMT_REG16,
					      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
			else
				NFI_SET_REG32(NFI_PAGEFMT_REG16,
					      (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K_1KS);
			nand->cmdfunc = mtk_nand_command_bp;
		}
		mtk_nand_configure_fdm(hw->nand_fdm_size);
		ecc_threshold = ecc_bit * 4 / 5;
		ECC_Config(hw, ecc_bit);
		g_bInitDone = true;
		/* xiaolei for kernel 3.10 */
		nand->ecc.strength = ecc_bit;
		mtd->bitflip_threshold = nand->ecc.strength;
	}
	switch (chip) {
	case -1:
		break;
	case 0:
#ifdef CFG_FPGA_PLATFORM	/* FPGA NAND is placed at CS1 not CS0 */
		DRV_WriteReg16(NFI_CSEL_REG16, 0);
		break;
#endif
	case 1:
		if (chip != 0)
			pr_warn("[BUG!!!]select_chip %d\n", chip);
		DRV_WriteReg16(NFI_CSEL_REG16, chip);
		break;
	}
}
/******************************************************************************
 * mtk_nand_read_byte
 *
 * DESCRIPTION:
 *	Read a byte of data!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
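/*
 * Illustrative note (sketch only): after mtk_nand_command_bp(NAND_CMD_STATUS)
 * has set g_bcmdstatus, the next mtk_nand_read_byte() call returns the one
 * status byte latched by the single-read (SRD) transfer and then restores
 * the AHB/ECC configuration, e.g.
 *
 *	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
 *	status = chip->read_byte(mtd);
 *	if (status & NAND_STATUS_FAIL)
 *		...
 */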
static uint8_t mtk_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t retval = 0;

	if (!mtk_nand_pio_ready()) {
		pr_err("pio ready timeout\n");
		retval = false;
	}
	if (g_bcmdstatus) {
		retval = DRV_Reg8(NFI_DATAR_REG32);
		NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_NOB_MASK);
		mtk_nand_reset();
#if (__INTERNAL_USE_AHB_MODE__)
		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_DMA_BURST_EN);
#endif
		if (g_bHwEcc)
			NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		else
			NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		g_bcmdstatus = false;
	} else
		retval = DRV_Reg8(NFI_DATAR_REG32);
	return retval;
}
/******************************************************************************
 * mtk_nand_read_buf
 *
 * DESCRIPTION:
 *	Read NAND data!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, uint8_t *buf, int len
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
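/*
 * Addressing sketch (illustrative): g_kCMD.u4ColAddr selects what the read
 * below returns. Assuming a hypothetical mtd->writesize of 16384, a column
 * of 0 with len >= 16384 reads the whole page straight into the caller's
 * buffer; a column of 16384 + 8 returns OOB bytes starting at offset 8 of
 * au1OOB. The column advances by len after every call, mirroring MTD's
 * sequential read model.
 */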
static void mtk_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *nand = (struct nand_chip *)mtd->priv;
	struct NAND_CMD *pkCMD = &g_kCMD;
	u32 u4ColAddr = pkCMD->u4ColAddr;
	u32 u4PageSize = mtd->writesize;

	if (((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	     || (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER))
	    && (gn_devinfo.tlcControl.normaltlc)
	    && (!mtk_block_istlc(pkCMD->u4RowAddr * mtd->writesize)))
		gn_devinfo.tlcControl.slcopmodeEn = TRUE;
	else
		gn_devinfo.tlcControl.slcopmodeEn = FALSE;
	if (u4ColAddr < u4PageSize) {
		if ((u4ColAddr == 0) && (len >= u4PageSize)) {
			mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf,
						pkCMD->au1OOB);
			if (len > u4PageSize) {
				u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));

				memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
			}
		} else {
			mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize,
						nand->buffers->databuf, pkCMD->au1OOB);
			memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
		}
		pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
	} else {
		u32 u4Offset = u4ColAddr - u4PageSize;
		u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));

		if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
			mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize,
						nand->buffers->databuf, pkCMD->au1OOB);
			pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
		}
		memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
	}
	pkCMD->u4ColAddr += len;
}
/******************************************************************************
 * mtk_nand_write_buf
 *
 * DESCRIPTION:
 *	Write NAND data!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, const uint8_t *buf, int len
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
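/*
 * Illustrative note: the OOB path below merges with a bitwise AND rather
 * than a copy, matching NAND semantics where programming can only clear
 * bits (1 -> 0). E.g. an existing au1OOB byte of 0xF0 ANDed with caller
 * data 0x3C yields 0x30; bits already 0 stay 0.
 */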
static void mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct NAND_CMD *pkCMD = &g_kCMD;
	u32 u4ColAddr = pkCMD->u4ColAddr;
	u32 u4PageSize = mtd->writesize;
	int i4Size, i;

	if (u4ColAddr >= u4PageSize) {
		u32 u4Offset = u4ColAddr - u4PageSize;
		u8 *pOOB = pkCMD->au1OOB + u4Offset;

		i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
		for (i = 0; i < i4Size; i++)
			pOOB[i] &= buf[i];
	} else {
		pkCMD->pDataBuf = (u8 *) buf;
	}
	pkCMD->u4ColAddr += len;
}
/******************************************************************************
 * mtk_nand_write_page_hwecc
 *
 * DESCRIPTION:
 *	Write NAND data with hardware ECC!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
static int mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				     const uint8_t *buf, int oob_required)
{
	mtk_nand_write_buf(mtd, buf, mtd->writesize);
	mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
/******************************************************************************
 * mtk_nand_read_page_hwecc
 *
 * DESCRIPTION:
 *	Read NAND data with hardware ECC!
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
static int mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf,
				    int oob_required, int page)
{
#if 0
	mtk_nand_read_buf(mtd, buf, mtd->writesize);
	mtk_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
#else
	struct NAND_CMD *pkCMD = &g_kCMD;
	u32 u4ColAddr = pkCMD->u4ColAddr;
	u32 u4PageSize = mtd->writesize;

	if (((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	     || (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER))
	    && (gn_devinfo.tlcControl.normaltlc)
	    && (!mtk_block_istlc(pkCMD->u4RowAddr * mtd->writesize)))
		gn_devinfo.tlcControl.slcopmodeEn = TRUE;
	else
		gn_devinfo.tlcControl.slcopmodeEn = FALSE;
	if (u4ColAddr == 0) {
		mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
		pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
	}
#endif
	return 0;
}
/******************************************************************************
 *
 * Read a page to a logical address
 *
 *****************************************************************************/
static int mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;
	u32 page_in_block;
	u32 mapped_block;
	int bRet = ERR_RTN_SUCCESS;
#ifdef DUMP_PEF
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	/* MSG(INIT, "[READ] %d, %d, %d %d\n", mapped_block, block, page_in_block, page_per_block); */
	bRet = mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
				       mtd->writesize, buf, chip->oob_poi);
	if (bRet == ERR_RTN_SUCCESS) {
#ifdef DUMP_PEF
		do_gettimeofday(&etimer);
		g_NandPerfLog.ReadPageTotalTime += Cal_timediff(&etimer, &stimer);
		g_NandPerfLog.ReadPageCount++;
		dump_nand_rwcount();
#endif
#ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
		if (g_snand_pm_on == 1)
			mtk_snand_pm_add_drv_record(_SNAND_PM_OP_READ_PAGE,
						    page_in_block + mapped_block * page_per_block,
						    0, Cal_timediff(&etimer, &stimer));
#endif
		return 0;
	}
	/* else
	   return -EIO; */
	return 0;
}
static int mtk_nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page,
				 int subpage, int subpageno)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;
	int coladdr;
	u32 page_in_block;
	u32 mapped_block;
	int bRet = ERR_RTN_SUCCESS;
	int sec_num = 1 << (chip->page_shift - host->hw->nand_sec_shift);
	int spare_per_sector = mtd->oobsize / sec_num;
#ifdef DUMP_PEF
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	coladdr = subpage * (gn_devinfo.sectorsize + spare_per_sector);
	bRet = mtk_nand_exec_read_sector(mtd, page_in_block + mapped_block * page_per_block, coladdr,
					 gn_devinfo.sectorsize * subpageno, buf, chip->oob_poi,
					 subpageno);
	if (bRet == ERR_RTN_SUCCESS) {
#ifdef DUMP_PEF
		do_gettimeofday(&etimer);
		g_NandPerfLog.ReadSubPageTotalTime += Cal_timediff(&etimer, &stimer);
		g_NandPerfLog.ReadSubPageCount++;
		dump_nand_rwcount();
#endif
#ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
		if (g_snand_pm_on == 1)
			mtk_snand_pm_add_drv_record(_SNAND_PM_OP_READ_SEC,
						    page_in_block + mapped_block * page_per_block,
						    subpage, Cal_timediff(&etimer, &stimer));
#endif
		return 0;
	}
	/* else
	   return -EIO; */
	return 0;
}
/******************************************************************************
 *
 * Erase a block at a logical address
 *
 *****************************************************************************/
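/*
 * Addressing sketch (illustrative, hypothetical geometry): erase operates
 * on whole blocks, so the row passed to chip->erase() below is the first
 * page of the block. With page_per_block = 128, erasing block 40 means
 * page = 40 * 128 = 5120; for TLC parts the row is first translated via
 * NFI_TLC_GetMappedWL()/NFI_TLC_GetRowAddr().
 */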
int mtk_nand_erase_hw(struct mtd_info *mtd, int page)
{
#ifdef PWR_LOSS_SPOH
	struct timeval pl_time_write;
	suseconds_t duration;
	u32 time;
#endif
	int result;
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	struct NFI_TLC_WL_INFO tlc_wl_info;
#endif
	u32 reg_val = 0;
	u32 real_row_addr = 0;

#ifdef _MTK_NAND_DUMMY_DRIVER_
	if (dummy_driver_debug) {
		unsigned long long time = sched_clock();

		if (!((time * 123 + 59) % 1024)) {
			pr_debug("[NAND_DUMMY_DRIVER] Simulate erase error at page: 0x%x\n", page);
			return NAND_STATUS_FAIL;
		}
	}
#endif
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
		if (gn_devinfo.tlcControl.normaltlc) {
			NFI_TLC_GetMappedWL(page, &tlc_wl_info);
			real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
		} else {
			real_row_addr = NFI_TLC_GetRowAddr(page);
		}
	} else
#endif
	{
		real_row_addr = page;
	}
	/* if (force_slc_flag == 1)
		gn_devinfo.tlcControl.slcopmodeEn = true; */
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
		if ((gn_devinfo.tlcControl.slcopmodeEn)
		    && (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd)) {
			reg_val = DRV_Reg16(NFI_CNFG_REG16);
			reg_val &= ~CNFG_READ_EN;
			reg_val &= ~CNFG_OP_MODE_MASK;
			reg_val |= CNFG_OP_CUST;
			DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
			mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
			reg_val = DRV_Reg32(NFI_CON_REG16);
			reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
			/* issue reset operation */
			DRV_WriteReg32(NFI_CON_REG16, reg_val);
		} else {
			if (tlc_not_keep_erase_lvl) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(NOT_KEEP_ERASE_LVL_A19NM_CMD);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
			}
		}
	}
#endif
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER) && (gn_devinfo.vendor == VEND_SANDISK)) {
		if (gn_devinfo.tlcControl.slcopmodeEn) {
			if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
				/* pr_err("mtk_nand_erase_hw SLC Mode %d\n", real_row_addr); */
			}
		} else {
			if (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd) {
				reg_val = DRV_Reg16(NFI_CNFG_REG16);
				reg_val &= ~CNFG_READ_EN;
				reg_val &= ~CNFG_OP_MODE_MASK;
				reg_val |= CNFG_OP_CUST;
				DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
				mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
				reg_val = DRV_Reg32(NFI_CON_REG16);
				reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
				/* issue reset operation */
				DRV_WriteReg32(NFI_CON_REG16, reg_val);
			}
		}
	}
	PL_NAND_BEGIN(pl_time_write);
	PL_TIME_RAND_ERASE(chip, page, time);
	chip->erase(mtd, real_row_addr);
	PL_NAND_RESET(time);
	result = chip->waitfunc(mtd, chip);
	PL_NAND_END(pl_time_write, duration);
	PL_TIME_ERASE(duration);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.slcopmodeEn)) {
		if (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd) {
			reg_val = DRV_Reg32(NFI_CON_REG16);
			reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
			/* issue reset operation */
			DRV_WriteReg32(NFI_CON_REG16, reg_val);
			reg_val = DRV_Reg16(NFI_CNFG_REG16);
			reg_val &= ~CNFG_READ_EN;
			reg_val &= ~CNFG_OP_MODE_MASK;
			reg_val |= CNFG_OP_CUST;
			DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
			mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
		}
	}
#endif
	return result;
}
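/*
 * The SLC-mode handling in mtk_nand_erase_hw() follows a wrap pattern: the
 * vendor "enter SLC mode" command (en_slc_mode_cmd) is issued as a custom
 * NFI command before the erase, the erase runs on the remapped row address,
 * and after chip->waitfunc() completes the matching "exit SLC mode" command
 * (dis_slc_mode_cmd) returns the die to TLC operation. Roughly:
 *
 *	mtk_nand_set_command(en_slc_mode_cmd);   -- enter SLC mode
 *	chip->erase(mtd, real_row_addr);         -- erase in SLC mode
 *	chip->waitfunc(mtd, chip);               -- wait for ready/busy
 *	mtk_nand_set_command(dis_slc_mode_cmd);  -- back to TLC mode
 *
 * Both commands are vendor specific and are skipped when set to 0xFF.
 */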
static int mtk_nand_erase(struct mtd_info *mtd, int page)
{
	int status;
	struct nand_chip *chip = mtd->priv;
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;
	u32 page_in_block;
	u32 mapped_block;
	bool erase_fail = FALSE;
#ifdef DUMP_PEF
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	/* pr_err("[ERASE] 0x%x 0x%x\n", mapped_block, page); */
	status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.slcopmodeEn)) {
		if (status & SLC_MODE_OP_FALI) {
			erase_fail = TRUE;
			MSG(INIT, "mtk_nand_erase: page %d fail\n", page);
		}
	} else
#endif
	{
		if (status & NAND_STATUS_FAIL)
			erase_fail = TRUE;
	}
	if (erase_fail) {
		if (update_bmt((u64) ((u64) page_in_block + (u64) mapped_block * page_per_block) <<
			       chip->page_shift, UPDATE_ERASE_FAIL, NULL, NULL)) {
			MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
		} else {
			MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
			return NAND_STATUS_FAIL;
		}
	}
#ifdef DUMP_PEF
	do_gettimeofday(&etimer);
	g_NandPerfLog.EraseBlockTotalTime += Cal_timediff(&etimer, &stimer);
	g_NandPerfLog.EraseBlockCount++;
	dump_nand_rwcount();
#endif
#ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
	if (g_snand_pm_on == 1)
		mtk_snand_pm_add_drv_record(_SNAND_PM_OP_ERASE,
			page_in_block + page_per_block * mapped_block, 0,
			Cal_timediff(&etimer, &stimer));
#endif
	return 0;
}
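/*
 * update_bmt() takes a byte offset, so the failing page is converted as
 * (page_in_block + mapped_block * page_per_block) << chip->page_shift.
 * A rough worked example, with illustrative numbers only: for a 16KiB page
 * (page_shift = 14) and 256 pages per block, an erase failure on page 5 of
 * mapped block 100 reports offset (5 + 100 * 256) << 14 = 0x19014000.
 * The u64 casts keep the shift from overflowing 32 bits on large devices.
 */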
/******************************************************************************
 * mtk_nand_read_multi_page_cache
 *
 * description:
 *	read multi-page data using cache read
 *
 * parameters:
 *	struct mtd_info *mtd, struct nand_chip *chip, int page, struct mtd_oob_ops *ops
 *
 * returns:
 *	none
 *
 * notes:
 *	only available for NAND flash that supports cache read.
 *	reads main data only.
 *
 *****************************************************************************/
#if 0
static int mtk_nand_read_multi_page_cache(struct mtd_info *mtd, struct nand_chip *chip, int page,
					  struct mtd_oob_ops *ops)
{
	int res = -EIO;
	int len = ops->len;
	struct mtd_ecc_stats stat = mtd->ecc_stats;
	uint8_t *buf = ops->datbuf;

	if (!mtk_nand_ready_for_read(chip, page, 0, true, buf))
		return -EIO;
	while (len > 0) {
		mtk_nand_set_mode(CNFG_OP_CUST);
		DRV_WriteReg32(NFI_CON_REG16, 8 << CON_NFI_SEC_SHIFT);
		if (len > mtd->writesize) {
			if (!mtk_nand_set_command(0x31))	/* todo: add cache read command */
				goto ret;
		} else {
			if (!mtk_nand_set_command(0x3f))	/* last page remained */
				goto ret;
		}
		mtk_nand_status_ready(STA_NAND_BUSY);
#ifdef __INTERNAL_USE_AHB_MODE__
		if (!mtk_nand_read_page_data(mtd, buf, mtd->writesize))
			goto ret;
#else
		if (!mtk_nand_mcu_read_data(buf, mtd->writesize))
			goto ret;
#endif
		/* get ecc error info */
		mtk_nand_check_bch_error(mtd, buf, 3, page);
		ECC_Decode_End();
		page++;
		len -= mtd->writesize;
		buf += mtd->writesize;
		ops->retlen += mtd->writesize;
		if (len > 0) {
			ECC_Decode_Start();
			mtk_nand_reset();
		}
	}
	res = 0;
ret:
	mtk_nand_stop_read();
	if (res)
		return res;
	if (mtd->ecc_stats.failed > stat.failed) {
		pr_debug("ecc fail happened\n");
		return -EBADMSG;
	}
	return mtd->ecc_stats.corrected - stat.corrected ? -EUCLEAN : 0;
}
#endif
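/*
 * The disabled routine above sketches the standard ONFI sequential cache
 * read flow: after the initial READ0/READSTART, command 0x31 (cache read)
 * returns the current page while the next one is loaded into the cache
 * register, and command 0x3F (cache read end) fetches the last page without
 * starting another array read. The 0x31/0x3F opcodes match the ONFI
 * definitions, but the code is compiled out under #if 0 and is not wired
 * into this driver's read path.
 */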
/******************************************************************************
 * mtk_nand_read_oob_raw
 *
 * DESCRIPTION:
 *	Read OOB data
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, const uint8_t *buf, int addr, int len
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	This function reads raw OOB data out of flash, so the caller needs to
 *	re-organise the data format before using it.
 *	len should be a multiple of 8; call this after nand_get_device.
 *	Note that this function reads data without ECC protection.
 *
 *****************************************************************************/
static int mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t *buf, int page_addr, int len)
{
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	u32 col_addr = 0;
	u32 sector = 0;
	int res = 0;
	u32 colnob = 2, rawnob = gn_devinfo.addr_cycle - 2;
	int randomread = 0;
	int read_len = 0;
	int sec_num = 1 << (chip->page_shift - host->hw->nand_sec_shift);
	int spare_per_sector = mtd->oobsize / sec_num;
	u32 sector_size = NAND_SECTOR_SIZE;

	if (gn_devinfo.sectorsize == 1024)
		sector_size = 1024;
	if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
		pr_warn("[%s] invalid parameter, len: %d, buf: %p\n", __func__, len, buf);
		return -EINVAL;
	}
	if (len > spare_per_sector)
		randomread = 1;
	if (!randomread || !(gn_devinfo.advancedmode & RAMDOM_READ)) {
		while (len > 0) {
			read_len = min(len, spare_per_sector);
			col_addr = sector_size + sector * (sector_size + spare_per_sector);
			/* TODO: fix this hard-coded 16 */
			if (!mtk_nand_ready_for_read
			    (chip, page_addr, col_addr, sec_num, false, NULL, NORMAL_READ)) {
				pr_warn("mtk_nand_ready_for_read return failed\n");
				res = -EIO;
				goto error;
			}
			if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
				pr_warn("mtk_nand_mcu_read_data return failed\n");
				res = -EIO;
				goto error;
			}
			mtk_nand_stop_read();
			sector++;
			len -= read_len;
		}
	} else {
		col_addr = sector_size;
		if (chip->options & NAND_BUSWIDTH_16)
			col_addr /= 2;
		if (!mtk_nand_reset())
			goto error;
		mtk_nand_set_mode(0x6000);
		NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
		DRV_WriteReg32(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
		NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		mtk_nand_set_autoformat(false);
		if (!mtk_nand_set_command(NAND_CMD_READ0))
			goto error;
		/* FIXME: handle any kind of address cycle */
		if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
			goto error;
		if (!mtk_nand_set_command(NAND_CMD_READSTART))
			goto error;
		if (!mtk_nand_status_ready(STA_NAND_BUSY))
			goto error;
		read_len = min(len, spare_per_sector);
		if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
			pr_warn("mtk_nand_mcu_read_data return failed first 16\n");
			res = -EIO;
			goto error;
		}
		sector++;
		len -= read_len;
		mtk_nand_stop_read();
		while (len > 0) {
			read_len = min(len, spare_per_sector);
			if (!mtk_nand_set_command(0x05))
				goto error;
			col_addr = sector_size + sector * (sector_size + 16);	/* TODO_JP: careful, hard-coded 16 */
			if (chip->options & NAND_BUSWIDTH_16)
				col_addr /= 2;
			DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
			DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
			DRV_WriteReg32(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
			if (!mtk_nand_status_ready(STA_ADDR_STATE))
				goto error;
			if (!mtk_nand_set_command(0xE0))
				goto error;
			if (!mtk_nand_status_ready(STA_NAND_BUSY))
				goto error;
			if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
				/* TODO: and this hard-coded 8 */
				pr_warn("mtk_nand_mcu_read_data return failed first 16\n");
				res = -EIO;
				goto error;
			}
			mtk_nand_stop_read();
			sector++;
			len -= read_len;
		}
	}
error:
	NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BRD);
	return res;
}
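/*
 * In the RAMDOM_READ branch above the per-sector spare areas are fetched
 * with the standard "random data output" sequence instead of re-issuing a
 * full page read: 0x05 (random data out) + 2-byte column address + 0xE0
 * (confirm) moves the read pointer inside the page already latched in the
 * data register, so only the first sector pays the array-read latency.
 * Roughly, per sector n:
 *
 *	cmd 0x05, column = sector_size + n * (sector_size + spare), cmd 0xE0
 *	read spare_per_sector bytes through the MCU FIFO
 *
 * Note the loop still hard-codes a 16-byte spare in the column address
 * (see the TODO_JP comment), which only matches spare_per_sector == 16.
 */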
static int mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t *buf, int page_addr, int len)
{
	struct nand_chip *chip = mtd->priv;
	u32 col_addr = 0;
	u32 sector = 0;
	int write_len = 0;
	int status;
	int sec_num = 1 << (chip->page_shift - host->hw->nand_sec_shift);
	int spare_per_sector = mtd->oobsize / sec_num;
	u32 sector_size = NAND_SECTOR_SIZE;

	if (gn_devinfo.sectorsize == 1024)
		sector_size = 1024;
	if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
		pr_warn("[%s] invalid parameter, len: %d, buf: %p\n", __func__, len, buf);
		return -EINVAL;
	}
	while (len > 0) {
		write_len = min(len, spare_per_sector);
		col_addr = sector * (sector_size + spare_per_sector) + sector_size;
		if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
			return -EIO;
		if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
			return -EIO;
		(void)mtk_nand_check_RW_count(write_len);
		NFI_CLN_REG32(NFI_CON_REG16, CON_NFI_BWR);
		(void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
		while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
			;
		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL) {
			pr_debug("status: %d\n", status);
			return -EIO;
		}
		len -= write_len;
		sector++;
	}
	return 0;
}
static int mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	int i, iter;
	int sec_num = 1 << (chip->page_shift - host->hw->nand_sec_shift);
	int spare_per_sector = mtd->oobsize / sec_num;

	memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
	for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
		iter = (i / OOB_AVAI_PER_SECTOR) * spare_per_sector + OOB_AVAI_PER_SECTOR +
		       i % OOB_AVAI_PER_SECTOR;
		local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
	}
	for (i = 0; i < sec_num; i++)
		memcpy(&local_oob_buf[i * spare_per_sector],
		       &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
	return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
}
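/*
 * mtk_nand_write_oob_hw() converts the MTD view of the OOB (FDM bytes
 * first, ECC bytes at chip->ecc.layout->eccpos) into the on-flash
 * per-sector layout, where every sector owns spare_per_sector bytes laid
 * out as [FDM | ECC]. A rough sketch of the mapping for ECC byte i:
 *
 *	sector  = i / OOB_AVAI_PER_SECTOR;
 *	offset  = i % OOB_AVAI_PER_SECTOR;
 *	raw_pos = sector * spare_per_sector + OOB_AVAI_PER_SECTOR + offset;
 *
 * mtk_nand_read_oob_hw() further below performs the inverse transform.
 */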
static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
	u32 block;
	u16 page_in_block;
	u32 mapped_block;

	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	if (mapped_block != block)
		set_bad_index_to_oob(chip->oob_poi, block);
	else
		set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
	if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
		MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block,
		    page_in_block);
		if (update_bmt((u64) ((u64) page_in_block + (u64) mapped_block * page_per_block) <<
			       chip->page_shift, UPDATE_WRITE_FAIL, NULL, chip->oob_poi)) {
			MSG(INIT, "Update BMT success\n");
			return 0;
		}
		MSG(INIT, "Update BMT fail\n");
		return -EIO;
	}
	return 0;
}
int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
{
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	struct nand_chip *chip = mtd->priv;
#endif
	int block;	/* = (int)(offset / (gn_devinfo.blocksize * 1024)); */
	int page;	/* = block * (gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize); */
	int ret;
	loff_t temp;
	u8 buf[8];

	temp = offset;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	block = (u32) temp;
	page = block * (gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize);
	memset(buf, 0xFF, 8);
	buf[0] = 0;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if (mtk_is_normal_tlc_nand())
		ret = mtk_nand_tlc_block_mark(mtd, chip, block);
	else
#endif
		ret = mtk_nand_write_oob_raw(mtd, buf, page, 8);
	return ret;	/* propagate the mark result instead of dropping it */
}
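/*
 * do_div() is used throughout these helpers because offset is a 64-bit
 * loff_t and this code must build on 32-bit targets, where a plain 64/32
 * division would require libgcc helpers. do_div(temp, d) divides temp by d
 * in place and returns the remainder. A rough worked example, with
 * illustrative numbers: for blocksize = 1024 (KiB, i.e. 1MiB blocks),
 * offset 0x2500000 (37MiB) yields block 37, and the first page of that
 * block is 37 * (blocksize * 1024 / pagesize).
 */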
static int mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset, const uint8_t *buf)
{
	struct nand_chip *chip = mtd->priv;
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;	/* = (u32)(offset / (gn_devinfo.blocksize * 1024)); */
	int page;	/* = block * (gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize); */
	u32 mapped_block;
	int ret;
	loff_t temp;

	temp = offset;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	block = (u32) temp;
	page = block * (gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize);
	nand_get_device(mtd, FL_WRITING);
	page = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	if (NULL != buf) {
		MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page);
		if (update_bmt((u64) ((u64) page + (u64) mapped_block * page_per_block) << chip->page_shift,
			       UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
			pr_err("Update BMT success\n");
		} else {
			pr_err("Update BMT fail\n");
			nand_release_device(mtd);
			return -EIO;
		}
	}
	ret = mtk_nand_block_markbad_hw(mtd, mapped_block * (gn_devinfo.blocksize * 1024));
	nand_release_device(mtd);
	return ret;
}
int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	int i;
	u8 iter = 0;
	int sec_num = 1 << (chip->page_shift - host->hw->nand_sec_shift);
	int spare_per_sector = mtd->oobsize / sec_num;
#ifdef TESTTIME
	unsigned long long time1, time2;

	time1 = sched_clock();
#endif
	if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
		/* pr_err("[%s]mtk_nand_read_oob_raw return failed\n", __func__); */
		return -EIO;
	}
#ifdef TESTTIME
	time2 = sched_clock() - time1;
	if (!readoobflag) {
		readoobflag = 1;
		pr_err("[%s] time is %llu", __func__, time2);
	}
#endif
	/* adjust the ecc physical layout to the memory layout */
	/*********************************************************/
	/* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
	/*  8B  |  8B  |  8B  |  8B  |  8B  |  8B  |  8B  |  8B  */
	/*********************************************************/
	memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
	/* copy ecc data */
	for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
		iter = (i / OOB_AVAI_PER_SECTOR) * spare_per_sector + OOB_AVAI_PER_SECTOR +
		       i % OOB_AVAI_PER_SECTOR;
		chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
	}
	/* copy FDM data */
	for (i = 0; i < sec_num; i++)
		memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR],
		       &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
	return 0;
}
static int mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
	mtk_nand_read_page(mtd, chip, temp_buffer_16_align, page);
	return 0;	/* the return value is sndcmd */
}
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
bool mtk_nand_slc_write_wodata(struct nand_chip *chip, u32 page)
{
	bool bRet = FALSE;
	bool slc_en;
	u32 real_row_addr;
	u32 reg_val;
	struct NFI_TLC_WL_INFO tlc_wl_info;

	NFI_TLC_GetMappedWL(page, &tlc_wl_info);
	real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
	reg_val = DRV_Reg16(NFI_CNFG_REG16);
	reg_val &= ~CNFG_READ_EN;
	reg_val &= ~CNFG_OP_MODE_MASK;
	reg_val |= CNFG_OP_CUST;
	DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
	mtk_nand_set_command(0xA2);
	reg_val = DRV_Reg32(NFI_CON_REG16);
	reg_val |= CON_FIFO_FLUSH | CON_NFI_RST;
	/* issue reset operation */
	DRV_WriteReg32(NFI_CON_REG16, reg_val);
	mtk_nand_set_mode(CNFG_OP_PRGM);
	mtk_nand_set_command(NAND_CMD_SEQIN);
	mtk_nand_set_address(0, real_row_addr, 2, 3);
	mtk_nand_set_command(NAND_CMD_PAGEPROG);
	slc_en = gn_devinfo.tlcControl.slcopmodeEn;
	gn_devinfo.tlcControl.slcopmodeEn = TRUE;
	bRet = !mtk_nand_read_status();
	gn_devinfo.tlcControl.slcopmodeEn = slc_en;
	if (bRet)
		pr_warn("mtk_nand_slc_write_wodata: page %d is bad\n", page);
	return bRet;
}

u64 mtk_nand_device_size(void)
{
	u64 totalsize;

	totalsize = (u64)gn_devinfo.totalsize << 10;
	return totalsize;
}
#endif
int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	int page_addr = (int)(ofs >> chip->page_shift);
	u32 block, mapped_block;
	int ret;
	unsigned int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	bool bRet;
#endif

#if !defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	page_addr &= ~(page_per_block - 1);
#endif
	memset(temp_buffer_16_align, 0xFF, LPAGE);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.vendor == VEND_SANDISK)
	    && mtk_nand_IsBMTPOOL(ofs)) {
		page_addr = mtk_nand_page_transform(mtd, chip, page_addr, &block, &mapped_block);
		bRet = mtk_nand_slc_write_wodata(chip, mapped_block * page_per_block);
		/* pr_warn("after mtk_nand_slc_write_wodata\n"); */
		if (bRet)
			ret = 1;
		else
			ret = 0;
		return ret;
	}
#endif
	ret = mtk_nand_read_subpage(mtd, chip, temp_buffer_16_align, (ofs >> chip->page_shift), 0, 1);
	page_addr = mtk_nand_page_transform(mtd, chip, page_addr, &block, &mapped_block);
	if (0 != ret) {
		pr_warn("mtk_nand_read_oob_raw return error %d\n", ret);
		return 1;
	}
	if (chip->oob_poi[0] != 0xff) {
		pr_warn("Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", block * page_per_block,
			chip->oob_poi[0]);
		return 1;
	}
	return 0;	/* everything is OK, good block */
}
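/*
 * Bad-block convention used above: a block is good when the first spare
 * byte (chip->oob_poi[0], i.e. FDM byte 0 of sector 0) reads back as 0xFF.
 * mtk_nand_block_markbad_hw() marks a block bad by writing 0x00 to that
 * byte, so any non-0xFF value is treated as a factory or driver bad mark.
 * For SanDisk TLC blocks inside the BMT pool the check is replaced by the
 * write-without-data probe (mtk_nand_slc_write_wodata) instead.
 */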
static int mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	int chipnr = 0;
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	int block;	/* = (int)(ofs / (gn_devinfo.blocksize * 1024)); */
	int mapped_block;
	int page = (int)(ofs >> chip->page_shift);
	int page_in_block;
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	loff_t temp;
	int ret;

	temp = ofs;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	block = (int) temp;
	if (getchip) {
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		temp = mtk_nand_device_size();
		if (ofs >= temp)
			chipnr = 1;
		else
			chipnr = 0;
#else
		chipnr = (int)(ofs >> chip->chip_shift);
#endif
		nand_get_device(mtd, FL_READING);
		/* Select the NAND device */
		chip->select_chip(mtd, chipnr);
	}
	ret = mtk_nand_block_bad_hw(mtd, ofs);
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	if (ret) {
		MSG(INIT, "Unmapped bad block: 0x%x %d\n", mapped_block, ret);
		if (update_bmt((u64) ((u64) page_in_block + (u64) mapped_block * page_per_block) <<
			       chip->page_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
			MSG(INIT, "Update BMT success\n");
			ret = 0;
		} else {
			MSG(INIT, "Update BMT fail\n");
			ret = 1;
		}
	}
	if (getchip)
		nand_release_device(mtd);
	return ret;
}
/******************************************************************************
 * mtk_nand_init_size
 *
 * DESCRIPTION:
 *	initialize the pagesize, oobsize, blocksize
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, struct nand_chip *this, u8 *id_data
 *
 * RETURNS:
 *	Buswidth
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
static int mtk_nand_init_size(struct mtd_info *mtd, struct nand_chip *this, u8 *id_data)
{
	/* Get page size */
	mtd->writesize = gn_devinfo.pagesize;
	/* Get oobsize */
	mtd->oobsize = gn_devinfo.sparesize;
	/* Get blocksize */
	mtd->erasesize = gn_devinfo.blocksize * 1024;
	/* Get buswidth information */
	if (gn_devinfo.iowidth == 16)
		return NAND_BUSWIDTH_16;
	else
		return 0;
}
int mtk_nand_write_tlc_wl_hw(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, u32 wl, enum NFI_TLC_PG_CYCLE program_cycle)
{
	u32 page;
	uint8_t *temp_buf = NULL;
#if defined(CFG_PERFLOG_DEBUG)
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	gn_devinfo.tlcControl.slcopmodeEn = FALSE;
	tlc_program_cycle = program_cycle;
	page = wl * 3;
	temp_buf = buf;
	memcpy(local_tlc_wl_buffer, temp_buf, mtd->writesize);
	if (mtk_nand_exec_write_page(mtd, page, mtd->writesize, local_tlc_wl_buffer, chip->oob_poi)) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page);
		return -EIO;
	}
	temp_buf += mtd->writesize;
	memcpy(local_tlc_wl_buffer, temp_buf, mtd->writesize);
	if (mtk_nand_exec_write_page(mtd, page + 1, mtd->writesize, local_tlc_wl_buffer, chip->oob_poi)) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page + 1);
		return -EIO;
	}
	temp_buf += mtd->writesize;
	memcpy(local_tlc_wl_buffer, temp_buf, mtd->writesize);
	if (mtk_nand_exec_write_page(mtd, page + 2, mtd->writesize, local_tlc_wl_buffer, chip->oob_poi)) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page + 2);
		return -EIO;
	}
#if defined(CFG_PERFLOG_DEBUG)
	do_gettimeofday(&etimer);
	g_NandPerfLog.WritePageTotalTime += Cal_timediff(&etimer, &stimer);
	g_NandPerfLog.WritePageCount++;
	dump_nand_rwcount();
#endif
	return 0;
}
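/*
 * On this TLC part one word line stores three logical pages (commonly
 * called lower/middle/upper), hence page = wl * 3 above and the three
 * back-to-back mtk_nand_exec_write_page() calls for pages wl*3, wl*3+1 and
 * wl*3+2. local_tlc_wl_buffer appears to serve as a bounce buffer so that
 * the write path always sees a properly aligned source even when the
 * caller's buf is not; that rationale is an assumption, not documented in
 * this file.
 */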
int mtk_nand_write_tlc_wl(struct mtd_info *mtd, struct nand_chip *chip,
			  uint8_t *buf, u32 wl, enum NFI_TLC_PG_CYCLE program_cycle)
{
	int bRet;

	bRet = mtk_nand_write_tlc_wl_hw(mtd, chip, buf, wl, program_cycle);
	return bRet;
}
int mtk_nand_write_tlc_block_hw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, u32 mapped_block)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 index;
	int bRet = 0;
	u32 base_wl_index;
	u8 *temp_buf = NULL;

#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	base_wl_index = mapped_block * page_per_block / 3;
	for (index = 0; index < (page_per_block / 3); index++) {
		if (index == 0) {
			temp_buf = buf + (index * 3 * mtd->writesize);
			bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index, PROGRAM_1ST_CYCLE);
			if (bRet != 0)
				break;
			temp_buf = buf + ((index + 1) * 3 * mtd->writesize);
			bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index + 1, PROGRAM_1ST_CYCLE);
			if (bRet != 0)
				break;
			temp_buf = buf + (index * 3 * mtd->writesize);
			bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index, PROGRAM_2ND_CYCLE);
			if (bRet != 0)
				break;
		}
		if ((index + 2) < (page_per_block / 3)) {
			temp_buf = buf + ((index + 2) * 3 * mtd->writesize);
			bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index + 2, PROGRAM_1ST_CYCLE);
			if (bRet != 0)
				break;
		}
		if ((index + 1) < (page_per_block / 3)) {
			temp_buf = buf + ((index + 1) * 3 * mtd->writesize);
			bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index + 1, PROGRAM_2ND_CYCLE);
			if (bRet != 0)
				break;
		}
		temp_buf = buf + (index * 3 * mtd->writesize);
		bRet = mtk_nand_write_tlc_wl(mtd, chip, temp_buf, base_wl_index + index, PROGRAM_3RD_CYCLE);
		if (bRet != 0)
			break;
	}
	if (bRet != 0)
		return -EIO;
#else
	base_wl_index = mapped_block * page_per_block;
	for (index = 0; index < page_per_block; index++) {
		temp_buf = buf + (index * mtd->writesize);
		bRet = mtk_nand_exec_write_page(mtd, base_wl_index + index, mtd->writesize, temp_buf, chip->oob_poi);
		if (bRet != 0)
			break;
	}
	if (bRet != 0)	/* propagate write failures on the non-TLC path as well */
		return -EIO;
#endif
	return 0;
}
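/*
 * The loop above implements the staircase programming order that TLC parts
 * require to limit cell-to-cell interference: each word line is programmed
 * in three passes (1st/2nd/3rd cycle) interleaved with its neighbours
 * rather than finishing one WL before moving on. For the first word lines
 * the resulting order is:
 *
 *	WL0-1st, WL1-1st, WL0-2nd, WL2-1st, WL1-2nd, WL0-3rd,
 *	WL3-1st, WL2-2nd, WL1-3rd, WL4-1st, WL3-2nd, WL2-3rd, ...
 *
 * i.e. at loop iteration index the driver issues 1st(index + 2),
 * 2nd(index + 1), 3rd(index), with the index == 0 special case priming the
 * sequence.
 */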
int mtk_nand_write_tlc_block(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, u32 page)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 block;
	u32 page_in_block;
	u32 mapped_block;
	int bRet;
#if defined(CFG_PERFLOG_DEBUG)
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	if (gn_devinfo.NAND_FLASH_TYPE != NAND_FLASH_TLC && gn_devinfo.NAND_FLASH_TYPE != NAND_FLASH_MLC_HYBER) {
		MSG(INIT, "error : not tlc nand\n");
		return -EIO;
	}
	page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
	if (page_in_block != 0) {
		MSG(INIT, "error : normal tlc block program is not block aligned\n");
		return -EIO;
	}
	memset(chip->oob_poi, 0xff, mtd->oobsize);
	if (mapped_block != block)
		set_bad_index_to_oob(chip->oob_poi, block);
	else
		set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
	bRet = mtk_nand_write_tlc_block_hw(mtd, chip, buf, mapped_block);
	if (bRet != 0) {
		MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
		if (update_bmt((u64)((u64)page_in_block + (u64)mapped_block * page_per_block) << chip->page_shift,
			       UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
			MSG(INIT, "Update BMT success\n");
			return 0;
		}
		MSG(INIT, "Update BMT fail\n");
		return -EIO;
	}
#if defined(CFG_PERFLOG_DEBUG)
	do_gettimeofday(&etimer);
	g_NandPerfLog.WritePageTotalTime += Cal_timediff(&etimer, &stimer);
	g_NandPerfLog.WritePageCount++;
	dump_nand_rwcount();
#endif
	return 0;
}
bool mtk_is_tlc_nand(void)
{
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
		return TRUE;
	else
		return FALSE;
}

bool mtk_is_normal_tlc_nand(void)
{
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.normaltlc))
		return TRUE;
	if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER)
		return TRUE;
	return FALSE;
}
int mtk_nand_tlc_wl_mark(struct mtd_info *mtd, struct nand_chip *chip,
			 uint8_t *buf, u32 wl, enum NFI_TLC_PG_CYCLE program_cycle)
{
	u32 page;
#if defined(CFG_PERFLOG_DEBUG)
	struct timeval stimer, etimer;

	do_gettimeofday(&stimer);
#endif
	gn_devinfo.tlcControl.slcopmodeEn = FALSE;
	tlc_program_cycle = program_cycle;
	page = wl * 3;
	if (mtk_nand_exec_write_page(mtd, page, mtd->writesize, buf, (buf + mtd->writesize))) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page);
		return -EIO;
	}
	if (mtk_nand_exec_write_page(mtd, page + 1, mtd->writesize, buf, (buf + mtd->writesize))) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page + 1);
		return -EIO;
	}
	if (mtk_nand_exec_write_page(mtd, page + 2, mtd->writesize, buf, (buf + mtd->writesize))) {
		MSG(INIT, "write fail at wl: 0x%x, page: 0x%x\n", wl, page + 2);
		return -EIO;
	}
#if defined(CFG_PERFLOG_DEBUG)
	do_gettimeofday(&etimer);
	g_NandPerfLog.WritePageTotalTime += Cal_timediff(&etimer, &stimer);
	g_NandPerfLog.WritePageCount++;
	dump_nand_rwcount();
#endif
	return 0;
}
int mtk_nand_tlc_block_mark(struct mtd_info *mtd, struct nand_chip *chip, u32 mapped_block)
{
	int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
	u32 index;
	int bRet = 0;
	u32 base_wl_index;
	u8 *buf = local_tlc_wl_buffer;

	memset(buf, 0xAA, LPAGE + LSPARE);
	if (gn_devinfo.tlcControl.slcopmodeEn) {
		if (mtk_nand_exec_write_page
		    (mtd, mapped_block * page_per_block, mtd->writesize, buf, (buf + mtd->writesize))) {
			MSG(INIT, "mark fail at page: 0x%x\n", mapped_block * page_per_block);
			return -EIO;
		}
	} else {
		base_wl_index = mapped_block * page_per_block / 3;
		for (index = 0; index < (page_per_block / 3); index++) {
			if (index == 0) {
				bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf,
							    base_wl_index + index, PROGRAM_1ST_CYCLE);
				if (bRet != 0)
					break;
				bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf,
							    base_wl_index + index + 1, PROGRAM_1ST_CYCLE);
				if (bRet != 0)
					break;
				bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf,
							    base_wl_index + index, PROGRAM_2ND_CYCLE);
				if (bRet != 0)
					break;
			}
			if ((index + 2) < (page_per_block / 3)) {
				bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf,
							    base_wl_index + index + 2, PROGRAM_1ST_CYCLE);
				if (bRet != 0)
					break;
			}
			if ((index + 1) < (page_per_block / 3)) {
				bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf,
							    base_wl_index + index + 1, PROGRAM_2ND_CYCLE);
				if (bRet != 0)
					break;
			}
			bRet = mtk_nand_tlc_wl_mark(mtd, chip, buf, base_wl_index + index, PROGRAM_3RD_CYCLE);
			if (bRet != 0)
				break;
		}
		if (bRet != 0)
			return -EIO;
		return 0;
	}
	return 0;
}
/******************************************************************************
 * mtk_nand_verify_buf
 *
 * DESCRIPTION:
 *	Verify that the NAND write data is correct
 *
 * PARAMETERS:
 *	struct mtd_info *mtd, const uint8_t *buf, int len
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
char gacBuf[LPAGE + LSPARE];

static int mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
#if 1
	struct nand_chip *chip = (struct nand_chip *)mtd->priv;
	struct NAND_CMD *pkCMD = &g_kCMD;
	u32 u4PageSize = mtd->writesize;
	u32 *pSrc, *pDst;
	int i;

	if (((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	     || (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_MLC_HYBER))
	    && (gn_devinfo.tlcControl.normaltlc) && (!mtk_block_istlc(pkCMD->u4RowAddr * mtd->writesize)))
		gn_devinfo.tlcControl.slcopmodeEn = TRUE;
	else
		gn_devinfo.tlcControl.slcopmodeEn = FALSE;
	mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
	pSrc = (u32 *) buf;
	pDst = (u32 *) gacBuf;
	len = len / sizeof(u32);
	for (i = 0; i < len; ++i) {
		if (*pSrc != *pDst) {
			MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
			return -1;
		}
		pSrc++;
		pDst++;
	}
	pSrc = (u32 *) chip->oob_poi;
	pDst = (u32 *) (gacBuf + u4PageSize);
	if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2])
	    || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
		/* TODO: ask designer why words 6 and 7 are not compared */
		/* (pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7])) */
		MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
		MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2],
		    pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
		MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2],
		    pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
		return -1;
	}
	/*
	for (i = 0; i < len; ++i) {
		if (*pSrc != *pDst) {
			pr_err("mtk_nand_verify_buf oob fail at page %d\n", g_kCMD.u4RowAddr);
			return -1;
		}
		pSrc++;
		pDst++;
	}
	*/
	return 0;
#else
	return 0;
#endif
}
#endif
/******************************************************************************
 * mtk_nand_init_hw
 *
 * DESCRIPTION:
 *	Initialize the NAND device hardware components
 *
 * PARAMETERS:
 *	struct mtk_nand_host *host (initial setting data)
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
static void mtk_nand_init_hw(struct mtk_nand_host *host)
{
	struct mtk_nand_host_hw *hw = host->hw;

	g_bInitDone = false;
	g_kCMD.u4OOBRowAddr = (u32) -1;
	/* Set default NFI access timing control */
	DRV_WriteReg32(NFI_ACCCON_REG32, 0x31C083F9);
	DRV_WriteReg16(NFI_CNFG_REG16, 0);
	DRV_WriteReg32(NFI_PAGEFMT_REG16, 4);
	DRV_WriteReg32(NFI_EMPTY_THRESHOLD, 40);
	/* Reset the state machine and data FIFO (this also flushes the FIFO) */
	(void)mtk_nand_reset();
	/* Set the ECC engine */
	if (hw->nand_ecc_mode == NAND_ECC_HW) {
		MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
		if (g_bHwEcc)
			NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
		ECC_Config(host->hw, 4);
		mtk_nand_configure_fdm(8);
	}
	/* Initialize the interrupt; the interrupt register is read-clear */
	DRV_Reg16(NFI_INTR_REG16);
	/* An interrupt is raised when reading or programming data over AHB is done */
	DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
	/* Automatically gate the ECC clock while the NFI is busy */
	DRV_WriteReg16(NFI_DEBUG_CON1_REG16, (NFI_BYPASS | WBUF_EN | HWDCM_SWCON_ON));
	NFI_SET_REG32(NFI_DEBUG_CON1_REG16, NFI_BYPASS);
	NFI_SET_REG32(ECC_BYPASS_REG32, ECC_BYPASS);
	DRV_WriteReg16(NFI_NAND_TYPE_CNFG_REG32, 0);
#ifdef CONFIG_PM
	host->saved_para.suspend_flag = 0;
#endif
	/* Reset */
}
/*-------------------------------------------------------------------------------*/
static int mtk_nand_dev_ready(struct mtd_info *mtd)
{
	return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
}
/******************************************************************************
 * mtk_nand_proc_read
 *
 * DESCRIPTION:
 *	Read the proc file to get the interrupt scheme setting
 *
 * PARAMETERS:
 *	char *page, char **start, off_t off, int count, int *eof, void *data
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
int mtk_nand_proc_read(struct file *file, char *buffer, size_t count, loff_t *ppos)
{
	char *p = buffer;
	int len = 0;
	int i;

	p += sprintf(p, "ID: ");
	for (i = 0; i < gn_devinfo.id_length; i++)
		p += sprintf(p, " 0x%x", gn_devinfo.id[i]);
	p += sprintf(p, "\n");
	p += sprintf(p, "total size: %dMiB; part number: %s\n", gn_devinfo.totalsize,
		     gn_devinfo.devciename);
	p += sprintf(p, "Current working in %s mode\n", g_i4Interrupt ? "interrupt" : "polling");
	p += sprintf(p, "NFI_ACCON = 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
	p += sprintf(p, "NFI_NAND_TYPE_CNFG_REG32 = 0x%x\n", DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32));
#ifdef DUMP_PEF
	p += sprintf(p, "Read Page Count: %d, Read Page totalTime: %lu, Avg. RPage: %lu\r\n",
		     g_NandPerfLog.ReadPageCount, g_NandPerfLog.ReadPageTotalTime,
		     g_NandPerfLog.ReadPageCount ? (g_NandPerfLog.ReadPageTotalTime /
						    g_NandPerfLog.ReadPageCount) : 0);
	p += sprintf(p, "Read subPage Count: %d, Read subPage totalTime: %lu, Avg. RPage: %lu\r\n",
		     g_NandPerfLog.ReadSubPageCount, g_NandPerfLog.ReadSubPageTotalTime,
		     g_NandPerfLog.ReadSubPageCount ? (g_NandPerfLog.ReadSubPageTotalTime /
						       g_NandPerfLog.ReadSubPageCount) : 0);
	p += sprintf(p, "Read Busy Count: %d, Read Busy totalTime: %lu, Avg. R Busy: %lu\r\n",
		     g_NandPerfLog.ReadBusyCount, g_NandPerfLog.ReadBusyTotalTime,
		     g_NandPerfLog.ReadBusyCount ? (g_NandPerfLog.ReadBusyTotalTime /
						    g_NandPerfLog.ReadBusyCount) : 0);
	p += sprintf(p, "Read DMA Count: %d, Read DMA totalTime: %lu, Avg. R DMA: %lu\r\n",
		     g_NandPerfLog.ReadDMACount, g_NandPerfLog.ReadDMATotalTime,
		     g_NandPerfLog.ReadDMACount ? (g_NandPerfLog.ReadDMATotalTime /
						   g_NandPerfLog.ReadDMACount) : 0);
	p += sprintf(p, "Write Page Count: %d, Write Page totalTime: %lu, Avg. WPage: %lu\r\n",
		     g_NandPerfLog.WritePageCount, g_NandPerfLog.WritePageTotalTime,
		     g_NandPerfLog.WritePageCount ? (g_NandPerfLog.WritePageTotalTime /
						     g_NandPerfLog.WritePageCount) : 0);
	p += sprintf(p, "Write Busy Count: %d, Write Busy totalTime: %lu, Avg. W Busy: %lu\r\n",
		     g_NandPerfLog.WriteBusyCount, g_NandPerfLog.WriteBusyTotalTime,
		     g_NandPerfLog.WriteBusyCount ? (g_NandPerfLog.WriteBusyTotalTime /
						     g_NandPerfLog.WriteBusyCount) : 0);
	p += sprintf(p, "Write DMA Count: %d, Write DMA totalTime: %lu, Avg. W DMA: %lu\r\n",
		     g_NandPerfLog.WriteDMACount, g_NandPerfLog.WriteDMATotalTime,
		     g_NandPerfLog.WriteDMACount ? (g_NandPerfLog.WriteDMATotalTime /
						    g_NandPerfLog.WriteDMACount) : 0);
	p += sprintf(p, "EraseBlock Count: %d, EraseBlock totalTime: %lu, Avg. Erase: %lu\r\n",
		     g_NandPerfLog.EraseBlockCount, g_NandPerfLog.EraseBlockTotalTime,
		     g_NandPerfLog.EraseBlockCount ? (g_NandPerfLog.EraseBlockTotalTime /
						      g_NandPerfLog.EraseBlockCount) : 0);
#endif
	len = p - buffer;
	return len < count ? len : count;
}
/******************************************************************************
 * mtk_nand_proc_write
 *
 * DESCRIPTION:
 *	Write the proc file to set the interrupt scheme
 *
 * PARAMETERS:
 *	struct file *file, const char *buffer, unsigned long count, void *data
 *
 * RETURNS:
 *	None
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
ssize_t mtk_nand_proc_write(struct file *file, const char *buffer, size_t count, loff_t *data)
{
	struct mtd_info *mtd = &host->mtd;
	char buf[16];
	char cmd;
	int value;
	int len = (int)count;
	int ret;

	if (len >= sizeof(buf))
		len = sizeof(buf) - 1;
	if (copy_from_user(buf, buffer, len))
		return -EFAULT;
	buf[len] = '\0';	/* NUL-terminate the user string before sscanf() parses it */
	ret = sscanf(buf, "%c%x", &cmd, &value);
	if (ret)
		pr_debug("ret = %d\n", ret);
	switch (cmd) {
	case '1':
#ifdef DUMP_PEF
#ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
		g_NandPerfLog.ReadPageCount = 0;
		g_NandPerfLog.ReadPageTotalTime = 0;
		g_NandPerfLog.ReadBusyCount = 0;
		g_NandPerfLog.ReadBusyTotalTime = 0;
		g_NandPerfLog.ReadDMACount = 0;
		g_NandPerfLog.ReadDMATotalTime = 0;
		g_NandPerfLog.WritePageCount = 0;
		g_NandPerfLog.WritePageTotalTime = 0;
		g_NandPerfLog.WriteBusyCount = 0;
		g_NandPerfLog.WriteBusyTotalTime = 0;
		g_NandPerfLog.WriteDMACount = 0;
		g_NandPerfLog.WriteDMATotalTime = 0;
		g_NandPerfLog.EraseBlockCount = 0;
		g_NandPerfLog.EraseBlockTotalTime = 0;
		g_NandPerfLog.ReadPageCount = 0;
		g_snand_pm_on = 1;
		g_snand_pm_cnt = 0;
		g_snand_pm_wrapped = 0;
#endif
#endif
		break;
	case '2':
#ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
		nand_get_device(mtd, FL_READING);
		g_snand_pm_on = 0;
		mtk_snand_pm_dump_record();
#ifdef DUMP_PEF
		if (g_NandPerfLog.ReadPageCount != 0)
			pr_err("Read Page Count: %d, Read Page totalTime: %lu, Avg. RPage: %lu\r\n",
			       g_NandPerfLog.ReadPageCount, g_NandPerfLog.ReadPageTotalTime,
			       g_NandPerfLog.ReadPageTotalTime / g_NandPerfLog.ReadPageCount);
		if (g_NandPerfLog.ReadBusyCount != 0)
			pr_err("Read Busy Count: %d, Read Busy totalTime: %lu, Avg. R Busy: %lu\r\n",
			       g_NandPerfLog.ReadBusyCount, g_NandPerfLog.ReadBusyTotalTime,
			       g_NandPerfLog.ReadBusyTotalTime / g_NandPerfLog.ReadBusyCount);
		if (g_NandPerfLog.ReadDMACount != 0)
			pr_err("Read DMA Count: %d, Read DMA totalTime: %lu, Avg. R DMA: %lu\r\n",
			       g_NandPerfLog.ReadDMACount, g_NandPerfLog.ReadDMATotalTime,
			       g_NandPerfLog.ReadDMATotalTime / g_NandPerfLog.ReadDMACount);
		if (g_NandPerfLog.WritePageCount != 0)
			pr_err("Write Page Count: %d, Write Page totalTime: %lu, Avg. WPage: %lu\r\n",
			       g_NandPerfLog.WritePageCount, g_NandPerfLog.WritePageTotalTime,
			       g_NandPerfLog.WritePageTotalTime / g_NandPerfLog.WritePageCount);
		if (g_NandPerfLog.WriteBusyCount != 0)
			pr_err("Write Busy Count: %d, Write Busy totalTime: %lu, Avg. W Busy: %lu\r\n",
			       g_NandPerfLog.WriteBusyCount, g_NandPerfLog.WriteBusyTotalTime,
			       g_NandPerfLog.WriteBusyTotalTime / g_NandPerfLog.WriteBusyCount);
		if (g_NandPerfLog.WriteDMACount != 0)
			pr_err("Write DMA Count: %d, Write DMA totalTime: %lu, Avg. W DMA: %lu\r\n",
			       g_NandPerfLog.WriteDMACount, g_NandPerfLog.WriteDMATotalTime,
			       g_NandPerfLog.WriteDMATotalTime / g_NandPerfLog.WriteDMACount);
		if (g_NandPerfLog.EraseBlockCount != 0)
			pr_err("EraseBlock Count: %d, EraseBlock totalTime: %lu, Avg. Erase: %lu\r\n",
			       g_NandPerfLog.EraseBlockCount, g_NandPerfLog.EraseBlockTotalTime,
			       g_NandPerfLog.EraseBlockTotalTime / g_NandPerfLog.EraseBlockCount);
#endif
		nand_release_device(mtd);
#endif
		break;
	case 'A':	/* NFIA driving setting */
		break;
	case 'B':	/* NFIB driving setting */
		break;
	case 'D':
#ifdef _MTK_NAND_DUMMY_DRIVER_
		pr_debug("Enable dummy driver\n");
		dummy_driver_debug = 1;
#endif
		break;
	case 'I':
		if ((value > 0 && !g_i4Interrupt) || (value == 0 && g_i4Interrupt)) {
			nand_get_device(mtd, FL_READING);
			g_i4Interrupt = value;
			if (g_i4Interrupt) {
				DRV_Reg16(NFI_INTR_REG16);
				enable_irq(MT_NFI_IRQ_ID);
			} else
				disable_irq(MT_NFI_IRQ_ID);
			nand_release_device(mtd);
		}
		break;
	case 'P':
#ifdef NAND_PFM
		/* Reset values */
		g_PFM_R = 0;
		g_PFM_W = 0;
		g_PFM_E = 0;
		g_PFM_RD = 0;
		g_PFM_WD = 0;
		g_kCMD.pureReadOOBNum = 0;
#endif
		break;
	case 'R':
#ifdef DUMP_PEF
		g_NandPerfLog.ReadPageCount = 0;
		g_NandPerfLog.ReadPageTotalTime = 0;
		g_NandPerfLog.ReadBusyCount = 0;
		g_NandPerfLog.ReadBusyTotalTime = 0;
		g_NandPerfLog.ReadDMACount = 0;
		g_NandPerfLog.ReadDMATotalTime = 0;
		g_NandPerfLog.ReadSubPageCount = 0;
		g_NandPerfLog.ReadSubPageTotalTime = 0;
		g_NandPerfLog.WritePageCount = 0;
		g_NandPerfLog.WritePageTotalTime = 0;
		g_NandPerfLog.WriteBusyCount = 0;
		g_NandPerfLog.WriteBusyTotalTime = 0;
		g_NandPerfLog.WriteDMACount = 0;
		g_NandPerfLog.WriteDMATotalTime = 0;
		g_NandPerfLog.EraseBlockCount = 0;
		g_NandPerfLog.EraseBlockTotalTime = 0;
#endif
		break;
	case 'T':
		nand_get_device(mtd, FL_READING);
		DRV_WriteReg32(NFI_ACCCON_REG32, value);
		nand_release_device(mtd);
		break;
	default:
		break;
	}
	return len;
}
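/*
 * Rough usage sketch for the proc interface above (the proc entry itself is
 * created elsewhere in this file; substitute the actual path for <entry>):
 *
 *	echo I1 > /proc/<entry>          -- switch to interrupt mode
 *	echo I0 > /proc/<entry>          -- switch back to polling mode
 *	echo T31C083F9 > /proc/<entry>   -- program NFI_ACCCON timing
 *	echo R > /proc/<entry>           -- clear the DUMP_PEF perf counters
 *	cat /proc/<entry>                -- dump ID, size and perf counters
 *
 * The command letter and the optional hex argument are parsed by the
 * sscanf(buf, "%c%x", ...) call at the top of mtk_nand_proc_write().
 */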
/******************************************************************************
 * mtk_nand_probe
 *
 * DESCRIPTION:
 *	Register the NAND device file operations
 *
 * PARAMETERS:
 *	struct platform_device *pdev : device structure
 *
 * RETURNS:
 *	0 : Success
 *
 * NOTES:
 *	None
 *
 ******************************************************************************/
#ifdef TLC_UNIT_TEST
__aligned(64)
static u8 temp_buffer_tlc[LPAGE + LSPARE];
__aligned(64)
static u8 temp_buffer_tlc_rd[LPAGE + LSPARE];

int mtk_tlc_unit_test(struct nand_chip *nand_chip, struct mtd_info *mtd)
{
	int err = 0;
	int patternbuff[128] = {
		0x0103D901, 0xFF1802DF, 0x01200400, 0x00000021, 0x02040122, 0x02010122, 0x03020407, 0x1A050103,
		0x00020F1B, 0x08C0C0A1, 0x01550800, 0x201B0AC1, 0x41990155, 0x64F0FFFF, 0x201B0C82, 0x4118EA61,
		0xF00107F6, 0x0301EE1B, 0x0C834118, 0xEA617001, 0x07760301, 0xEE151405, 0x00202020, 0x20202020,
		0x00202020, 0x2000302E, 0x3000FF14, 0x00FF0000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x01D90301, 0xDF0218FF, 0x00042001, 0x21000000, 0x22010402, 0x22010102, 0x07040203, 0x0301051A,
		0x1B0F0200, 0xA1C0C008, 0x00085501, 0xC10A1B20, 0x55019941, 0xFFFFF064, 0x820C1B20, 0x61EA1841,
		0xF60701F0, 0x1BEE0103, 0x1841830C, 0x017061EA, 0x01037607, 0x051415EE, 0x20202000, 0x20202020,
		0x20202000, 0x2E300020, 0x14FF0030, 0x0000FF00, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
	};
	u32 j, k, p = g_block_size / g_page_size, m;
	u32 test_page;
	u8 *buf = vmalloc(g_block_size);

	if (!buf)	/* added check: vmalloc() can fail */
		return -ENOMEM;
	MSG(INIT, "Begin kernel TLC unit test ...\n");
	pr_warn("[P] %d\n", p);
	for (m = 0; m < 32; m++)
		memcpy(temp_buffer_tlc + (512 * m), (u8 *)patternbuff, 512);
	pr_warn("***************read pl***********************\n");
	memset(temp_buffer_tlc_rd, 0xA5, 16384);
	if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_tlc_rd, 1 * p / 3))
		pr_warn("Read page 0x%x fail!\n", 1 * p / 3);
	for (m = 0; m < 32; m++)
		MSG(INIT, "[5]0x%x %x %x %x\n",
		    *((int *)temp_buffer_tlc_rd + m * 4), *((int *)temp_buffer_tlc_rd + 1 + m * 4),
		    *((int *)temp_buffer_tlc_rd + 2 + m * 4), *((int *)temp_buffer_tlc_rd + 3 + m * 4));
#ifdef NAND_PFM
	g_PFM_R = 0;
	g_PFM_W = 0;
	g_PFM_E = 0;
	g_PFM_RNum = 0;
	g_PFM_RD = 0;
	g_PFM_WD = 0;
#endif
#if 1
	pr_warn("***************SLC MODE TEST***********************\n");
	test_page = 84 * p + (p / 3);
	mtk_nand_erase(mtd, test_page);
	for (k = 0; k < (p / 3); k++) {
		pr_warn("***************w p %d***********************\n", test_page + k);
		if (mtk_nand_write_page(mtd, nand_chip, 0, 0, temp_buffer_tlc, 0, test_page + k, 0, 0))
			pr_warn("Write page 0x%x fail!\n", test_page + k);
		pr_warn("***************r p %d***********************\n", test_page + k);
		memset(temp_buffer_tlc_rd, 0x00, g_page_size);
		if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_tlc_rd, test_page + k))
			pr_warn("Read page 0x%x fail!\n", test_page + k);
		if (memcmp(temp_buffer_tlc, temp_buffer_tlc_rd, g_page_size)) {
			MSG(INIT, "compare fail!\n");
			err = 1;
			break;
		}
	}
#endif
#ifdef NAND_PFM
	g_PFM_R = 0;
	g_PFM_W = 0;
	g_PFM_E = 0;
	g_PFM_RNum = 0;
	g_PFM_RD = 0;
	g_PFM_WD = 0;
#endif
#if 1
	pr_warn("***************TLC MODE TEST***********************\n");
	test_page = 880 * p;
	mtk_nand_erase(mtd, test_page);
	memset(buf, 0x00, g_block_size);
	for (k = 0; k < p; k++)
		memcpy(buf + (g_page_size * k), temp_buffer_tlc, g_page_size);
	pr_warn("***************w b %d***********************\n", test_page);
	mtk_nand_write_tlc_block(mtd, nand_chip, buf, test_page);
	for (k = 0; k < p; k++) {
		pr_warn("***************r p %d***********************\n", test_page + k);
		memset(temp_buffer_tlc_rd, 0x00, g_page_size);
		if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_tlc_rd, test_page + k))
			pr_warn("Read page 0x%x fail!\n", test_page + k);
		if (memcmp(temp_buffer_tlc, temp_buffer_tlc_rd, g_page_size)) {
			MSG(INIT, "compare fail!\n");
			err = 2;
			break;
		}
	}
#endif
	vfree(buf);
	return err;
}
#endif
  6603. #ifdef NAND_FEATURE_TEST
  6604. static int mtk_nand_test(struct mtd_info *mtd, struct nand_chip *nand_chip)
  6605. {
  6606. u32 page, page1, page2;
  6607. u32 block, block1;
  6608. bool write_test = TRUE;
  6609. while (0) {
  6610. mtk_nand_read_page(mtd, nand_chip, temp_buffer, 0x34408);
  6611. MSG(INIT, "Page: 0x34408 (0x8) bit flip: %d retry count %d\n", correct_count, g_hynix_retry_count);
  6612. mtk_nand_rrtry_func(mtd, gn_devinfo, 0, FALSE);
  6613. mdelay(3);
  6614. }
  6615. do {
  6616. mtk_nand_read_page(mtd, nand_chip, test_buffer, 0x25500);
  6617. mtk_nand_read_page(mtd, nand_chip, temp_buffer, 0x33C00);
  6618. if (!memcmp(temp_buffer, test_buffer, 1000))
  6619. write_test = FALSE;
  6620. if (write_test) {
  6621. MSG(INIT, "First step: erase and program #0 page\n");
  6622. for (page = 0x33C00; page < 0x43C00; page += 256) {
  6623. mtk_nand_erase_hw(mtd, page);
  6624. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page, 0, 0);
  6625. }
  6626. MSG(INIT, "Second step: Create open block\n");
  6627. page = 0x33C00;
  6628. for (block = 1; block < 256; block++) {
  6629. block1 = page/256;
  6630. for (page1 = page + 1; page1 < page + block; page1++) {
  6631. page2 = sandisk_pairpage_mapping((page1%256), TRUE);
  6632. if (page2 != (page1%256))
  6633. mtk_nand_read_page(mtd, nand_chip, temp_buffer, block1*256+page2);
  6634. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page1, 0, 0);
  6635. }
  6636. page += 256;
  6637. }
  6638. MSG(INIT, "Third step: Check bit flip count\n");
  6639. }
  6640. page = 0x33C00;
  6641. for (block = 1; block < 256; block++) {
  6642. block1 = page/256;
  6643. MSG(INIT, "====block 0x%x====\n", block1);
  6644. for (page1 = page; page1 < page + block; page1++) {
  6645. mtk_nand_read_page(mtd, nand_chip, temp_buffer, page1);
  6646. MSG(INIT, "Page: 0x%x (0x%x) bit flip: %d\n", page1, page1%256, correct_count);
  6647. }
  6648. page += 256;
  6649. }
  6650. mtk_nand_read_page(mtd, nand_chip, temp_buffer, 0x43C00);
  6651. write_test = TRUE;
  6652. if (!memcmp(temp_buffer, test_buffer, 1000))
  6653. write_test = FALSE;
  6654. if (write_test) {
  6655. MSG(INIT, "4th step: erase and program #0 page\n");
  6656. for (page = 0x43C00; page < 0x53C00; page += 256) {
  6657. mtk_nand_erase_hw(mtd, page);
  6658. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page, 0, 0);
  6659. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page+1, 0, 0);
  6660. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page+2, 0, 0);
  6661. }
  6662. MSG(INIT, "5th step: Create open block\n");
  6663. page = 0x43C00;
  6664. for (block = 3; block < 256; block++) {
  6665. block1 = page/256;
  6666. for (page1 = page + 3; page1 < page+block; page1++) {
  6667. page2 = sandisk_pairpage_mapping((page1%256), TRUE);
  6668. if (page2 != (page1%256))
  6669. mtk_nand_read_page(mtd, nand_chip, temp_buffer, block1*256+page2);
  6670. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page1, 0, 0);
  6671. }
  6672. page += 256;
  6673. }
  6674. MSG(INIT, "6th step: Check bit flip count\n");
  6675. }
  6676. page = 0x43C00;
  6677. for (block = 1; block < 256; block++) {
  6678. block1 = page/256;
  6679. MSG(INIT, "====WL block 0x%x====\n", block1);
  6680. for (page1 = page; page1 < page+block; page1++) {
  6681. mtk_nand_read_page(mtd, nand_chip, temp_buffer, page1);
  6682. MSG(INIT, "WL Page: 0x%x (0x%x) bit flip: %d\n", page1, page1%256, correct_count);
  6683. }
  6684. page += 256;
  6685. }
  6686. mtk_nand_read_page(mtd, nand_chip, temp_buffer, 0x53C00);
  6687. write_test = TRUE;
  6688. if (!memcmp(temp_buffer, test_buffer, 1000))
  6689. write_test = FALSE;
  6690. if (write_test) {
  6691. MSG(INIT, "7th step: erase and program\n");
  6692. page = 0x53C00;
  6693. for (block = 0; block <= 255; block++) {
  6694. mtk_nand_erase_hw(mtd, page);
  6695. block1 = page/256;
  6696. for (page1 = page; page1 <= page+block; page1++) {
  6697. page2 = sandisk_pairpage_mapping((page1%256), TRUE);
  6698. if (page2 != (page1%256))
  6699. mtk_nand_read_page(mtd, nand_chip, temp_buffer, block1*256+page2);
  6700. mtk_nand_write_page(mtd, nand_chip , 0, 16384, test_buffer, 0, page1, 0, 0);
  6701. }
  6702. page += 256;
  6703. }
  6704. MSG(INIT, "6th step: Check bit flip count\n");
  6705. }
  6706. page = 0x53C00;
  6707. for (block = 1; block < 256; block++) {
  6708. block1 = page/256;
  6709. MSG(INIT, "====Seq block 0x%x====\n", block1);
  6710. for (page1 = page; page1 < page+block; page1++) {
  6711. mtk_nand_read_page(mtd, nand_chip, temp_buffer, page1);
  6712. MSG(INIT, "Seq Page: 0x%x (0x%x) bit flip: %d\n", page1, page1%256, correct_count);
  6713. }
  6714. page += 256;
  6715. }
} while (0);
/* not reached: spin here so the logged counts stay on the console */
while (1)
;
  6720. }
  6721. #endif
  6722. #define KERNEL_NAND_UNIT_TEST 0
  6723. #define NAND_READ_PERFORMANCE 0
  6724. #if KERNEL_NAND_UNIT_TEST
  6725. __aligned(64)
  6726. static u8 temp_buffer_xl[LPAGE + LSPARE];
  6727. __aligned(64)
  6728. static u8 temp_buffer_xl_rd[LPAGE + LSPARE];
  6729. int mtk_nand_unit_test(struct nand_chip *nand_chip, struct mtd_info *mtd)
  6730. {
  6731. int err = 0;
  6732. /*int patternbuff[128] = {
  6733. 0x0103D901, 0xFF1802DF, 0x01200400, 0x00000021, 0x02040122, 0x02010122, 0x03020407,
  6734. 0x1A050103,
  6735. 0x00020F1B, 0x08C0C0A1, 0x01550800, 0x201B0AC1, 0x41990155, 0x64F0FFFF, 0x201B0C82,
  6736. 0x4118EA61,
  6737. 0xF00107F6, 0x0301EE1B, 0x0C834118, 0xEA617001, 0x07760301, 0xEE151405, 0x00202020,
  6738. 0x20202020,
  6739. 0x00202020, 0x2000302E, 0x3000FF14, 0x00FF0000, 0x00000000, 0x00000000, 0x00000000,
  6740. 0x00000000,
  6741. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6742. 0x00000000,
  6743. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6744. 0x00000000,
  6745. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6746. 0x00000000,
  6747. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6748. 0x00000000,
  6749. 0x01D90301, 0xDF0218FF, 0x00042001, 0x21000000, 0x22010402, 0x22010102, 0x07040203,
  6750. 0x0301051A,
  6751. 0x1B0F0200, 0xA1C0C008, 0x00085501, 0xC10A1B20, 0x55019941, 0xFFFFF064, 0x820C1B20,
  6752. 0x61EA1841,
  6753. 0xF60701F0, 0x1BEE0103, 0x1841830C, 0x017061EA, 0x01037607, 0x051415EE, 0x20202000,
  6754. 0x20202020,
  6755. 0x20202000, 0x2E300020, 0x14FF0030, 0x0000FF00, 0x00000000, 0x00000000, 0x00000000,
  6756. 0x00000000,
  6757. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6758. 0x00000000,
  6759. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6760. 0x00000000,
  6761. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6762. 0x00000000,
  6763. 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
  6764. 0x00000000
  6765. };*/
  6766. u32 j, k, p = g_block_size / g_page_size;
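/* p: pages per block of the current device */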
  6767. //struct gFeatureSet *feature_set = &(devinfo.feature_set.FeatureSet);
  6768. //u32 val = 0x05;
  6769. u32 TOTAL = 10;
  6770. pr_err("Begin to Kernel nand unit test ...\n");
  6771. pr_debug("[P] %x\n", p);
  6772. #if 0
  6773. for (m = 0; m < 32; m++)
  6774. memcpy(temp_buffer_xl + 512 * m, (u8 *) patternbuff, 512);
  6775. pr_debug("***************read pl***********************\n");
  6776. memset(temp_buffer_xl_rd, 0xA5, 16384);
  6777. if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, 1 * p))
  6778. pr_debug("Read page 0x%x fail!\n", 1 * p);
  6779. for (m = 0; m < 32; m++)
  6780. pr_debug("[5]0x%x %x %x %x\n", *((int *)temp_buffer_xl_rd + m * 4),
  6781. *((int *)temp_buffer_xl_rd + 1 + m * 4),
  6782. *((int *)temp_buffer_xl_rd + 2 + m * 4),
  6783. *((int *)temp_buffer_xl_rd + 3 + m * 4));
  6784. #endif
  6785. #if 0
  6786. pr_debug("*****Normal mode test %d***********\n", p);
  6787. gn_devinfo.tlcControl.slcopmodeEn = FALSE;
  6788. for (j = 0x400; j < 0x410; j++) {
  6789. /* memset(local_buffer, 0x00, 16384); */
  6790. /* mtk_nand_read_page(mtd, nand_chip, local_buffer, j*p); */
  6791. /* for(m = 0; m < 32; m++) */
  6792. /* MSG(INIT,"[1]0x%x %x %x %x\n",
  6793. *((int *)local_buffer+m*4), *((int *)local_buffer+1+m*4),
  6794. *((int *)local_buffer+2+m*4), *((int *)local_buffer+3+m*4)); */
  6795. mtk_nand_erase(mtd, j * p);
  6796. memset(temp_buffer_xl_rd, 0x00, 16384);
  6797. if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, j * p))
  6798. pr_debug("Read page 0x%x fail!\n", j * p);
  6799. pr_debug("[2]0x%x %x %x %x\n", *(int *)temp_buffer_xl_rd,
  6800. *((int *)temp_buffer_xl_rd + 1), *((int *)temp_buffer_xl_rd + 2),
  6801. *((int *)temp_buffer_xl_rd + 3));
  6802. if (mtk_nand_block_bad(mtd, j * g_block_size, 0)) {
  6803. pr_debug("Bad block at %x\n", j);
  6804. continue;
  6805. }
  6806. for (k = 0; k < p; k++) {
  6807. pr_debug("***************w b***********************\n");
  6808. for (m = 0; m < 32; m++)
  6809. pr_debug("[1]0x%x %x %x %x\n", *((int *)temp_buffer_xl + m * 4),
  6810. *((int *)temp_buffer_xl + 1 + m * 4),
  6811. *((int *)temp_buffer_xl + 2 + m * 4),
  6812. *((int *)temp_buffer_xl + 3 + m * 4));
  6813. if (mtk_nand_write_page(mtd, nand_chip, 0, 0, temp_buffer_xl /*(u8 *)patternbuff */ , 0,
  6814. j * p + k, 0, 0))
  6815. pr_debug("Write page 0x%x fail!\n", j * p + k);
  6816. /* #if 1 */
  6817. /* } */
  6818. /* TOTAL=1000; */
  6819. /* do{ */
  6820. /* for (k = 0; k < p; k++) */
  6821. /* { */
  6822. /* #endif */
  6823. pr_debug("***************r b***********************\n");
  6824. memset(temp_buffer_xl_rd, 0x00, g_page_size);
  6825. if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, j * p + k))
  6826. pr_debug("Read page 0x%x fail!\n", j * p + k);
  6827. for (m = 0; m < 32; m++)
  6828. pr_debug("[3]0x%x %x %x %x\n", *((int *)temp_buffer_xl_rd + m * 4),
  6829. *((int *)temp_buffer_xl_rd + 1 + m * 4),
  6830. *((int *)temp_buffer_xl_rd + 2 + m * 4),
  6831. *((int *)temp_buffer_xl_rd + 3 + m * 4));
  6832. if (memcmp(temp_buffer_xl /*(u8 *)patternbuff */ , temp_buffer_xl_rd,
  6833. 512 /*g_page_size */)) {
  6834. pr_debug("[KERNEL_NAND_UNIT_TEST] compare fail!\n");
  6835. err = -1;
  6836. while (1)
  6837. ;
  6838. } else {
  6839. TOTAL--;
  6840. pr_debug("[KERNEL_NAND_UNIT_TEST] compare OK!\n");
  6841. }
  6842. }
  6843. /* }while(TOTAL); */
  6844. #if 0
  6845. mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd,
  6846. feature_set->Async_timing.address, (u8 *) &val,
  6847. sizeof(feature_set->Async_timing.feature));
  6848. mtk_nand_GetFeature(mtd, feature_set->gfeatureCmd,
  6849. feature_set->Async_timing.address, (u8 *) &val, 4);
  6850. pr_debug("[ASYNC Interface]0x%X\n", val);
  6851. err = mtk_nand_interface_config(mtd);
  6852. MSG(INIT, "[nand_interface_config] %d\n", err);
  6853. #endif
  6854. }
  6855. p = (p>>1);
  6856. #endif
  6857. pr_err("*****SLC mode test %d***********\n", p);
  6858. force_slc_flag = 1;
  6859. for (j = 0x80; j < 0x100; j++) {
  6860. /* memset(local_buffer, 0x00, 16384); */
  6861. /* mtk_nand_read_page(mtd, nand_chip, local_buffer, j*p); */
  6862. /* for(m = 0; m < 32; m++) */
  6863. /* MSG(INIT,"[1]0x%x %x %x %x\n",
  6864. *((int *)local_buffer+m*4), *((int *)local_buffer+1+m*4),
  6865. *((int *)local_buffer+2+m*4), *((int *)local_buffer+3+m*4)); */
  6866. mtk_nand_erase(mtd, j * p);
  6867. memset(temp_buffer_xl_rd, 0x00, 16384);
  6868. //if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, j * p))
  6869. // pr_debug("Read page 0x%x fail!\n", j * p);
  6870. /*pr_debug("[2]0x%x %x %x %x\n", *(int *)temp_buffer_xl_rd,
  6871. *((int *)temp_buffer_xl_rd + 1), *((int *)temp_buffer_xl_rd + 2),
  6872. *((int *)temp_buffer_xl_rd + 3));
  6873. if (mtk_nand_block_bad(mtd, j * g_block_size, 0)) {
  6874. pr_debug("Bad block at %x\n", j);
  6875. continue;
  6876. }*/
  6877. for (k = 0; k < p; k+=2) {
  6878. pr_err("***************w b***********************\n");
  6879. /*for (m = 0; m < 32; m++)
  6880. pr_err("[1]0x%x %x %x %x\n", *((int *)temp_buffer_xl + m * 4),
  6881. *((int *)temp_buffer_xl + 1 + m * 4),
  6882. *((int *)temp_buffer_xl + 2 + m * 4),
  6883. *((int *)temp_buffer_xl + 3 + m * 4)); */
  6884. if (mtk_nand_write_page(mtd, nand_chip, 0, 0, temp_buffer_xl /*(u8 *)patternbuff */ , 0,
  6885. j * p + k, 0, 0))
  6886. pr_err("Write page 0x%x fail!\n", j * p + k);
  6887. pr_err("***************r b***********************\n");
  6888. memset(temp_buffer_xl_rd, 0x00, g_page_size);
  6889. if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, j * p + k))
  6890. pr_err("Read page 0x%x fail!\n", j * p + k);
  6891. /*for (m = 0; m < 32; m++)
  6892. pr_err("[3]0x%x %x %x %x\n", *((int *)temp_buffer_xl_rd + m * 4),
  6893. *((int *)temp_buffer_xl_rd + 1 + m * 4),
  6894. *((int *)temp_buffer_xl_rd + 2 + m * 4),
  6895. *((int *)temp_buffer_xl_rd + 3 + m * 4));*/
  6896. if (memcmp(temp_buffer_xl /*(u8 *)patternbuff */ , temp_buffer_xl_rd,
  6897. 512 /*g_page_size */)) {
  6898. pr_err("[KERNEL_NAND_UNIT_TEST] compare fail!\n");
  6899. err = -1;
  6900. break;
  6901. } else {
  6902. TOTAL--;
  6903. pr_err("[KERNEL_NAND_UNIT_TEST] compare OK!\n");
  6904. }
  6905. }
  6906. pr_err("**********Read back check**************\n");
  6907. for (k = 0; k < p; k+=2) {
  6908. if (mtk_nand_read_page(mtd, nand_chip, temp_buffer_xl_rd, j * p + k))
  6909. pr_err("Read page 0x%x fail!\n", j * p + k);
  6910. if (memcmp(temp_buffer_xl /*(u8 *)patternbuff */ , temp_buffer_xl_rd,
  6911. 512 /*g_page_size */)) {
  6912. pr_err("[KERNEL_NAND_UNIT_TEST] compare fail!\n");
  6913. err = -1;
  6914. break;
  6915. } else {
  6916. TOTAL--;
  6917. pr_err("[KERNEL_NAND_UNIT_TEST] compare OK!\n");
  6918. }
  6919. }
  6920. /* }while(TOTAL); */
  6921. }
  6922. pr_err("*****Unit test END***********\n");
  6923. //while(1);
  6924. return err;
  6925. }
  6926. #endif
  6927. static int mtk_nand_probe(struct platform_device *pdev)
  6928. {
  6929. struct mtk_nand_host_hw *hw;
  6930. struct mtd_info *mtd;
  6931. struct nand_chip *nand_chip;
  6932. int err = 0;
  6933. int bmt_sz;
  6934. u8 id[NAND_MAX_ID];
  6935. int i;
  6936. u32 sector_size = NAND_SECTOR_SIZE;
  6937. u64 temp;
  6938. #ifdef MTK_NAND_CMD_DUMP
  6939. dbg_inf[0].cmd.cmd_array[0] = 0xFF;
  6940. dbg_inf[0].cmd.cmd_array[1] = 0xFF;
  6941. dbg_inf[0].cmd.address[0] = 0xFF;
  6942. dbg_inf[0].cmd.address[1] = 0xFF;
  6943. dbg_inf[0].cmd.address[2] = 0xFF;
  6944. #endif
  6945. g_b2Die_CS = FALSE;
  6946. #if 1
  6947. MSG(INIT, "Enable NFI and NFIECC Clock\n");
  6948. enable_clock(MT_CG_NFI_BUS_SW_CG, "NFI");
  6949. enable_clock(MT_CG_NFI_SW_CG, "NFI");
  6950. enable_clock(MT_CG_NFI2X_SW_CG, "NFI");
  6951. enable_clock(MT_CG_NFIECC_SW_CG, "NFI");
  6952. clkmux_sel(MT_CLKMUX_NFI1X_INFRA_SEL, MT_CG_SYS_26M, "NFI");
  6953. #endif
  6954. mtk_nfi_node = of_find_compatible_node(NULL, NULL, "mediatek,NFI");
  6955. mtk_nfi_base = of_iomap(mtk_nfi_node, 0);
  6956. MSG(INIT, "of_iomap for nfi base @ 0x%p\n", mtk_nfi_base);
  6957. if (mtk_nfiecc_node == NULL) {
  6958. mtk_nfiecc_node = of_find_compatible_node(NULL, NULL, "mediatek,NFIECC");
  6959. mtk_nfiecc_base = of_iomap(mtk_nfiecc_node, 0);
  6960. MSG(INIT, "of_iomap for nfiecc base @ 0x%p\n", mtk_nfiecc_base);
  6961. }
  6962. if (mtk_io_node == NULL) {
  6963. mtk_io_node = of_find_compatible_node(NULL, NULL, "mediatek,IOCFG_B");
  6964. mtk_io_base = of_iomap(mtk_io_node, 0);
  6965. MSG(INIT, "of_iomap for io base @ 0x%p\n", mtk_io_base);
  6966. }
  6967. mtk_pm_node = of_find_compatible_node(NULL, NULL,
  6968. "mediatek,mt_pmic_regulator_supply");
  6969. if (nfi_reg_vemc_3v3 == NULL)
  6970. nfi_reg_vemc_3v3 = regulator_get(&(pdev->dev), "vemc3v3");
  6971. nfi_irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
  6972. hw = &mtk_nand_hw;
  6973. BUG_ON(!hw);
  6974. regulator_set_voltage(nfi_reg_vemc_3v3, 3300000, 3300000);
  6975. err = regulator_enable(nfi_reg_vemc_3v3);
  6976. if (err)
  6977. pr_err("nfi ldo enable fail!!\n");
  6978. /* Allocate memory for the device structure (and zero it) */
  6979. host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
  6980. if (!host) {
  6981. MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
  6982. nand_disable_clock();
  6983. return -ENOMEM;
  6984. }
  6985. /* Allocate memory for 16 byte aligned buffer */
  6986. local_buffer_16_align = local_buffer;
  6987. temp_buffer_16_align = temp_buffer;
  6988. pr_debug("Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
  6989. host->hw = hw;
  6990. PL_TIME_PROG(10);
  6991. PL_TIME_ERASE(10);
  6992. PL_TIME_PROG_WDT_SET(1);
  6993. PL_TIME_ERASE_WDT_SET(1);
  6994. /* init mtd data structure */
  6995. nand_chip = &host->nand_chip;
  6996. nand_chip->priv = host; /* link the private data structures */
  6997. mtd = &host->mtd;
  6998. mtd->priv = nand_chip;
  6999. mtd->owner = THIS_MODULE;
  7000. mtd->name = "MTK-Nand";
  7001. mtd->eraseregions = host->erase_region;
  7002. hw->nand_ecc_mode = NAND_ECC_HW;
  7003. /* Set address of NAND IO lines */
  7004. nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
  7005. nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
  7006. nand_chip->chip_delay = 20; /* 20us command delay time */
  7007. nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
  7008. nand_chip->read_byte = mtk_nand_read_byte;
  7009. nand_chip->read_buf = mtk_nand_read_buf;
  7010. nand_chip->write_buf = mtk_nand_write_buf;
  7011. #ifdef CONFIG_MTD_NAND_VERIFY_WRITE
  7012. nand_chip->verify_buf = mtk_nand_verify_buf;
  7013. #endif
  7014. nand_chip->select_chip = mtk_nand_select_chip;
  7015. nand_chip->dev_ready = mtk_nand_dev_ready;
  7016. nand_chip->cmdfunc = mtk_nand_command_bp;
  7017. nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
  7018. nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
  7019. nand_chip->ecc.layout = &nand_oob_64;
  7020. nand_chip->ecc.size = hw->nand_ecc_size;
  7021. nand_chip->ecc.bytes = hw->nand_ecc_bytes;
  7022. nand_chip->options = NAND_SKIP_BBTSCAN;
  7023. /* For BMT, we need to revise driver architecture */
  7024. nand_chip->write_page = mtk_nand_write_page;
  7025. nand_chip->read_page = mtk_nand_read_page;
  7026. nand_chip->read_subpage = mtk_nand_read_subpage;
  7027. nand_chip->ecc.write_oob = mtk_nand_write_oob;
  7028. nand_chip->ecc.read_oob = mtk_nand_read_oob;
  7029. nand_chip->block_markbad = mtk_nand_block_markbad;
  7030. nand_chip->erase_hw = mtk_nand_erase;
  7031. nand_chip->block_bad = mtk_nand_block_bad;
  7032. nand_chip->init_size = mtk_nand_init_size;
  7033. mtk_nand_init_hw(host);
  7034. /* Select the device */
  7035. nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
  7036. /*
  7037. * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
  7038. * after power-up
  7039. */
  7040. DDR_INTERFACE = FALSE;
  7041. mtk_nand_reset();
  7042. DRV_WriteReg16(NFI_CNFG_REG16, CNFG_OP_RESET);
  7043. mtk_nand_set_command(NAND_CMD_RESET);
DRV_WriteReg16(NFI_CNRNB_REG16, 0xF1);
  7045. while (!(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY_RETURN))
  7046. ;
  7047. mtk_nand_reset();
  7048. /* Send the command for reading device ID */
  7049. nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
  7050. for (i = 0; i < NAND_MAX_ID; i++)
  7051. id[i] = nand_chip->read_byte(mtd);
  7052. manu_id = id[0];
  7053. dev_id = id[1];
  7054. if (!get_device_info(id, &gn_devinfo))
MSG(INIT, "Unsupported device!\n");
  7056. if (gn_devinfo.pagesize == 16384) {
  7057. nand_chip->ecc.layout = &nand_oob_128;
  7058. hw->nand_ecc_size = 16384;
  7059. } else if (gn_devinfo.pagesize == 8192) {
  7060. nand_chip->ecc.layout = &nand_oob_128;
  7061. hw->nand_ecc_size = 8192;
  7062. } else if (gn_devinfo.pagesize == 4096) {
  7063. nand_chip->ecc.layout = &nand_oob_128;
  7064. hw->nand_ecc_size = 4096;
  7065. } else if (gn_devinfo.pagesize == 2048) {
  7066. nand_chip->ecc.layout = &nand_oob_64;
  7067. hw->nand_ecc_size = 2048;
  7068. } else if (gn_devinfo.pagesize == 512) {
  7069. nand_chip->ecc.layout = &nand_oob_16;
  7070. hw->nand_ecc_size = 512;
  7071. }
  7072. if (gn_devinfo.sectorsize == 1024) {
  7073. sector_size = 1024;
  7074. hw->nand_sec_shift = 10;
  7075. hw->nand_sec_size = 1024;
  7076. NFI_CLN_REG32(NFI_PAGEFMT_REG16, PAGEFMT_SECTOR_SEL);
  7077. }
  7078. if (gn_devinfo.pagesize <= 4096) {
  7079. nand_chip->ecc.layout->eccbytes =
  7080. gn_devinfo.sparesize -
  7081. OOB_AVAI_PER_SECTOR * (gn_devinfo.pagesize / sector_size);
  7082. hw->nand_ecc_bytes = nand_chip->ecc.layout->eccbytes;
  7083. /* Modify to fit device character */
  7084. nand_chip->ecc.size = hw->nand_ecc_size;
  7085. nand_chip->ecc.bytes = hw->nand_ecc_bytes;
  7086. } else {
  7087. nand_chip->ecc.layout->eccbytes = 64;
  7088. /*gn_devinfo.sparesize - OOB_AVAI_PER_SECTOR * (gn_devinfo.pagesize / sector_size); */
  7089. hw->nand_ecc_bytes = nand_chip->ecc.layout->eccbytes;
  7090. /* Modify to fit device character */
  7091. nand_chip->ecc.size = hw->nand_ecc_size;
  7092. nand_chip->ecc.bytes = hw->nand_ecc_bytes;
  7093. }
  7094. nand_chip->subpagesize = gn_devinfo.sectorsize;
  7095. nand_chip->subpage_size = gn_devinfo.sectorsize;
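/*
 * Lay out the spare area: the first OOB_AVAI_PER_SECTOR bytes per sector
 * stay free for FDM data, and the ECC parity bytes are packed directly
 * after them.
 */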
  7096. for (i = 0; i < nand_chip->ecc.layout->eccbytes; i++)
  7097. nand_chip->ecc.layout->eccpos[i] =
  7098. OOB_AVAI_PER_SECTOR * (gn_devinfo.pagesize / sector_size) + i;
  7099. if (gn_devinfo.vendor != VEND_NONE)
  7100. mtk_nand_randomizer_config(&gn_devinfo.feature_set.randConfig, 0);
  7101. if ((gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX_16NM)
  7102. || (gn_devinfo.feature_set.FeatureSet.rtype == RTYPE_HYNIX))
  7103. HYNIX_RR_TABLE_READ(&gn_devinfo);
  7104. hw->nfi_bus_width = gn_devinfo.iowidth;
  7105. #if 0
  7106. if (gn_devinfo.vendor != VEND_NONE) {
  7107. if (gn_devinfo.feature_set.FeatureSet.Async_timing.feature != 0xFF) {
  7108. struct gFeatureSet *feature_set = &(gn_devinfo.feature_set.FeatureSet);
  7109. mtk_nand_SetFeature(mtd, (u16) feature_set->sfeatureCmd,
  7110. feature_set->Async_timing.address,
  7111. (u8 *) &feature_set->Async_timing.feature,
  7112. sizeof(feature_set->Async_timing.feature));
  7113. }
  7114. }
  7115. #endif
  7116. DRV_WriteReg32(NFI_ACCCON_REG32, gn_devinfo.timmingsetting);
  7117. /* 16-bit bus width */
  7118. if (hw->nfi_bus_width == 16) {
  7119. MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
  7120. nand_chip->options |= NAND_BUSWIDTH_16;
  7121. }
  7122. mtk_dev = &pdev->dev;
  7123. pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
  7124. if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
  7125. dev_err(&pdev->dev, "set dma mask fail\n");
  7126. pr_warn("[NAND] set dma mask fail\n");
  7127. } else
  7128. pr_debug("[NAND] set dma mask ok\n");
  7129. err = request_irq(MT_NFI_IRQ_ID, mtk_nand_irq_handler, IRQF_TRIGGER_NONE, "mtk-nand", NULL);
  7130. if (0 != err) {
  7131. MSG(INIT, "%s : Request IRQ fail: err = %d\n", MODULE_NAME, err);
  7132. goto out;
  7133. }
  7134. if (g_i4Interrupt)
  7135. enable_irq(MT_NFI_IRQ_ID);
  7136. else
  7137. disable_irq(MT_NFI_IRQ_ID);
  7138. #if 0
  7139. if (gn_devinfo.advancedmode & CACHE_READ)
  7140. nand_chip->ecc.read_multi_page_cache = NULL;
  7141. else
  7142. nand_chip->ecc.read_multi_page_cache = NULL;
  7143. #endif
  7144. mtd->oobsize = gn_devinfo.sparesize;
  7145. /* Scan to find existence of the device */
  7146. if (nand_scan(mtd, hw->nfi_cs_num)) {
  7147. MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
  7148. err = -ENXIO;
  7149. goto out;
  7150. }
  7151. g_page_size = mtd->writesize;
  7152. g_block_size = gn_devinfo.blocksize << 10;
  7153. PAGES_PER_BLOCK = (u32)(g_block_size / g_page_size);
  7154. temp = nand_chip->chipsize;
  7155. do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
  7156. bmt_sz = (int)(((u32) temp) / 100 * 6);
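/*
 * Reserve roughly 6% of the device's blocks for the bad-block management
 * table (BMT) pool; these blocks are subtracted from the reported chip
 * size further below.
 */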
  7157. platform_set_drvdata(pdev, host);
  7158. if (hw->nfi_bus_width == 16)
  7159. NFI_SET_REG32(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
  7160. nand_chip->select_chip(mtd, 0);
  7161. nand_chip->chipsize -= (bmt_sz * g_block_size);
  7162. mtd->size = nand_chip->chipsize;
  7163. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7164. mtk_pmt_reset();
  7165. #endif
  7166. if (!g_bmt) {
  7167. g_bmt = init_bmt(nand_chip, bmt_sz);
  7168. if (!g_bmt) {
  7169. MSG(INIT, "Error: init bmt failed\n");
  7170. nand_disable_clock();
  7171. return 0;
  7172. }
  7173. }
  7174. nand_chip->chipsize -= (PMT_POOL_SIZE) * (gn_devinfo.blocksize * 1024);
  7175. mtd->size = nand_chip->chipsize;
  7176. #if KERNEL_NAND_UNIT_TEST
  7177. err = mtk_nand_unit_test(nand_chip, mtd);
  7178. if (err == 0)
  7179. pr_warn("Thanks to GOD, UNIT Test OK!\n");
  7180. #endif
  7181. #ifdef PMT
  7182. part_init_pmt(mtd, (u8 *) &g_exist_Partition[0]);
  7183. /*
  7184. for (i = 0; i < 3000; i++) {
  7185. pr_warn("waste time %d\n", i);
  7186. mtk_nand_block_bad_hw(mtd, 0x17a00000);
  7187. }
  7188. */
  7189. err = mtd_device_register(mtd, g_exist_Partition, part_num);
  7190. #else
  7191. err = mtd_device_register(mtd, g_pasStatic_Partition, part_num);
  7192. #endif
  7193. #ifdef TLC_UNIT_TEST
  7194. err = mtk_tlc_unit_test(nand_chip, mtd);
switch (err) {
case 0:
pr_warn("TLC UNIT Test OK!\n");
break;
case 1:
pr_warn("TLC UNIT Test fail: SLC mode fail!\n");
break;
case 2:
pr_warn("TLC UNIT Test fail: TLC mode fail!\n");
break;
default:
pr_warn("TLC UNIT Test fail: unexpected return value!\n");
break;
}
  7215. #endif
  7216. #ifdef _MTK_NAND_DUMMY_DRIVER_
  7217. dummy_driver_debug = 0;
  7218. #endif
  7219. #if defined(CFG_SNAND_ACCESS_PATTERN_LOGGER_BOOTUP) && defined(CFG_SNAND_ACCESS_PATTERN_LOGGER)
  7220. g_snand_pm_on = 1;
  7221. g_snand_pm_cnt = 0;
  7222. g_snand_pm_wrapped = 0;
  7223. #endif
  7224. /* Successfully!! */
  7225. if (!err) {
  7226. MSG(INIT, "[mtk_nand] probe successfully!\n");
  7227. nand_disable_clock();
  7228. return err;
  7229. }
  7230. /* Fail!! */
  7231. out:
  7232. MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
  7233. nand_release(mtd);
  7234. platform_set_drvdata(pdev, NULL);
  7235. kfree(host);
  7236. nand_disable_clock();
  7237. return err;
  7238. }
/******************************************************************************
* mtk_nand_suspend
*
* DESCRIPTION:
* Suspend the NAND device.
*
* PARAMETERS:
* struct platform_device *pdev : device structure
*
* RETURNS:
* 0 : Success
*
* NOTES:
* None
*
  7255. static int mtk_nand_suspend(struct platform_device *pdev, pm_message_t state)
  7256. {
  7257. struct mtk_nand_host *host = platform_get_drvdata(pdev);
  7258. /* backup register */
  7259. #ifdef CONFIG_PM
  7260. if (host->saved_para.suspend_flag == 0) {
  7261. nand_enable_clock();
  7262. /* Save NFI register */
  7263. host->saved_para.sNFI_CNFG_REG16 = DRV_Reg16(NFI_CNFG_REG16);
  7264. host->saved_para.sNFI_PAGEFMT_REG16 = DRV_Reg32(NFI_PAGEFMT_REG16);
  7265. host->saved_para.sNFI_CON_REG16 = DRV_Reg32(NFI_CON_REG16);
  7266. host->saved_para.sNFI_ACCCON_REG32 = DRV_Reg32(NFI_ACCCON_REG32);
  7267. host->saved_para.sNFI_INTR_EN_REG16 = DRV_Reg16(NFI_INTR_EN_REG16);
  7268. host->saved_para.sNFI_IOCON_REG16 = DRV_Reg16(NFI_IOCON_REG16);
  7269. host->saved_para.sNFI_CSEL_REG16 = DRV_Reg16(NFI_CSEL_REG16);
  7270. host->saved_para.sNFI_DEBUG_CON1_REG16 = DRV_Reg16(NFI_DEBUG_CON1_REG16);
  7271. /* save ECC register */
  7272. host->saved_para.sECC_ENCCNFG_REG32 = DRV_Reg32(ECC_ENCCNFG_REG32);
  7273. host->saved_para.sECC_DECCNFG_REG32 = DRV_Reg32(ECC_DECCNFG_REG32);
  7274. host->saved_para.suspend_flag = 1;
  7275. mtk_nand_interface_async();
  7276. nand_disable_clock();
  7277. regulator_disable(nfi_reg_vemc_3v3);
  7278. } else
  7279. MSG(POWERCTL, "[NFI] Suspend twice !\n");
  7280. #endif
  7281. MSG(POWERCTL, "[NFI] Suspend !\n");
  7282. return 0;
  7283. }
/******************************************************************************
* mtk_nand_resume
*
* DESCRIPTION:
* Resume the NAND device.
*
* PARAMETERS:
* struct platform_device *pdev : device structure
*
* RETURNS:
* 0 : Success
*
* NOTES:
* None
*
  7300. static int mtk_nand_resume(struct platform_device *pdev)
  7301. {
  7302. struct mtk_nand_host *host = platform_get_drvdata(pdev);
  7303. int ret = 0;
  7304. #ifdef CONFIG_PM
  7305. if (host->saved_para.suspend_flag == 1) {
  7306. regulator_set_voltage(nfi_reg_vemc_3v3, 3300000, 3300000);
  7307. ret = regulator_enable(nfi_reg_vemc_3v3);
  7308. if (ret)
  7309. pr_err("nfi ldo enable fail!!\n");
  7310. nand_enable_clock();
  7311. /* restore NFI register */
  7312. DRV_WriteReg16(NFI_CNFG_REG16 , host->saved_para.sNFI_CNFG_REG16);
  7313. DRV_WriteReg32(NFI_PAGEFMT_REG16 , host->saved_para.sNFI_PAGEFMT_REG16);
  7314. DRV_WriteReg32(NFI_CON_REG16 , host->saved_para.sNFI_CON_REG16);
  7315. DRV_WriteReg32(NFI_ACCCON_REG32 , host->saved_para.sNFI_ACCCON_REG32);
  7316. DRV_WriteReg16(NFI_IOCON_REG16 , host->saved_para.sNFI_IOCON_REG16);
  7317. DRV_WriteReg16(NFI_CSEL_REG16 , host->saved_para.sNFI_CSEL_REG16);
  7318. DRV_WriteReg16(NFI_DEBUG_CON1_REG16 , host->saved_para.sNFI_DEBUG_CON1_REG16);
  7319. /* restore ECC register */
  7320. DRV_WriteReg32(ECC_ENCCNFG_REG32 , host->saved_para.sECC_ENCCNFG_REG32);
  7321. DRV_WriteReg32(ECC_DECCNFG_REG32 , host->saved_para.sECC_DECCNFG_REG32);
  7322. /* Reset NFI and ECC state machine */
  7323. /* Reset the state machine and data FIFO, because flushing FIFO */
  7324. (void)mtk_nand_reset();
  7325. DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
  7326. while (!DRV_Reg16(ECC_DECIDLE_REG16))
  7327. ;
  7328. DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
  7329. while (!DRV_Reg32(ECC_ENCIDLE_REG32))
  7330. ;
/* Initialize interrupts: clear any pending status (the register is read-to-clear). */
  7332. DRV_Reg16(NFI_INTR_REG16);
  7333. DRV_WriteReg16(NFI_INTR_EN_REG16 , host->saved_para.sNFI_INTR_EN_REG16);
  7334. nand_disable_clock();
  7335. host->saved_para.suspend_flag = 0;
  7336. } else {
  7337. MSG(POWERCTL, "[NFI] Resume twice !\n");
  7338. }
  7339. #endif
  7340. MSG(POWERCTL, "[NFI] Resume !\n");
  7341. return 0;
  7342. }
/******************************************************************************
* mtk_nand_remove
*
* DESCRIPTION:
* Unregister the NAND device file operations.
*
* PARAMETERS:
* struct platform_device *pdev : device structure
*
* RETURNS:
* 0 : Success
*
* NOTES:
* None
*
  7359. static int mtk_nand_remove(struct platform_device *pdev)
  7360. {
  7361. struct mtk_nand_host *host = platform_get_drvdata(pdev);
  7362. struct mtd_info *mtd = &host->mtd;
  7363. nand_release(mtd);
  7364. kfree(host);
  7365. nand_disable_clock();
  7366. return 0;
  7367. }
/******************************************************************************
* NAND OTP operations
******************************************************************************/
  7371. #if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
  7372. unsigned int samsung_OTPQueryLength(unsigned int *QLength)
  7373. {
  7374. *QLength = SAMSUNG_OTP_PAGE_NUM * g_page_size;
  7375. return 0;
  7376. }
  7377. unsigned int samsung_OTPRead(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
  7378. {
  7379. struct mtd_info *mtd = &host->mtd;
  7380. unsigned int rowaddr, coladdr;
  7381. unsigned int u4Size = g_page_size;
  7383. unsigned int bRet;
  7384. unsigned int sec_num = mtd->writesize >> host->hw->nand_sec_shift;
  7385. if (PageAddr >= SAMSUNG_OTP_PAGE_NUM)
  7386. return OTP_ERROR_OVERSCOPE;
  7387. /* Col -> Row; LSB first */
  7388. coladdr = 0x00000000;
  7389. rowaddr = Samsung_OTP_Page[PageAddr];
  7390. MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
  7391. /* Power on NFI HW component. */
  7392. nand_get_device(mtd, FL_READING);
  7393. mtk_nand_reset();
  7394. (void)mtk_nand_set_command(0x30);
  7395. mtk_nand_reset();
  7396. (void)mtk_nand_set_command(0x65);
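/*
 * 0x30 followed by 0x65 appears to be Samsung's vendor-specific OTP-entry
 * command pair (an assumption based on its use here; it is not a generic
 * ONFI sequence).
 */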
MSG(OTP, "[%s]: Start reading data from the OTP area\n", __func__);
  7398. if (!mtk_nand_reset()) {
  7399. bRet = OTP_ERROR_RESET;
  7400. goto cleanup;
  7401. }
  7402. mtk_nand_set_mode(CNFG_OP_READ);
  7403. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
  7404. DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
  7405. DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
  7406. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
  7407. if (g_bHwEcc)
  7408. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  7409. else
  7410. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  7411. mtk_nand_set_autoformat(true);
  7412. if (g_bHwEcc)
  7413. ECC_Decode_Start();
  7414. if (!mtk_nand_set_command(NAND_CMD_READ0)) {
  7415. bRet = OTP_ERROR_BUSY;
  7416. goto cleanup;
  7417. }
  7418. if (!mtk_nand_set_address(coladdr, rowaddr, 2, 3)) {
  7419. bRet = OTP_ERROR_BUSY;
  7420. goto cleanup;
  7421. }
  7422. if (!mtk_nand_set_command(NAND_CMD_READSTART)) {
  7423. bRet = OTP_ERROR_BUSY;
  7424. goto cleanup;
  7425. }
  7426. if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
  7427. bRet = OTP_ERROR_BUSY;
  7428. goto cleanup;
  7429. }
  7430. if (!mtk_nand_read_page_data(mtd, BufferPtr, u4Size)) {
  7431. bRet = OTP_ERROR_BUSY;
  7432. goto cleanup;
  7433. }
  7434. if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
  7435. bRet = OTP_ERROR_BUSY;
  7436. goto cleanup;
  7437. }
  7438. mtk_nand_read_fdm_data(SparePtr, sec_num);
  7439. mtk_nand_stop_read();
MSG(OTP, "[%s]: Done reading data from the OTP area\n", __func__);
  7441. bRet = OTP_SUCCESS;
  7442. cleanup:
  7443. mtk_nand_reset();
  7444. (void)mtk_nand_set_command(0xFF);
  7445. nand_release_device(mtd);
  7446. return bRet;
  7447. }
  7448. unsigned int samsung_OTPWrite(unsigned int PageAddr, void *BufferPtr, void *SparePtr)
  7449. {
  7450. struct mtd_info *mtd = &host->mtd;
  7451. unsigned int rowaddr, coladdr;
  7452. unsigned int u4Size = g_page_size;
  7454. unsigned int bRet;
  7455. unsigned int sec_num = mtd->writesize >> 9;
  7456. if (PageAddr >= SAMSUNG_OTP_PAGE_NUM)
  7457. return OTP_ERROR_OVERSCOPE;
  7458. /* Col -> Row; LSB first */
  7459. coladdr = 0x00000000;
  7460. rowaddr = Samsung_OTP_Page[PageAddr];
  7461. MSG(OTP, "[%s]:(COLADDR) [0x%08x]/(ROWADDR)[0x%08x]\n", __func__, coladdr, rowaddr);
nand_get_device(mtd, FL_WRITING);
  7463. mtk_nand_reset();
  7464. (void)mtk_nand_set_command(0x30);
  7465. mtk_nand_reset();
  7466. (void)mtk_nand_set_command(0x65);
MSG(OTP, "[%s]: Start writing data to the OTP area\n", __func__);
  7468. if (!mtk_nand_reset()) {
  7469. bRet = OTP_ERROR_RESET;
  7470. goto cleanup;
  7471. }
  7472. mtk_nand_set_mode(CNFG_OP_PRGM);
  7473. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
  7474. DRV_WriteReg32(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
  7475. DRV_WriteReg32(NFI_STRADDR_REG32, __virt_to_phys(BufferPtr));
  7476. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AHB);
  7477. if (g_bHwEcc)
  7478. NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  7479. else
  7480. NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
  7481. mtk_nand_set_autoformat(true);
  7482. ECC_Encode_Start();
  7483. if (!mtk_nand_set_command(NAND_CMD_SEQIN)) {
  7484. bRet = OTP_ERROR_BUSY;
  7485. goto cleanup;
  7486. }
  7487. if (!mtk_nand_set_address(coladdr, rowaddr, 2, 3)) {
  7488. bRet = OTP_ERROR_BUSY;
  7489. goto cleanup;
  7490. }
  7491. if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
  7492. bRet = OTP_ERROR_BUSY;
  7493. goto cleanup;
  7494. }
  7495. mtk_nand_write_fdm_data((struct nand_chip *)mtd->priv, BufferPtr, sec_num);
  7496. (void)mtk_nand_write_page_data(mtd, BufferPtr, u4Size);
  7497. if (!mtk_nand_check_RW_count(u4Size)) {
  7498. MSG(OTP, "[%s]: Check RW count timeout !\n", __func__);
  7499. bRet = OTP_ERROR_TIMEOUT;
  7500. goto cleanup;
  7501. }
  7502. mtk_nand_stop_write();
  7503. (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
  7504. while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
  7505. ;
  7506. bRet = OTP_SUCCESS;
MSG(OTP, "[%s]: Done writing data to the OTP area\n", __func__);
  7508. cleanup:
  7509. mtk_nand_reset();
  7510. (void)mtk_nand_set_command(NAND_CMD_RESET);
  7511. nand_release_device(mtd);
  7512. return bRet;
  7513. }
  7514. static int mt_otp_open(struct inode *inode, struct file *filp)
  7515. {
  7516. MSG(OTP, "[%s]: (MAJOR)%d: (MINOR)%d\n", __func__, MAJOR(inode->i_rdev),
  7517. MINOR(inode->i_rdev));
  7518. filp->private_data = (int *)OTP_MAGIC_NUM;
  7519. return 0;
  7520. }
  7521. static int mt_otp_release(struct inode *inode, struct file *filp)
  7522. {
  7523. MSG(OTP, "[%s]: (MAJOR)%d: (MINOR)%d\n", __func__, MAJOR(inode->i_rdev),
  7524. MINOR(inode->i_rdev));
  7525. return 0;
  7526. }
  7527. static int mt_otp_access(unsigned int access_type, unsigned int offset, void *buff_ptr,
  7528. unsigned int length, unsigned int *status)
  7529. {
unsigned int ret = 0;
  7531. char *BufAddr = (char *)buff_ptr;
  7532. unsigned int PageAddr, AccessLength = 0;
  7533. int Status = 0;
char *p_D_Buff;
  7535. char S_Buff[64];
  7536. p_D_Buff = kmalloc(g_page_size, GFP_KERNEL);
  7537. if (!p_D_Buff) {
  7538. ret = -ENOMEM;
  7539. *status = OTP_ERROR_NOMEM;
  7540. goto exit;
  7541. }
  7542. MSG(OTP, "[%s]: %s (0x%x) length: (%d bytes) !\n", __func__, access_type ? "WRITE" : "READ",
  7543. offset, length);
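/*
 * Chunk the request page by page: each loop pass transfers at most one
 * OTP page, honouring an unaligned starting offset on the first pass.
 */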
  7544. while (1) {
  7545. PageAddr = offset / g_page_size;
  7546. if (FS_OTP_READ == access_type) {
  7547. memset(p_D_Buff, 0xff, g_page_size);
  7548. memset(S_Buff, 0xff, (sizeof(char) * 64));
  7549. MSG(OTP, "[%s]: Read Access of page (%d)\n", __func__, PageAddr);
Status = g_mtk_otp_fuc.OTPRead(PageAddr, p_D_Buff, S_Buff);
  7551. *status = Status;
  7552. if (OTP_SUCCESS != Status) {
  7553. MSG(OTP, "[%s]: Read status (%d)\n", __func__, Status);
  7554. break;
  7555. }
  7556. AccessLength = g_page_size - (offset % g_page_size);
  7557. if (length >= AccessLength)
  7558. memcpy(BufAddr, (p_D_Buff + (offset % g_page_size)), AccessLength);
  7559. else
  7560. memcpy(BufAddr, (p_D_Buff + (offset % g_page_size)), length);
  7561. } else if (FS_OTP_WRITE == access_type) {
  7562. AccessLength = g_page_size - (offset % g_page_size);
  7563. memset(p_D_Buff, 0xff, g_page_size);
  7564. memset(S_Buff, 0xff, (sizeof(char) * 64));
  7565. if (length >= AccessLength)
  7566. memcpy((p_D_Buff + (offset % g_page_size)), BufAddr, AccessLength);
  7567. else
  7568. memcpy((p_D_Buff + (offset % g_page_size)), BufAddr, length);
Status = g_mtk_otp_fuc.OTPWrite(PageAddr, p_D_Buff, S_Buff);
  7570. *status = Status;
  7571. if (OTP_SUCCESS != Status) {
  7572. MSG(OTP, "[%s]: Write status (%d)\n", __func__, Status);
  7573. break;
  7574. }
  7575. } else {
MSG(OTP, "[%s]: Error, neither a read nor a write operation!\n", __func__);
  7577. break;
  7578. }
  7579. offset += AccessLength;
  7580. BufAddr += AccessLength;
  7581. if (length <= AccessLength) {
  7582. length = 0;
  7583. break;
  7584. }
  7585. length -= AccessLength;
  7586. MSG(OTP, "[%s]: Remaining %s (%d) !\n", __func__,
  7587. access_type ? "WRITE" : "READ", length);
  7588. }
  7590. kfree(p_D_Buff);
  7591. exit:
  7592. return ret;
  7593. }
  7594. static long mt_otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  7595. {
int ret = 0;
char *pbuf;
  7598. void __user *uarg = (void __user *)arg;
  7599. struct otp_ctl otpctl;
  7600. /* Lock */
  7601. spin_lock(&g_OTPLock);
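/*
 * Note: kmalloc_array(..., GFP_KERNEL) and copy_{from,to}_user() below can
 * sleep, which is unsafe under a spinlock; a mutex would be the safer lock
 * type here.
 */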
  7602. if (copy_from_user(&otpctl, uarg, sizeof(struct otp_ctl))) {
  7603. ret = -EFAULT;
  7604. goto exit;
  7605. }
  7606. if (false == g_bInitDone) {
  7607. MSG(OTP, "ERROR: NAND Flash Not initialized !!\n");
  7608. ret = -EFAULT;
  7609. goto exit;
  7610. }
  7611. pbuf = kmalloc_array(otpctl.Length, sizeof(char), GFP_KERNEL);
  7612. if (!pbuf) {
  7613. ret = -ENOMEM;
  7614. goto exit;
  7615. }
  7616. switch (cmd) {
  7617. case OTP_GET_LENGTH:
  7618. MSG(OTP, "OTP IOCTL: OTP_GET_LENGTH\n");
  7619. g_mtk_otp_fuc.OTPQueryLength(&otpctl.QLength);
  7620. otpctl.status = OTP_SUCCESS;
  7621. MSG(OTP, "OTP IOCTL: The Length is %d\n", otpctl.QLength);
  7622. break;
  7623. case OTP_READ:
  7624. MSG(OTP, "OTP IOCTL: OTP_READ Offset(0x%x), Length(0x%x)\n", otpctl.Offset,
  7625. otpctl.Length);
  7626. memset(pbuf, 0xff, sizeof(char) * otpctl.Length);
  7627. mt_otp_access(FS_OTP_READ, otpctl.Offset, pbuf, otpctl.Length, &otpctl.status);
if (copy_to_user(otpctl.BufferPtr, pbuf, (sizeof(char) * otpctl.Length))) {
MSG(OTP, "OTP IOCTL: Copy to user buffer Error !\n");
ret = -EFAULT;
goto error;
  7631. }
  7632. break;
  7633. case OTP_WRITE:
  7634. MSG(OTP, "OTP IOCTL: OTP_WRITE Offset(0x%x), Length(0x%x)\n", otpctl.Offset,
  7635. otpctl.Length);
if (copy_from_user(pbuf, otpctl.BufferPtr, (sizeof(char) * otpctl.Length))) {
MSG(OTP, "OTP IOCTL: Copy from user buffer Error !\n");
ret = -EFAULT;
goto error;
  7639. }
  7640. mt_otp_access(FS_OTP_WRITE, otpctl.Offset, pbuf, otpctl.Length, &otpctl.status);
  7641. break;
default:
ret = -EINVAL;
goto error;
  7644. }
if (copy_to_user(uarg, &otpctl, sizeof(struct otp_ctl)))
ret = -EFAULT;
  7646. error:
  7647. kfree(pbuf);
  7648. exit:
  7649. spin_unlock(&g_OTPLock);
  7650. return ret;
  7651. }
  7652. static const struct file_operations nand_otp_fops = {
  7653. .owner = THIS_MODULE,
  7654. .unlocked_ioctl = mt_otp_ioctl,
  7655. .open = mt_otp_open,
  7656. .release = mt_otp_release,
  7657. };
  7658. static struct miscdevice nand_otp_dev = {
  7659. .minor = MISC_DYNAMIC_MINOR,
  7660. .name = "otp",
  7661. .fops = &nand_otp_fops,
  7662. };
  7663. #endif
/******************************************************************************
* Device driver structure
******************************************************************************/
  7667. static const struct of_device_id mtk_nand_of_ids[] = {
  7668. { .compatible = "mediatek,NFI",},
  7669. {}
  7670. };
  7671. static struct platform_driver mtk_nand_driver = {
  7672. .probe = mtk_nand_probe,
  7673. .remove = mtk_nand_remove,
  7674. .suspend = mtk_nand_suspend,
  7675. .resume = mtk_nand_resume,
  7676. .driver = {
  7677. .name = "mtk-nand",
  7678. .owner = THIS_MODULE,
  7679. .of_match_table = mtk_nand_of_ids,
  7680. },
  7681. };
/******************************************************************************
* mtk_nand_init
*
* DESCRIPTION:
* Initialize the device driver.
*
* PARAMETERS:
* None
*
* RETURNS:
* None
*
* NOTES:
* None
*
  7698. #define SEQ_printf(m, x...) \
  7699. do { \
  7700. if (m) \
  7701. seq_printf(m, x); \
  7702. else \
  7703. pr_err(x); \
  7704. } while (0)
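/*
 * SEQ_printf() mirrors seq_printf() but degrades to the kernel log when no
 * seq_file is supplied, e.g.:
 *   SEQ_printf(NULL, "NFI_ACCON = 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
 */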
  7705. int mtk_nand_proc_show(struct seq_file *m, void *v)
  7706. {
  7707. int i;
  7708. SEQ_printf(m, "ID:");
  7709. for (i = 0; i < gn_devinfo.id_length; i++)
  7710. SEQ_printf(m, " 0x%x", gn_devinfo.id[i]);
  7711. SEQ_printf(m, "\n");
  7712. SEQ_printf(m, "total size: %dMiB; part number: %s\n", gn_devinfo.totalsize,
  7713. gn_devinfo.devciename);
  7714. SEQ_printf(m, "Current working in %s mode\n", g_i4Interrupt ? "interrupt" : "polling");
  7715. SEQ_printf(m, "NFI_ACCON = 0x%x\n", DRV_Reg32(NFI_ACCCON_REG32));
  7716. SEQ_printf(m, "NFI_NAND_TYPE_CNFG_REG32= 0x%x\n", DRV_Reg32(NFI_NAND_TYPE_CNFG_REG32));
  7717. return 0;
  7718. }
  7719. static int mt_nand_proc_open(struct inode *inode, struct file *file)
  7720. {
  7721. return single_open(file, mtk_nand_proc_show, inode->i_private);
  7722. }
  7723. static const struct file_operations mtk_nand_fops = {
  7724. .open = mt_nand_proc_open,
  7725. .write = mtk_nand_proc_write,
  7726. .read = seq_read,
  7727. .llseek = seq_lseek,
  7728. .release = single_release,
  7729. };
  7730. static int __init mtk_nand_init(void)
  7731. {
  7732. struct proc_dir_entry *entry;
  7733. g_i4Interrupt = 1;
  7734. #if defined(NAND_OTP_SUPPORT)
  7735. int err = 0;
  7736. MSG(OTP, "OTP: register NAND OTP device ...\n");
  7737. err = misc_register(&nand_otp_dev);
  7738. if (unlikely(err)) {
  7739. MSG(OTP, "OTP: failed to register NAND OTP device!\n");
  7740. return err;
  7741. }
  7742. spin_lock_init(&g_OTPLock);
  7743. #endif
  7744. #if (defined(NAND_OTP_SUPPORT) && SAMSUNG_OTP_SUPPORT)
  7745. g_mtk_otp_fuc.OTPQueryLength = samsung_OTPQueryLength;
  7746. g_mtk_otp_fuc.OTPRead = samsung_OTPRead;
  7747. g_mtk_otp_fuc.OTPWrite = samsung_OTPWrite;
  7748. #endif
  7749. entry = proc_create(PROCNAME, 0664, NULL, &mtk_nand_fops);
  7750. #if 0
  7751. if (entry == NULL) {
  7752. MSG(INIT, "MTK Nand : unable to create /proc entry\n");
  7753. return -ENOMEM;
  7754. }
  7755. entry->read_proc = mtk_nand_proc_read;
  7756. entry->write_proc = mtk_nand_proc_write;
  7757. #endif
  7758. pr_err("MediaTek Nand driver init, version %s\n", VERSION);
  7759. DDR_INTERFACE = FALSE;
  7760. return platform_driver_register(&mtk_nand_driver);
  7761. }
/******************************************************************************
* mtk_nand_exit
*
* DESCRIPTION:
* Tear down the device driver.
*
* PARAMETERS:
* None
*
* RETURNS:
* None
*
* NOTES:
* None
*
  7778. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7779. int mtk_nand_cache_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 *pPageBuf,
  7780. u8 *pFDMBuf, u32 readSize)
  7781. {
  7782. u8 *buf;
  7783. int bRet = ERR_RTN_SUCCESS;
  7784. struct nand_chip *nand = mtd->priv;
  7785. u32 u4SecNum = u4PageSize >> host->hw->nand_sec_shift;
  7786. u32 backup_corrected, backup_failed;
  7787. bool readReady;
  7788. int retryCount = 0;
  7789. u32 tempBitMap, bitMap;
  7790. #ifdef NAND_PFM
  7791. struct timeval pfm_time_read;
  7792. #endif
  7793. struct NFI_TLC_WL_INFO tlc_wl_info;
  7794. struct NFI_TLC_WL_INFO pre_tlc_wl_info;
  7795. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7796. bool tlc_left_plane = TRUE;
  7797. unsigned int phys = 0;
  7798. #endif
  7799. u32 reg_val = 0;
  7800. u32 real_row_addr = 0;
  7801. u32 logical_plane_num = 1;
  7802. u32 data_sector_num = 0;
  7803. u8 *temp_byte_ptr = NULL;
  7804. u8 *spare_ptr = NULL;
  7805. u32 readCount;
  7806. u32 rSize = readSize;
  7807. u32 remainSize;
u8 *dataBuf = pPageBuf;
  7809. u32 read_count;
  7810. PFM_BEGIN(pfm_time_read);
  7811. tempBitMap = 0;
  7812. buf = local_buffer_16_align;
  7813. backup_corrected = mtd->ecc_stats.corrected;
  7814. backup_failed = mtd->ecc_stats.failed;
  7815. bitMap = 0;
  7816. data_sector_num = u4SecNum;
  7817. mtk_nand_interface_switch(mtd);
  7818. logical_plane_num = 1;
  7819. readCount = 0;
  7820. temp_byte_ptr = buf;
  7821. spare_ptr = pFDMBuf;
tlc_wl_info.wl_pre = WL_LOW_PAGE; /* init to silence a build warning */
  7823. tlc_wl_info.word_line_idx = u4RowAddr;
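/*
 * TLC devices store three logical pages (low/mid/high) per physical word
 * line; NFI_TLC_GetMappedWL() resolves the flat page address into a
 * (word line, page position) pair that drives the page-select commands
 * issued below.
 */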
  7824. if (likely(gn_devinfo.tlcControl.normaltlc)) {
  7825. NFI_TLC_GetMappedWL(u4RowAddr, &tlc_wl_info);
  7826. real_row_addr = NFI_TLC_GetRowAddr(tlc_wl_info.word_line_idx);
  7827. if (unlikely(gn_devinfo.tlcControl.pPlaneEn)) {
  7828. tlc_left_plane = TRUE;
  7829. logical_plane_num = 2;
  7830. data_sector_num /= 2;
  7831. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
  7832. }
  7833. /* MSG(INIT, "mtk_nand_exec_read_page, u4RowAddr: 0x%x real_row_addr 0x%x %d\n",
  7834. u4RowAddr, real_row_addr, gn_devinfo.tlcControl.slcopmodeEn); */
  7835. } else
  7836. real_row_addr = NFI_TLC_GetRowAddr(u4RowAddr);
  7837. pre_tlc_wl_info.wl_pre = tlc_wl_info.wl_pre;
  7838. pre_tlc_wl_info.word_line_idx = tlc_wl_info.word_line_idx;
  7839. read_count = 0;
  7840. do {
  7841. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7842. if (gn_devinfo.tlcControl.slcopmodeEn) {
  7843. if (0xFF != gn_devinfo.tlcControl.en_slc_mode_cmd) {
  7844. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  7845. reg_val &= ~CNFG_READ_EN;
  7846. reg_val &= ~CNFG_OP_MODE_MASK;
  7847. reg_val |= CNFG_OP_CUST;
  7848. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  7849. mtk_nand_set_command(gn_devinfo.tlcControl.en_slc_mode_cmd);
  7850. reg_val = DRV_Reg32(NFI_CON_REG16);
  7851. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  7852. /* issue reset operation */
  7853. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  7854. }
  7855. } else {
  7856. if (gn_devinfo.tlcControl.normaltlc) {
  7857. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  7858. reg_val &= ~CNFG_READ_EN;
  7859. reg_val &= ~CNFG_OP_MODE_MASK;
  7860. reg_val |= CNFG_OP_CUST;
  7861. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  7862. if (tlc_wl_info.wl_pre == WL_LOW_PAGE)
  7863. mtk_nand_set_command(LOW_PG_SELECT_CMD);
  7864. else if (tlc_wl_info.wl_pre == WL_MID_PAGE)
  7865. mtk_nand_set_command(MID_PG_SELECT_CMD);
  7866. else if (tlc_wl_info.wl_pre == WL_HIGH_PAGE)
  7867. mtk_nand_set_command(HIGH_PG_SELECT_CMD);
  7868. reg_val = DRV_Reg32(NFI_CON_REG16);
  7869. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  7870. /* issue reset operation */
  7871. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  7872. }
  7873. }
  7874. reg_val = 0;
  7875. #endif
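/* Seed the randomizer with the flat page index: the word line itself in
 * SLC mode, or word_line * 3 + page position in TLC mode. */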
  7876. if (gn_devinfo.tlcControl.slcopmodeEn)
  7877. mtk_nand_turn_on_randomizer(mtd, nand, pre_tlc_wl_info.word_line_idx);
  7878. else
  7879. mtk_nand_turn_on_randomizer(mtd, nand,
  7880. (pre_tlc_wl_info.word_line_idx*3+pre_tlc_wl_info.wl_pre));
  7881. if (unlikely(read_count == 0)) {
  7882. readReady = mtk_nand_ready_for_read(nand, real_row_addr, 0, data_sector_num, true, buf, NORMAL_READ);
  7883. read_count++;
  7884. #if 1
  7885. pre_tlc_wl_info.wl_pre = tlc_wl_info.wl_pre;
  7886. pre_tlc_wl_info.word_line_idx = tlc_wl_info.word_line_idx;
  7887. if (gn_devinfo.tlcControl.slcopmodeEn) {
  7888. tlc_wl_info.word_line_idx++;
  7889. real_row_addr++;
  7890. } else {
  7891. if (tlc_wl_info.wl_pre == WL_HIGH_PAGE) {
  7892. tlc_wl_info.wl_pre = WL_LOW_PAGE;
  7893. tlc_wl_info.word_line_idx++;
  7894. real_row_addr++;
  7895. } else
  7896. tlc_wl_info.wl_pre++;
  7897. }
  7898. mtk_nand_reset();
  7899. continue;
  7900. #endif
  7901. } else if (unlikely(rSize <= u4PageSize)) {
  7902. readReady = mtk_nand_ready_for_read(nand, real_row_addr, 0, data_sector_num, true, buf, AD_CACHE_FINAL);
  7903. } else {
  7904. readReady = mtk_nand_ready_for_read(nand, real_row_addr, 0, data_sector_num, true, buf, AD_CACHE_READ);
  7905. }
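/*
 * The first pass issues a NORMAL_READ to start the burst; subsequent
 * passes use AD_CACHE_READ, and the final pass (rSize <= one page) ends
 * the burst with AD_CACHE_FINAL.
 */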
  7906. if (likely(readReady)) {
  7907. while (logical_plane_num) {
  7908. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7909. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  7910. if (gn_devinfo.tlcControl.needchangecolumn) {
  7911. if (gn_devinfo.tlcControl.pPlaneEn)
  7912. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
  7913. reg_val = DRV_Reg32(NFI_CON_REG16);
  7914. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  7915. /* issue reset operation */
  7916. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  7917. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  7918. reg_val &= ~CNFG_READ_EN;
  7919. reg_val &= ~CNFG_OP_MODE_MASK;
  7920. reg_val |= CNFG_OP_CUST;
  7921. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  7922. mtk_nand_set_command(CHANGE_COLUNM_ADDR_1ST_CMD);
  7923. mtk_nand_set_address(0, real_row_addr, 2, gn_devinfo.addr_cycle - 2);
  7924. mtk_nand_set_command(CHANGE_COLUNM_ADDR_2ND_CMD);
  7925. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  7926. reg_val |= CNFG_READ_EN;
  7927. reg_val &= ~CNFG_OP_MODE_MASK;
  7928. reg_val |= CNFG_OP_READ;
  7929. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  7930. }
  7931. }
  7932. mtk_dir = DMA_FROM_DEVICE;
  7933. sg_init_one(&mtk_sg, temp_byte_ptr, (data_sector_num * (1 << host->hw->nand_sec_shift)));
  7934. dma_map_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
  7935. phys = mtk_sg.dma_address;
  7936. #if __INTERNAL_USE_AHB_MODE__
  7937. if (!phys)
  7938. pr_warn("[%s]convert virt addr (%lx) to phys add (%x)fail!!!",
  7939. __func__, (unsigned long) temp_byte_ptr, phys);
  7940. else
  7941. DRV_WriteReg32(NFI_STRADDR_REG32, phys);
  7942. #endif
  7943. DRV_WriteReg32(NFI_CON_REG16, data_sector_num << CON_NFI_SEC_SHIFT);
  7944. if (g_bHwEcc)
  7945. ECC_Decode_Start();
  7946. #endif
  7947. if (!mtk_nand_read_page_data(mtd, temp_byte_ptr,
  7948. data_sector_num * (1 << host->hw->nand_sec_shift))) {
  7949. MSG(INIT, "mtk_nand_read_page_data fail\n");
  7950. bRet = ERR_RTN_FAIL;
  7951. }
  7952. dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
  7953. if (!mtk_nand_status_ready(STA_NAND_BUSY)) {
  7954. MSG(INIT, "mtk_nand_status_ready fail\n");
  7955. bRet = ERR_RTN_FAIL;
  7956. }
  7957. if (g_bHwEcc) {
  7958. if (!mtk_nand_check_dececc_done(data_sector_num)) {
  7959. MSG(INIT, "mtk_nand_check_dececc_done fail\n");
  7960. bRet = ERR_RTN_FAIL;
  7961. }
  7962. }
  7963. /* mtk_nand_read_fdm_data(spare_ptr, data_sector_num); no need*/
  7964. if (g_bHwEcc) {
  7965. if (!mtk_nand_check_bch_error
  7966. (mtd, temp_byte_ptr, spare_ptr, data_sector_num - 1, u4RowAddr, &tempBitMap)) {
  7967. MSG(INIT, "mtk_nand_check_bch_error fail, retryCount: %d\n",
  7968. retryCount);
  7969. bRet = ERR_RTN_BCH_FAIL;
  7970. }
  7971. }
  7972. mtk_nand_stop_read();
  7973. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7974. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  7975. if (gn_devinfo.tlcControl.needchangecolumn)
  7976. DRV_WriteReg16(NFI_TLC_RD_WHR2_REG16, 0x055);
  7977. if (2 == logical_plane_num) {
  7978. tlc_left_plane = FALSE;
  7979. spare_ptr += (host->hw->nand_fdm_size * data_sector_num);
  7980. temp_byte_ptr += (data_sector_num * (1 << host->hw->nand_sec_shift));
  7981. }
  7982. }
  7983. #endif
  7984. logical_plane_num--;
  7985. if (bRet == ERR_RTN_BCH_FAIL)
  7986. break;
  7987. }
  7988. }
  7989. #ifndef CONFIG_MTK_TLC_NAND_SUPPORT
  7990. else
  7991. dma_unmap_sg(mtk_dev, &mtk_sg, 1, mtk_dir);
  7992. #endif
  7993. mtk_nand_turn_off_randomizer();
  7994. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  7995. if ((gn_devinfo.tlcControl.slcopmodeEn)
  7996. && (0xFF != gn_devinfo.tlcControl.dis_slc_mode_cmd)) {
  7997. reg_val = DRV_Reg32(NFI_CON_REG16);
  7998. reg_val |= CON_FIFO_FLUSH|CON_NFI_RST;
  7999. /* issue reset operation */
  8000. DRV_WriteReg32(NFI_CON_REG16, reg_val);
  8001. reg_val = DRV_Reg16(NFI_CNFG_REG16);
  8002. reg_val &= ~CNFG_READ_EN;
  8003. reg_val &= ~CNFG_OP_MODE_MASK;
  8004. reg_val |= CNFG_OP_CUST;
  8005. DRV_WriteReg16(NFI_CNFG_REG16, reg_val);
  8006. mtk_nand_set_command(gn_devinfo.tlcControl.dis_slc_mode_cmd);
  8007. }
  8008. #endif
  8009. if (bRet == ERR_RTN_BCH_FAIL) {
  8010. break;
  8011. }
  8012. remainSize = min(rSize, u4PageSize);
  8013. memcpy(dataBuf, buf, remainSize);
  8014. readCount++;
  8015. dataBuf += remainSize;
  8016. rSize -= remainSize;
  8017. /* reset row_addr */
  8018. pre_tlc_wl_info.wl_pre = tlc_wl_info.wl_pre;
  8019. pre_tlc_wl_info.word_line_idx = tlc_wl_info.word_line_idx;
  8020. logical_plane_num = 1;
  8021. if (gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC) {
  8022. if (gn_devinfo.tlcControl.normaltlc) {
  8023. if (gn_devinfo.tlcControl.pPlaneEn) {
  8024. tlc_left_plane = TRUE;
  8025. logical_plane_num = 2;
  8026. data_sector_num /= 2;
  8027. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, tlc_left_plane);
  8028. }
  8029. }
  8030. }
  8031. if (gn_devinfo.tlcControl.pPlaneEn)
  8032. real_row_addr = NFI_TLC_SetpPlaneAddr(real_row_addr, TRUE);
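/* Advance to the next logical page: SLC consumes a full word line per
 * page, while TLC steps low -> mid -> high before moving on. */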
  8033. if (gn_devinfo.tlcControl.slcopmodeEn) {
  8034. tlc_wl_info.word_line_idx++;
  8035. real_row_addr++;
  8036. } else {
  8037. if (tlc_wl_info.wl_pre == WL_HIGH_PAGE) {
  8038. tlc_wl_info.wl_pre = WL_LOW_PAGE;
  8039. tlc_wl_info.word_line_idx++;
  8040. real_row_addr++;
  8041. } else
  8042. tlc_wl_info.wl_pre++;
  8043. }
  8044. } while (rSize);
  8045. PFM_END_R(pfm_time_read, u4PageSize + 32);
  8046. return bRet;
  8047. }
  8048. int mtk_nand_read(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page, u32 size)
  8049. {
  8050. int page_per_block = gn_devinfo.blocksize * 1024 / gn_devinfo.pagesize;
  8051. u32 block;
  8052. u32 page_in_block;
  8053. u32 mapped_block;
  8054. int bRet = ERR_RTN_SUCCESS;
  8055. #ifdef DUMP_PEF
  8056. struct timeval stimer, etimer;
  8057. do_gettimeofday(&stimer);
  8058. #endif
  8059. page_in_block = mtk_nand_page_transform(mtd, chip, page, &block, &mapped_block);
  8060. bRet =
  8061. mtk_nand_cache_read_page(mtd, page_in_block + mapped_block * page_per_block,
  8062. mtd->writesize, buf, chip->oob_poi, size);
  8063. if (bRet == ERR_RTN_SUCCESS) {
  8064. #ifdef DUMP_PEF
  8065. do_gettimeofday(&etimer);
  8066. g_NandPerfLog.ReadPageTotalTime += Cal_timediff(&etimer, &stimer);
  8067. g_NandPerfLog.ReadPageCount++;
  8068. dump_nand_rwcount();
  8069. #endif
  8070. #ifdef CFG_SNAND_ACCESS_PATTERN_LOGGER
  8071. if (g_snand_pm_on == 1)
  8072. mtk_snand_pm_add_drv_record(_SNAND_PM_OP_READ_PAGE,
  8073. page_in_block + mapped_block * page_per_block,
  8074. 0, Cal_timediff(&etimer, &stimer));
  8075. #endif
  8076. return 0;
  8077. }
  8078. return -EIO;
  8079. }
  8080. #endif
  8081. static void __exit mtk_nand_exit(void)
  8082. {
  8083. MSG(INIT, "MediaTek Nand driver exit, version %s\n", VERSION);
  8084. #if defined(NAND_OTP_SUPPORT)
  8085. misc_deregister(&nand_otp_dev);
  8086. #endif
  8087. #ifdef SAMSUNG_OTP_SUPPORT
  8088. g_mtk_otp_fuc.OTPQueryLength = NULL;
  8089. g_mtk_otp_fuc.OTPRead = NULL;
  8090. g_mtk_otp_fuc.OTPWrite = NULL;
  8091. #endif
  8092. platform_driver_unregister(&mtk_nand_driver);
  8093. remove_proc_entry(PROCNAME, NULL);
  8094. }
  8095. late_initcall(mtk_nand_init);
  8096. module_exit(mtk_nand_exit);
  8097. MODULE_LICENSE("GPL");