nand_base.c

  1. /*
  2. * drivers/mtd/nand.c
  3. *
  4. * Overview:
  5. * This is the generic MTD driver for NAND flash devices. It should be
  6. * capable of working with almost all NAND chips currently available.
  7. *
  8. * Additional technical information is available on
  9. * http://www.linux-mtd.infradead.org/doc/nand.html
  10. *
  11. * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
  12. * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
  13. *
  14. * Credits:
  15. * David Woodhouse for adding multichip support
  16. *
  17. * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
  18. * rework for 2K page size chips
  19. *
  20. * TODO:
  21. * Enable cached programming for 2k page size chips
  22. * Check, if mtd->ecctype should be set to MTD_ECC_HW
  23. * if we have HW ECC support.
  24. * BBT table is not serialized, has to be fixed
  25. *
  26. * This program is free software; you can redistribute it and/or modify
  27. * it under the terms of the GNU General Public License version 2 as
  28. * published by the Free Software Foundation.
  29. *
  30. */
  31. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  32. #include <linux/module.h>
  33. #include <linux/delay.h>
  34. #include <linux/errno.h>
  35. #include <linux/err.h>
  36. #include <linux/sched.h>
  37. #include <linux/slab.h>
  38. #include <linux/mm.h>
  39. #include <linux/types.h>
  40. #include <linux/mtd/mtd.h>
  41. #include <linux/mtd/nand.h>
  42. #include <linux/mtd/nand_ecc.h>
  43. #include <linux/mtd/nand_bch.h>
  44. #include <linux/interrupt.h>
  45. #include <linux/bitops.h>
  46. #include <linux/leds.h>
  47. #include <linux/io.h>
  48. #include <linux/mtd/partitions.h>
  49. #ifdef CONFIG_MTK_MTD_NAND
  50. #include <asm/cache.h> /* for ARCH_DMA_MINALIGN */
  51. #endif
  52. #include <asm/div64.h>
  53. #ifdef MTD_NAND_PFM
  54. #include <linux/time.h>
  55. static suseconds_t g_PFM_R_SLC;
  56. static suseconds_t g_PFM_W_SLC;
  57. static suseconds_t g_PFM_E_SLC;
  58. static u32 g_PFM_RNum_SLC;
  59. static u32 g_PFM_RD_SLC;
  60. static u32 g_PFM_WD_SLC;
  61. static suseconds_t g_PFM_R_TLC;
  62. static suseconds_t g_PFM_W_TLC;
  63. static suseconds_t g_PFM_E_TLC;
  64. static u32 g_PFM_RNum_TLC;
  65. static u32 g_PFM_RD_TLC;
  66. static u32 g_PFM_WD_TLC;
  67. static struct timeval g_now;
  68. #define PFM_BEGIN(time) \
  69. do { \
  70. do_gettimeofday(&g_now); \
  71. (time) = g_now; \
  72. } while (0)
  73. #define PFM_END_R_SLC(time, n) \
  74. do { \
  75. do_gettimeofday(&g_now); \
  76. g_PFM_R_SLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  77. g_PFM_RNum_SLC += 1; \
  78. g_PFM_RD_SLC += n; \
  79. pr_warn("[MTD_NAND] - Read SLC PFM: %lu, data: %d, num: %d\n" , g_PFM_R_SLC, g_PFM_RD_SLC, g_PFM_RNum_SLC); \
  80. } while (0)
  81. #define PFM_END_W_SLC(time, n) \
  82. do { \
  83. do_gettimeofday(&g_now); \
  84. g_PFM_W_SLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  85. g_PFM_WD_SLC += n; \
  86. pr_warn("[MTD_NAND] - Write SLC PFM: %lu, data: %d\n", g_PFM_W_SLC, g_PFM_WD_SLC); \
  87. } while (0)
  88. #define PFM_END_E_SLC(time) \
  89. do { \
  90. do_gettimeofday(&g_now); \
  91. g_PFM_E_SLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  92. pr_warn("[MTD_NAND] - Erase SLC PFM: %lu\n", g_PFM_E_SLC); \
  93. } while (0)
  94. #define PFM_END_R_TLC(time, n) \
  95. do { \
  96. do_gettimeofday(&g_now); \
  97. g_PFM_R_TLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  98. g_PFM_RNum_TLC += 1; \
  99. g_PFM_RD_TLC += n; \
  100. pr_warn("[MTD_NAND] - Read TLC PFM: %lu, data: %d, num: %d\n" , g_PFM_R_TLC, g_PFM_RD_TLC, g_PFM_RNum_TLC); \
  101. } while (0)
  102. #define PFM_END_W_TLC(time, n) \
  103. do { \
  104. do_gettimeofday(&g_now); \
  105. g_PFM_W_TLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  106. g_PFM_WD_TLC += n; \
  107. pr_warn("[MTD_NAND] - Write TLC PFM: %lu, data: %d\n", g_PFM_W_TLC, g_PFM_WD_TLC); \
  108. } while (0)
  109. #define PFM_END_E_TLC(time) \
  110. do { \
  111. do_gettimeofday(&g_now); \
  112. g_PFM_E_TLC += (g_now.tv_sec * 1000000 + g_now.tv_usec) - (time.tv_sec * 1000000 + time.tv_usec); \
  113. pr_warn("[MTD_NAND] - Erase TLC PFM: %lu\n", g_PFM_E_TLC); \
  114. } while (0)
  115. #endif
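/*
 * Illustrative sketch only (not taken from the driver): the PFM macros
 * above are meant to bracket an operation and accumulate its duration,
 * roughly as follows, where pfm_time, len and the read step are
 * placeholders:
 *
 *	struct timeval pfm_time;
 *
 *	PFM_BEGIN(pfm_time);
 *	(perform an SLC page read of len bytes here)
 *	PFM_END_R_SLC(pfm_time, len);
 */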
  116. /* Define default oob placement schemes for large and small page devices */
  117. static struct nand_ecclayout nand_oob_8 = {
  118. .eccbytes = 3,
  119. .eccpos = {0, 1, 2},
  120. .oobfree = {
  121. {.offset = 3,
  122. .length = 2},
  123. {.offset = 6,
  124. .length = 2} }
  125. };
  126. static struct nand_ecclayout nand_oob_16 = {
  127. .eccbytes = 6,
  128. .eccpos = {0, 1, 2, 3, 6, 7},
  129. .oobfree = {
  130. {.offset = 8,
131. .length = 8} }
  132. };
  133. static struct nand_ecclayout nand_oob_64 = {
  134. .eccbytes = 24,
  135. .eccpos = {
  136. 40, 41, 42, 43, 44, 45, 46, 47,
  137. 48, 49, 50, 51, 52, 53, 54, 55,
  138. 56, 57, 58, 59, 60, 61, 62, 63},
  139. .oobfree = {
  140. {.offset = 2,
  141. .length = 38} }
  142. };
  143. static struct nand_ecclayout nand_oob_128 = {
  144. .eccbytes = 48,
  145. .eccpos = {
  146. 80, 81, 82, 83, 84, 85, 86, 87,
  147. 88, 89, 90, 91, 92, 93, 94, 95,
  148. 96, 97, 98, 99, 100, 101, 102, 103,
  149. 104, 105, 106, 107, 108, 109, 110, 111,
  150. 112, 113, 114, 115, 116, 117, 118, 119,
  151. 120, 121, 122, 123, 124, 125, 126, 127},
  152. .oobfree = {
  153. {.offset = 2,
  154. .length = 78} }
  155. };
  156. #define PMT_POOL_SIZE (2)
  157. static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
  158. struct mtd_oob_ops *ops);
  159. /*
  160. * For devices which display every fart in the system on a separate LED. Is
  161. * compiled away when LED support is disabled.
  162. */
  163. DEFINE_LED_TRIGGER(nand_led_trigger);
  164. static int check_offs_len(struct mtd_info *mtd,
  165. loff_t ofs, uint64_t len)
  166. {
  167. int ret = 0;
  168. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  169. int block_size;
  170. u32 idx;
  171. u64 start_addr;
  172. loff_t temp, temp1;
  173. start_addr = part_get_startaddress(ofs, &idx);
  174. block_size = mtd->eraseregions[idx].erasesize;
175. /* Start address must align on a block boundary.
176. * ofs is handled as a u32 here because a 64-bit '%' breaks the 32-bit kernel build;
177. * block_size is only a few MB (an unsigned int), so truncating to u32 is safe.
178. */
  179. temp = ofs;
  180. temp1 = do_div(temp, (block_size & 0xFFFFFFFF));
  181. if (temp1) {
  182. pr_err("%s: unaligned address, 0x%x,%lld, %d,%d\n"
  183. , __func__, (u32)ofs, ofs, block_size, (u32)ofs % block_size);
  184. ret = -EINVAL;
  185. }
  186. temp = len;
  187. temp1 = do_div(temp, (block_size & 0xFFFFFFFF));
  188. /* Length must align on block boundary */
  189. if (temp1) {
  190. pr_err("%s: length not block aligned\n", __func__);
  191. ret = -EINVAL;
  192. }
  193. #elif defined(CONFIG_MTK_MLC_NAND_SUPPORT)
  194. int block_size;
  195. struct nand_chip *chip = mtd->priv;
  196. if (mtk_nand_IsRawPartition(ofs))
  197. block_size = (1ULL << (chip->phys_erase_shift - 1));
  198. else
  199. block_size = (1ULL << chip->phys_erase_shift);
  200. /* Start address must align on block boundary */
  201. if (ofs & (block_size - 1)) {
  202. pr_debug("%s: unaligned address\n", __func__);
  203. ret = -EINVAL;
  204. }
  205. /* Length must align on block boundary */
  206. if (len & (block_size - 1)) {
  207. pr_debug("%s: length not block aligned\n", __func__);
  208. ret = -EINVAL;
  209. }
  210. #else
  211. struct nand_chip *chip = mtd->priv;
  212. /* Start address must align on block boundary */
  213. if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
  214. pr_debug("%s: unaligned address\n", __func__);
  215. ret = -EINVAL;
  216. }
  217. /* Length must align on block boundary */
  218. if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
  219. pr_debug("%s: length not block aligned\n", __func__);
  220. ret = -EINVAL;
  221. }
  222. #endif
  223. #ifdef CONFIG_MTK_MTD_NAND
  224. /* Do not allow past end of device */
  225. if (ofs + len > (mtd->size + PMT_POOL_SIZE * mtd->erasesize)) {
  226. pr_debug("%s: Past end of device\n", __func__);
  227. ret = -EINVAL;
  228. }
  229. #endif
  230. return ret;
  231. }
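/*
 * Worked example of the alignment checks above (numbers purely
 * illustrative): with a 128 KiB erase block, phys_erase_shift is 17 and
 * block_size - 1 is 0x1FFFF, so ofs = 0x20000 is accepted
 * (ofs & 0x1FFFF == 0) while ofs = 0x21000 is rejected with -EINVAL.
 * The do_div() variant used in the TLC path performs the equivalent
 * remainder test with a 64-bit division instead of a mask.
 */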
  232. /**
  233. * nand_release_device - [GENERIC] release chip
  234. * @mtd: MTD device structure
  235. *
  236. * Release chip lock and wake up anyone waiting on the device.
  237. */
  238. void nand_release_device(struct mtd_info *mtd)
  239. {
  240. struct nand_chip *chip = mtd->priv;
  241. /* Release the controller and the chip */
  242. spin_lock(&chip->controller->lock);
  243. chip->controller->active = NULL;
  244. #ifdef CONFIG_MTK_MTD_NAND
  245. if (chip->state != FL_READY && chip->state != FL_PM_SUSPENDED)
  246. nand_disable_clock();
  247. #endif
  248. chip->state = FL_READY;
  249. wake_up(&chip->controller->wq);
  250. spin_unlock(&chip->controller->lock);
  251. }
  252. EXPORT_SYMBOL_GPL(nand_release_device);
  253. /**
  254. * nand_read_byte - [DEFAULT] read one byte from the chip
  255. * @mtd: MTD device structure
  256. *
  257. * Default read function for 8bit buswidth
  258. */
  259. static uint8_t nand_read_byte(struct mtd_info *mtd)
  260. {
  261. struct nand_chip *chip = mtd->priv;
  262. return readb(chip->IO_ADDR_R);
  263. }
  264. /**
265. * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
  267. * @mtd: MTD device structure
  268. *
  269. * Default read function for 16bit buswidth with endianness conversion.
  270. *
  271. */
  272. static uint8_t nand_read_byte16(struct mtd_info *mtd)
  273. {
  274. struct nand_chip *chip = mtd->priv;
  275. return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
  276. }
  277. /**
  278. * nand_read_word - [DEFAULT] read one word from the chip
  279. * @mtd: MTD device structure
  280. *
  281. * Default read function for 16bit buswidth without endianness conversion.
  282. */
  283. static u16 nand_read_word(struct mtd_info *mtd)
  284. {
  285. struct nand_chip *chip = mtd->priv;
  286. return readw(chip->IO_ADDR_R);
  287. }
  288. /**
  289. * nand_select_chip - [DEFAULT] control CE line
  290. * @mtd: MTD device structure
  291. * @chipnr: chipnumber to select, -1 for deselect
  292. *
  293. * Default select function for 1 chip devices.
  294. */
  295. static void nand_select_chip(struct mtd_info *mtd, int chipnr)
  296. {
  297. struct nand_chip *chip = mtd->priv;
  298. switch (chipnr) {
  299. case -1:
  300. chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
  301. break;
  302. case 0:
  303. break;
  304. default:
  305. BUG();
  306. }
  307. }
  308. /**
  309. * nand_write_byte - [DEFAULT] write single byte to chip
  310. * @mtd: MTD device structure
  311. * @byte: value to write
  312. *
  313. * Default function to write a byte to I/O[7:0]
  314. */
  315. static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
  316. {
  317. struct nand_chip *chip = mtd->priv;
  318. chip->write_buf(mtd, &byte, 1);
  319. }
  320. /**
  321. * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
  322. * @mtd: MTD device structure
  323. * @byte: value to write
  324. *
  325. * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
  326. */
  327. static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
  328. {
  329. struct nand_chip *chip = mtd->priv;
  330. uint16_t word = byte;
  331. /*
  332. * It's not entirely clear what should happen to I/O[15:8] when writing
  333. * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
  334. *
  335. * When the host supports a 16-bit bus width, only data is
  336. * transferred at the 16-bit width. All address and command line
  337. * transfers shall use only the lower 8-bits of the data bus. During
  338. * command transfers, the host may place any value on the upper
  339. * 8-bits of the data bus. During address transfers, the host shall
  340. * set the upper 8-bits of the data bus to 00h.
  341. *
  342. * One user of the write_byte callback is nand_onfi_set_features. The
  343. * four parameters are specified to be written to I/O[7:0], but this is
  344. * neither an address nor a command transfer. Let's assume a 0 on the
  345. * upper I/O lines is OK.
  346. */
  347. chip->write_buf(mtd, (uint8_t *)&word, 2);
  348. }
  349. /**
  350. * nand_write_buf - [DEFAULT] write buffer to chip
  351. * @mtd: MTD device structure
  352. * @buf: data buffer
  353. * @len: number of bytes to write
  354. *
  355. * Default write function for 8bit buswidth.
  356. */
  357. static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
  358. {
  359. struct nand_chip *chip = mtd->priv;
  360. iowrite8_rep(chip->IO_ADDR_W, buf, len);
  361. }
  362. /**
  363. * nand_read_buf - [DEFAULT] read chip data into buffer
  364. * @mtd: MTD device structure
365. * @buf: buffer to store data
  366. * @len: number of bytes to read
  367. *
  368. * Default read function for 8bit buswidth.
  369. */
  370. static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
  371. {
  372. struct nand_chip *chip = mtd->priv;
  373. ioread8_rep(chip->IO_ADDR_R, buf, len);
  374. }
  375. /**
  376. * nand_write_buf16 - [DEFAULT] write buffer to chip
  377. * @mtd: MTD device structure
  378. * @buf: data buffer
  379. * @len: number of bytes to write
  380. *
  381. * Default write function for 16bit buswidth.
  382. */
  383. static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
  384. {
  385. struct nand_chip *chip = mtd->priv;
  386. u16 *p = (u16 *) buf;
  387. iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
  388. }
  389. /**
  390. * nand_read_buf16 - [DEFAULT] read chip data into buffer
  391. * @mtd: MTD device structure
392. * @buf: buffer to store data
  393. * @len: number of bytes to read
  394. *
  395. * Default read function for 16bit buswidth.
  396. */
  397. static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
  398. {
  399. struct nand_chip *chip = mtd->priv;
  400. u16 *p = (u16 *) buf;
  401. ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
  402. }
  403. /**
  404. * nand_block_bad - [DEFAULT] Read bad block marker from the chip
  405. * @mtd: MTD device structure
  406. * @ofs: offset from device start
  407. * @getchip: 0, if the chip is already selected
  408. *
  409. * Check, if the block is bad.
  410. */
  411. static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
  412. {
  413. int page, chipnr, res = 0, i = 0;
  414. struct nand_chip *chip = mtd->priv;
  415. u16 bad;
  416. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  417. loff_t temp;
  418. #endif
  419. if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
  420. ofs += mtd->erasesize - mtd->writesize;
  421. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  422. page = (int)(ofs >> chip->page_shift);
  423. page = page % (chip->pagemask + 1);
  424. #else
  425. page = (int)(ofs >> chip->page_shift) & chip->pagemask;
  426. #endif
  427. if (getchip) {
  428. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  429. temp = mtk_nand_device_size();
  430. if (ofs >= temp)
  431. chipnr = 1;
  432. else
  433. chipnr = 0;
  434. #else
  435. chipnr = (int)(ofs >> chip->chip_shift);
  436. #endif
  437. nand_get_device(mtd, FL_READING);
  438. /* Select the NAND device */
  439. chip->select_chip(mtd, chipnr);
  440. }
  441. do {
  442. if (chip->options & NAND_BUSWIDTH_16) {
  443. chip->cmdfunc(mtd, NAND_CMD_READOOB,
  444. chip->badblockpos & 0xFE, page);
  445. bad = cpu_to_le16(chip->read_word(mtd));
  446. if (chip->badblockpos & 0x1)
  447. bad >>= 8;
  448. else
  449. bad &= 0xFF;
  450. } else {
  451. chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
  452. page);
  453. bad = chip->read_byte(mtd);
  454. }
  455. if (likely(chip->badblockbits == 8))
  456. res = bad != 0xFF;
  457. else
  458. res = hweight8(bad) < chip->badblockbits;
  459. ofs += mtd->writesize;
  460. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  461. page = (int)(ofs >> chip->page_shift) % (chip->pagemask + 1);
  462. #else
  463. page = (int)(ofs >> chip->page_shift) & chip->pagemask;
  464. #endif
  465. i++;
  466. } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
  467. if (getchip) {
  468. chip->select_chip(mtd, -1);
  469. nand_release_device(mtd);
  470. }
  471. return res;
  472. }
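/*
 * Sketch of the marker interpretation above, for illustration: with the
 * common badblockbits == 8 a block is considered good only when the
 * marker byte reads back as 0xFF; any other value (for instance the 0x00
 * written by nand_default_block_markbad) makes `bad != 0xFF` true and the
 * block is reported bad. With badblockbits < 8, hweight8() lets a marker
 * with a few zero bits still count as good.
 */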
  473. /**
  474. * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
  475. * @mtd: MTD device structure
  476. * @ofs: offset from device start
  477. *
  478. * This is the default implementation, which can be overridden by a hardware
  479. * specific driver. It provides the details for writing a bad block marker to a
  480. * block.
  481. */
  482. static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs, const uint8_t *buffer)
  483. {
  484. struct nand_chip *chip = mtd->priv;
  485. struct mtd_oob_ops ops;
  486. uint8_t buf[2] = { 0, 0 };
  487. int ret = 0, res, i = 0;
  488. int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
  489. if (write_oob) {
  490. struct erase_info einfo;
  491. /* Attempt erase before marking OOB */
  492. memset(&einfo, 0, sizeof(einfo));
  493. einfo.mtd = mtd;
  494. einfo.addr = ofs;
  495. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  496. einfo.len = mtd->erasesize;
  497. #else
  498. einfo.len = 1 << chip->phys_erase_shift;
  499. #endif
  500. nand_erase_nand(mtd, &einfo, 0);
  501. }
  502. ops.datbuf = NULL;
  503. ops.oobbuf = buf;
  504. ops.ooboffs = chip->badblockpos;
  505. if (chip->options & NAND_BUSWIDTH_16) {
  506. ops.ooboffs &= ~0x01;
  507. ops.len = ops.ooblen = 2;
  508. } else {
  509. ops.len = ops.ooblen = 1;
  510. }
  511. ops.mode = MTD_OPS_PLACE_OOB;
  512. /* Write to first/last page(s) if necessary */
  513. if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
  514. ofs += mtd->erasesize - mtd->writesize;
  515. do {
  516. res = nand_do_write_oob(mtd, ofs, &ops);
  517. if (!ret)
  518. ret = res;
  519. i++;
  520. ofs += mtd->writesize;
  521. } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
  522. return ret;
  523. }
  524. /**
  525. * nand_block_markbad_lowlevel - mark a block bad
  526. * @mtd: MTD device structure
  527. * @ofs: offset from device start
  528. *
  529. * This function performs the generic NAND bad block marking steps (i.e., bad
  530. * block table(s) and/or marker(s)). We only allow the hardware driver to
  531. * specify how to write bad block markers to OOB (chip->block_markbad).
  532. *
  533. * We try operations in the following order:
  534. * (1) erase the affected block, to allow OOB marker to be written cleanly
  535. * (2) write bad block marker to OOB area of affected block (unless flag
  536. * NAND_BBT_NO_OOB_BBM is present)
  537. * (3) update the BBT
  538. * Note that we retain the first error encountered in (2) or (3), finish the
  539. * procedures, and dump the error in the end.
  540. */
  541. static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
  542. {
  543. struct nand_chip *chip = mtd->priv;
  544. int res, ret = 0;
  545. if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
  546. struct erase_info einfo;
  547. /* Attempt erase before marking OOB */
  548. memset(&einfo, 0, sizeof(einfo));
  549. einfo.mtd = mtd;
  550. einfo.addr = ofs;
  551. einfo.len = 1ULL << chip->phys_erase_shift;
  552. nand_erase_nand(mtd, &einfo, 0);
  553. /* Write bad block marker to OOB */
  554. nand_get_device(mtd, FL_WRITING);
  555. ret = chip->block_markbad(mtd, ofs, NULL);
  556. nand_release_device(mtd);
  557. }
  558. /* Mark block bad in BBT */
  559. if (chip->bbt) {
  560. res = nand_markbad_bbt(mtd, ofs);
  561. if (!ret)
  562. ret = res;
  563. }
  564. if (!ret)
  565. mtd->ecc_stats.badblocks++;
  566. return ret;
  567. }
  568. /**
  569. * nand_check_wp - [GENERIC] check if the chip is write protected
  570. * @mtd: MTD device structure
  571. *
  572. * Check, if the device is write protected. The function expects, that the
  573. * device is already selected.
  574. */
  575. static int nand_check_wp(struct mtd_info *mtd)
  576. {
  577. struct nand_chip *chip = mtd->priv;
  578. /* Broken xD cards report WP despite being writable */
  579. if (chip->options & NAND_BROKEN_XD)
  580. return 0;
  581. /* Check the WP bit */
  582. chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
  583. return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
  584. }
  585. /**
586. * nand_block_isreserved - [GENERIC] Check if a block is marked reserved
  587. * @mtd: MTD device structure
  588. * @ofs: offset from device start
  589. *
590. * Check if the block is marked as reserved.
  591. */
  592. static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
  593. {
  594. struct nand_chip *chip = mtd->priv;
  595. if (!chip->bbt)
  596. return 0;
  597. /* Return info from the table */
  598. return nand_isreserved_bbt(mtd, ofs);
  599. }
  600. /**
  601. * nand_block_checkbad - [GENERIC] Check if a block is marked bad
  602. * @mtd: MTD device structure
  603. * @ofs: offset from device start
  604. * @getchip: 0, if the chip is already selected
605. * @allowbbt: 1, if it is allowed to access the bbt area
  606. *
  607. * Check, if the block is bad. Either by reading the bad block table or
  608. * calling of the scan function.
  609. */
  610. static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
  611. int allowbbt)
  612. {
  613. struct nand_chip *chip = mtd->priv;
  614. if (!chip->bbt)
  615. return chip->block_bad(mtd, ofs, getchip);
  616. /* Return info from the table */
  617. return nand_isbad_bbt(mtd, ofs, allowbbt);
  618. }
  619. /**
  620. * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
  621. * @mtd: MTD device structure
  622. * @timeo: Timeout
  623. *
  624. * Helper function for nand_wait_ready used when needing to wait in interrupt
  625. * context.
  626. */
  627. static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
  628. {
  629. struct nand_chip *chip = mtd->priv;
  630. int i;
  631. /* Wait for the device to get ready */
  632. for (i = 0; i < timeo; i++) {
  633. if (chip->dev_ready(mtd))
  634. break;
  635. touch_softlockup_watchdog();
  636. mdelay(1);
  637. }
  638. }
  639. /* Wait for the ready pin, after a command. The timeout is caught later. */
  640. void nand_wait_ready(struct mtd_info *mtd)
  641. {
  642. struct nand_chip *chip = mtd->priv;
  643. unsigned long timeo = jiffies + msecs_to_jiffies(20);
  644. /* 400ms timeout */
  645. if (in_interrupt() || oops_in_progress)
  646. return panic_nand_wait_ready(mtd, 400);
  647. led_trigger_event(nand_led_trigger, LED_FULL);
  648. /* Wait until command is processed or timeout occurs */
  649. do {
  650. if (chip->dev_ready(mtd))
  651. break;
  652. touch_softlockup_watchdog();
  653. } while (time_before(jiffies, timeo));
  654. led_trigger_event(nand_led_trigger, LED_OFF);
  655. }
  656. EXPORT_SYMBOL_GPL(nand_wait_ready);
  657. /**
  658. * nand_command - [DEFAULT] Send command to NAND device
  659. * @mtd: MTD device structure
  660. * @command: the command to be sent
  661. * @column: the column address for this command, -1 if none
  662. * @page_addr: the page address for this command, -1 if none
  663. *
  664. * Send command to NAND device. This function is used for small page devices
  665. * (512 Bytes per page).
  666. */
  667. static void nand_command(struct mtd_info *mtd, unsigned int command,
  668. int column, int page_addr)
  669. {
  670. register struct nand_chip *chip = mtd->priv;
  671. int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
  672. /* Write out the command to the device */
  673. if (command == NAND_CMD_SEQIN) {
  674. int readcmd;
  675. if (column >= mtd->writesize) {
  676. /* OOB area */
  677. column -= mtd->writesize;
  678. readcmd = NAND_CMD_READOOB;
  679. } else if (column < 256) {
  680. /* First 256 bytes --> READ0 */
  681. readcmd = NAND_CMD_READ0;
  682. } else {
  683. column -= 256;
  684. readcmd = NAND_CMD_READ1;
  685. }
  686. chip->cmd_ctrl(mtd, readcmd, ctrl);
  687. ctrl &= ~NAND_CTRL_CHANGE;
  688. }
  689. chip->cmd_ctrl(mtd, command, ctrl);
  690. /* Address cycle, when necessary */
  691. ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
  692. /* Serially input address */
  693. if (column != -1) {
  694. /* Adjust columns for 16 bit buswidth */
  695. if (chip->options & NAND_BUSWIDTH_16 &&
  696. !nand_opcode_8bits(command))
  697. column >>= 1;
  698. chip->cmd_ctrl(mtd, column, ctrl);
  699. ctrl &= ~NAND_CTRL_CHANGE;
  700. }
  701. if (page_addr != -1) {
  702. chip->cmd_ctrl(mtd, page_addr, ctrl);
  703. ctrl &= ~NAND_CTRL_CHANGE;
  704. chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
  705. /* One more address cycle for devices > 32MiB */
  706. if (chip->chipsize > (32 << 20))
  707. chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
  708. }
  709. chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  710. /*
711. * Program and erase have their own busy handlers; status and sequential
712. * in need no delay.
  713. */
  714. switch (command) {
  715. case NAND_CMD_PAGEPROG:
  716. case NAND_CMD_ERASE1:
  717. case NAND_CMD_ERASE2:
  718. case NAND_CMD_SEQIN:
  719. case NAND_CMD_STATUS:
  720. return;
  721. case NAND_CMD_RESET:
  722. if (chip->dev_ready)
  723. break;
  724. udelay(chip->chip_delay);
  725. chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
  726. NAND_CTRL_CLE | NAND_CTRL_CHANGE);
  727. chip->cmd_ctrl(mtd,
  728. NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  729. while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
  730. ;
  731. return;
  732. /* This applies to read commands */
  733. default:
  734. /*
  735. * If we don't have access to the busy pin, we apply the given
  736. * command delay
  737. */
  738. if (!chip->dev_ready) {
  739. udelay(chip->chip_delay);
  740. return;
  741. }
  742. }
  743. /*
  744. * Apply this short delay always to ensure that we do wait tWB in
  745. * any case on any machine.
  746. */
  747. ndelay(100);
  748. nand_wait_ready(mtd);
  749. }
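/*
 * Address-cycle sketch for the small-page path above, for illustration
 * only: the column byte (halved first on a 16-bit bus) is followed by two
 * page-address bytes, plus a third page-address byte when the chip is
 * larger than 32 MiB; e.g. page_addr 0x12345 goes out as 0x45, 0x23 and
 * then 0x01 on such a chip.
 */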
  750. /**
  751. * nand_command_lp - [DEFAULT] Send command to NAND large page device
  752. * @mtd: MTD device structure
  753. * @command: the command to be sent
  754. * @column: the column address for this command, -1 if none
  755. * @page_addr: the page address for this command, -1 if none
  756. *
  757. * Send command to NAND device. This is the version for the new large page
  758. * devices. We don't have the separate regions as we have in the small page
  759. * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
  760. */
  761. static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
  762. int column, int page_addr)
  763. {
  764. register struct nand_chip *chip = mtd->priv;
  765. /* Emulate NAND_CMD_READOOB */
  766. if (command == NAND_CMD_READOOB) {
  767. column += mtd->writesize;
  768. command = NAND_CMD_READ0;
  769. }
  770. /* Command latch cycle */
  771. chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  772. if (column != -1 || page_addr != -1) {
  773. int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
  774. /* Serially input address */
  775. if (column != -1) {
  776. /* Adjust columns for 16 bit buswidth */
  777. if (chip->options & NAND_BUSWIDTH_16 &&
  778. !nand_opcode_8bits(command))
  779. column >>= 1;
  780. chip->cmd_ctrl(mtd, column, ctrl);
  781. ctrl &= ~NAND_CTRL_CHANGE;
  782. chip->cmd_ctrl(mtd, column >> 8, ctrl);
  783. }
  784. if (page_addr != -1) {
  785. chip->cmd_ctrl(mtd, page_addr, ctrl);
  786. chip->cmd_ctrl(mtd, page_addr >> 8,
  787. NAND_NCE | NAND_ALE);
  788. /* One more address cycle for devices > 128MiB */
  789. if (chip->chipsize > (128 << 20))
  790. chip->cmd_ctrl(mtd, page_addr >> 16,
  791. NAND_NCE | NAND_ALE);
  792. }
  793. }
  794. chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
  795. /*
796. * Program and erase have their own busy handlers; status, sequential
797. * in, and deplete1 need no delay.
  798. */
  799. switch (command) {
  800. case NAND_CMD_CACHEDPROG:
  801. case NAND_CMD_PAGEPROG:
  802. case NAND_CMD_ERASE1:
  803. case NAND_CMD_ERASE2:
  804. case NAND_CMD_SEQIN:
  805. case NAND_CMD_RNDIN:
  806. case NAND_CMD_STATUS:
  807. return;
  808. case NAND_CMD_RESET:
  809. if (chip->dev_ready)
  810. break;
  811. udelay(chip->chip_delay);
  812. chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
  813. NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  814. chip->cmd_ctrl(mtd, NAND_CMD_NONE,
  815. NAND_NCE | NAND_CTRL_CHANGE);
  816. while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
  817. ;
  818. return;
  819. case NAND_CMD_RNDOUT:
  820. /* No ready / busy check necessary */
  821. chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
  822. NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  823. chip->cmd_ctrl(mtd, NAND_CMD_NONE,
  824. NAND_NCE | NAND_CTRL_CHANGE);
  825. return;
  826. case NAND_CMD_READ0:
  827. chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
  828. NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
  829. chip->cmd_ctrl(mtd, NAND_CMD_NONE,
  830. NAND_NCE | NAND_CTRL_CHANGE);
  831. /* This applies to read commands */
  832. default:
  833. /*
  834. * If we don't have access to the busy pin, we apply the given
  835. * command delay.
  836. */
  837. if (!chip->dev_ready) {
  838. udelay(chip->chip_delay);
  839. return;
  840. }
  841. }
  842. /*
  843. * Apply this short delay always to ensure that we do wait tWB in
  844. * any case on any machine.
  845. */
  846. ndelay(100);
  847. nand_wait_ready(mtd);
  848. }
  849. /**
  850. * panic_nand_get_device - [GENERIC] Get chip for selected access
  851. * @chip: the nand chip descriptor
  852. * @mtd: MTD device structure
  853. * @new_state: the state which is requested
  854. *
  855. * Used when in panic, no locks are taken.
  856. */
  857. static void panic_nand_get_device(struct nand_chip *chip,
  858. struct mtd_info *mtd, int new_state)
  859. {
  860. /* Hardware controller shared among independent devices */
  861. chip->controller->active = chip;
  862. chip->state = new_state;
  863. }
  864. /**
  865. * nand_get_device - [GENERIC] Get chip for selected access
  866. * @mtd: MTD device structure
  867. * @new_state: the state which is requested
  868. *
  869. * Get the device and lock it for exclusive access
  870. */
  871. int nand_get_device(struct mtd_info *mtd, int new_state)
  872. {
  873. struct nand_chip *chip = mtd->priv;
  874. spinlock_t *lock = &chip->controller->lock;
  875. wait_queue_head_t *wq = &chip->controller->wq;
  876. DECLARE_WAITQUEUE(wait, current);
  877. retry:
  878. spin_lock(lock);
  879. /* Hardware controller shared among independent devices */
  880. if (!chip->controller->active)
  881. chip->controller->active = chip;
  882. if (chip->controller->active == chip && chip->state == FL_READY) {
  883. #ifdef CONFIG_MTK_MTD_NAND
  884. if (new_state != FL_READY && new_state != FL_PM_SUSPENDED)
  885. nand_enable_clock();
  886. #endif
  887. chip->state = new_state;
  888. spin_unlock(lock);
  889. return 0;
  890. }
  891. if (new_state == FL_PM_SUSPENDED) {
  892. if (chip->controller->active->state == FL_PM_SUSPENDED) {
  893. chip->state = FL_PM_SUSPENDED;
  894. spin_unlock(lock);
  895. return 0;
  896. }
  897. }
  898. set_current_state(TASK_UNINTERRUPTIBLE);
  899. add_wait_queue(wq, &wait);
  900. spin_unlock(lock);
  901. schedule();
  902. remove_wait_queue(wq, &wait);
  903. goto retry;
  904. }
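/*
 * Typical pairing, shown only as a sketch of the pattern used throughout
 * this file: exclusive access is taken before driving the chip and
 * dropped afterwards, e.g.
 *
 *	nand_get_device(mtd, FL_READING);
 *	chip->select_chip(mtd, chipnr);
 *	(issue commands, read data)
 *	chip->select_chip(mtd, -1);
 *	nand_release_device(mtd);
 */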
  905. /**
  906. * panic_nand_wait - [GENERIC] wait until the command is done
  907. * @mtd: MTD device structure
  908. * @chip: NAND chip structure
  909. * @timeo: timeout
  910. *
  911. * Wait for command done. This is a helper function for nand_wait used when
  912. * we are in interrupt context. May happen when in panic and trying to write
  913. * an oops through mtdoops.
  914. */
  915. static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
  916. unsigned long timeo)
  917. {
  918. int i;
  919. for (i = 0; i < timeo; i++) {
  920. if (chip->dev_ready) {
  921. if (chip->dev_ready(mtd))
  922. break;
  923. } else {
  924. if (chip->read_byte(mtd) & NAND_STATUS_READY)
  925. break;
  926. }
  927. mdelay(1);
  928. }
  929. }
  930. #ifdef CONFIG_MTK_MTD_NAND
  931. u8 mtk_nand_util_time_before(struct timeval *time_a, struct timeval *time_b)
  932. {
  933. if (time_a->tv_sec < time_b->tv_sec)
  934. return 1;
  935. if (time_a->tv_sec > time_b->tv_sec)
  936. return 0;
  937. if (time_a->tv_usec < time_b->tv_usec)
  938. return 1;
  939. return 0;
940. } /* mtk_nand_util_time_before() */
  941. #endif
  942. /**
  943. * nand_wait - [DEFAULT] wait until the command is done
  944. * @mtd: MTD device structure
  945. * @chip: NAND chip structure
  946. *
  947. * Wait for command done. This applies to erase and program only. Erase can
  948. * take up to 400ms and program up to 20ms according to general NAND and
  949. * SmartMedia specs.
  950. */
  951. static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
  952. {
  953. int status, state = chip->state;
  954. unsigned long timeo = (state == FL_ERASING ? 400 : 20);
  955. #ifdef CONFIG_MTK_MTD_NAND
  956. struct timeval timer_timeout, timer_cur;
  957. #endif
  958. #ifdef CONFIG_MTK_MTD_NAND
  959. do_gettimeofday(&timer_timeout);
  960. if (state == FL_ERASING)
  961. timer_timeout.tv_usec += 400 * 1000; /* 400 ms */
  962. else
  963. timer_timeout.tv_usec += 20 * 1000; /* 20 ms */
  964. if (timer_timeout.tv_usec >= 1000000) {
  965. timer_timeout.tv_usec -= 1000000;
  966. timer_timeout.tv_sec += 1;
  967. }
  968. #endif
  969. led_trigger_event(nand_led_trigger, LED_FULL);
  970. /*
  971. * Apply this short delay always to ensure that we do wait tWB in any
  972. * case on any machine.
  973. */
  974. ndelay(100);
  975. chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
  976. if (in_interrupt() || oops_in_progress)
  977. panic_nand_wait(mtd, chip, timeo);
  978. else {
  979. #ifdef CONFIG_MTK_MTD_NAND
  980. /*
  981. * use non-jiffies-based method for timeout detection to prevent
  982. * jiffies issue due to dynamic tick (CONFIG_NO_HZ is on)
  983. */
  984. while (1) {
  985. do_gettimeofday(&timer_cur);
  986. if (0 == mtk_nand_util_time_before(&timer_cur, &timer_timeout))
  987. break; /* timeout */
  988. #else
  989. timeo = jiffies + msecs_to_jiffies(timeo);
  990. while (time_before(jiffies, timeo)) {
  991. #endif
  992. if (chip->dev_ready) {
  993. if (chip->dev_ready(mtd))
  994. break;
  995. } else {
  996. if (chip->read_byte(mtd) & NAND_STATUS_READY)
  997. break;
  998. }
  999. cond_resched();
  1000. }
  1001. }
  1002. led_trigger_event(nand_led_trigger, LED_OFF);
  1003. status = (int)chip->read_byte(mtd);
1004. /* This can happen in case of a timeout or a buggy dev_ready */
  1005. WARN_ON(!(status & NAND_STATUS_READY));
  1006. return status;
  1007. }
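/*
 * Illustrative use of the status byte returned above: callers test
 * NAND_STATUS_FAIL to decide whether a program or erase operation failed,
 * e.g.
 *
 *	status = chip->waitfunc(mtd, chip);
 *	if (status & NAND_STATUS_FAIL)
 *		return -EIO;
 *
 * much as __nand_unlock() below does.
 */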
  1008. /**
  1009. * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
  1010. * @mtd: mtd info
  1011. * @ofs: offset to start unlock from
  1012. * @len: length to unlock
  1013. * @invert: when = 0, unlock the range of blocks within the lower and
  1014. * upper boundary address
  1015. * when = 1, unlock the range of blocks outside the boundaries
  1016. * of the lower and upper boundary address
  1017. *
1018. * Returns unlock status.
  1019. */
  1020. static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
  1021. uint64_t len, int invert)
  1022. {
  1023. int ret = 0;
  1024. int status, page;
  1025. struct nand_chip *chip = mtd->priv;
  1026. /* Submit address of first page to unlock */
  1027. page = ofs >> chip->page_shift;
  1028. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1029. chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page % (chip->pagemask + 1));
  1030. #else
  1031. chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
  1032. #endif
  1033. /* Submit address of last page to unlock */
  1034. page = (ofs + len) >> chip->page_shift;
  1035. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1036. chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
  1037. (page | invert) % (chip->pagemask + 1));
  1038. #else
  1039. chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
  1040. (page | invert) & chip->pagemask);
  1041. #endif
  1042. /* Call wait ready function */
  1043. status = chip->waitfunc(mtd, chip);
  1044. /* See if device thinks it succeeded */
  1045. if (status & NAND_STATUS_FAIL) {
  1046. pr_debug("%s: error status = 0x%08x\n",
  1047. __func__, status);
  1048. ret = -EIO;
  1049. }
  1050. return ret;
  1051. }
  1052. /**
  1053. * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
  1054. * @mtd: mtd info
  1055. * @ofs: offset to start unlock from
  1056. * @len: length to unlock
  1057. *
  1058. * Returns unlock status.
  1059. */
  1060. int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  1061. {
  1062. int ret = 0;
  1063. int chipnr;
  1064. struct nand_chip *chip = mtd->priv;
  1065. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1066. loff_t temp;
  1067. #endif
  1068. pr_debug("%s: start = 0x%012llx, len = %llu\n",
  1069. __func__, (unsigned long long)ofs, len);
  1070. if (check_offs_len(mtd, ofs, len))
  1071. ret = -EINVAL;
  1072. /* Align to last block address if size addresses end of the device */
  1073. if (ofs + len == mtd->size)
  1074. len -= mtd->erasesize;
  1075. nand_get_device(mtd, FL_UNLOCKING);
  1076. /* Shift to get chip number */
  1077. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1078. temp = mtk_nand_device_size();
  1079. if (ofs >= temp)
  1080. chipnr = 1;
  1081. else
  1082. chipnr = 0;
  1083. #else
  1084. chipnr = ofs >> chip->chip_shift;
  1085. #endif
  1086. chip->select_chip(mtd, chipnr);
1087. /*
1088. * Reset the chip.
1089. * If we want to check the WP bit (bit 7) through READ STATUS, we
1090. * must reset the chip first, because some operations can also
1091. * clear bit 7 of the status register, e.g. erasing or programming
1092. * a locked block.
1093. */
  1094. chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  1095. /* Check, if it is write protected */
  1096. if (nand_check_wp(mtd)) {
  1097. pr_debug("%s: device is write protected!\n",
  1098. __func__);
  1099. ret = -EIO;
  1100. goto out;
  1101. }
  1102. ret = __nand_unlock(mtd, ofs, len, 0);
  1103. out:
  1104. chip->select_chip(mtd, -1);
  1105. nand_release_device(mtd);
  1106. return ret;
  1107. }
  1108. EXPORT_SYMBOL(nand_unlock);
  1109. /**
  1110. * nand_lock - [REPLACEABLE] locks all blocks present in the device
  1111. * @mtd: mtd info
1112. * @ofs: offset to start lock from
1113. * @len: length to lock
  1114. *
1115. * This feature is not supported in many NAND parts. 'Micron' NAND parts do
1116. * have this feature, but it allows only locking all blocks, not a specified
1117. * range of blocks. The 'lock' feature is therefore implemented in terms of
1118. * 'unlock' for now.
  1119. *
  1120. * Returns lock status.
  1121. */
  1122. int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  1123. {
  1124. int ret = 0;
  1125. int chipnr, status, page;
  1126. struct nand_chip *chip = mtd->priv;
  1127. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1128. loff_t temp;
  1129. #endif
  1130. pr_debug("%s: start = 0x%012llx, len = %llu\n",
  1131. __func__, (unsigned long long)ofs, len);
  1132. if (check_offs_len(mtd, ofs, len))
  1133. ret = -EINVAL;
  1134. nand_get_device(mtd, FL_LOCKING);
  1135. /* Shift to get chip number */
  1136. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1137. temp = mtk_nand_device_size();
  1138. if (ofs >= temp)
  1139. chipnr = 1;
  1140. else
  1141. chipnr = 0;
  1142. #else
  1143. chipnr = ofs >> chip->chip_shift;
  1144. #endif
  1145. chip->select_chip(mtd, chipnr);
1146. /*
1147. * Reset the chip.
1148. * If we want to check the WP bit (bit 7) through READ STATUS, we
1149. * must reset the chip first, because some operations can also
1150. * clear bit 7 of the status register, e.g. erasing or programming
1151. * a locked block.
1152. */
  1153. chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  1154. /* Check, if it is write protected */
  1155. if (nand_check_wp(mtd)) {
  1156. pr_debug("%s: device is write protected!\n",
  1157. __func__);
  1158. status = MTD_ERASE_FAILED;
  1159. ret = -EIO;
  1160. goto out;
  1161. }
  1162. /* Submit address of first page to lock */
  1163. page = ofs >> chip->page_shift;
  1164. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1165. chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page % (chip->pagemask + 1));
  1166. #else
  1167. chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
  1168. #endif
  1169. /* Call wait ready function */
  1170. status = chip->waitfunc(mtd, chip);
  1171. /* See if device thinks it succeeded */
  1172. if (status & NAND_STATUS_FAIL) {
  1173. pr_debug("%s: error status = 0x%08x\n",
  1174. __func__, status);
  1175. ret = -EIO;
  1176. goto out;
  1177. }
  1178. ret = __nand_unlock(mtd, ofs, len, 0x1);
  1179. out:
  1180. chip->select_chip(mtd, -1);
  1181. nand_release_device(mtd);
  1182. return ret;
  1183. }
  1184. EXPORT_SYMBOL(nand_lock);
  1185. /**
  1186. * nand_read_page_raw - [INTERN] read raw page data without ecc
  1187. * @mtd: mtd info structure
  1188. * @chip: nand chip info structure
  1189. * @buf: buffer to store read data
  1190. * @oob_required: caller requires OOB data read to chip->oob_poi
  1191. * @page: page number to read
  1192. *
  1193. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  1194. */
  1195. static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  1196. uint8_t *buf, int oob_required, int page)
  1197. {
  1198. chip->read_buf(mtd, buf, mtd->writesize);
  1199. if (oob_required)
  1200. chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
  1201. return 0;
  1202. }
  1203. /**
  1204. * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
  1205. * @mtd: mtd info structure
  1206. * @chip: nand chip info structure
  1207. * @buf: buffer to store read data
  1208. * @oob_required: caller requires OOB data read to chip->oob_poi
  1209. * @page: page number to read
  1210. *
  1211. * We need a special oob layout and handling even when OOB isn't used.
  1212. */
  1213. static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
  1214. struct nand_chip *chip, uint8_t *buf,
  1215. int oob_required, int page)
  1216. {
  1217. int eccsize = chip->ecc.size;
  1218. int eccbytes = chip->ecc.bytes;
  1219. uint8_t *oob = chip->oob_poi;
  1220. int steps, size;
  1221. for (steps = chip->ecc.steps; steps > 0; steps--) {
  1222. chip->read_buf(mtd, buf, eccsize);
  1223. buf += eccsize;
  1224. if (chip->ecc.prepad) {
  1225. chip->read_buf(mtd, oob, chip->ecc.prepad);
  1226. oob += chip->ecc.prepad;
  1227. }
  1228. chip->read_buf(mtd, oob, eccbytes);
  1229. oob += eccbytes;
  1230. if (chip->ecc.postpad) {
  1231. chip->read_buf(mtd, oob, chip->ecc.postpad);
  1232. oob += chip->ecc.postpad;
  1233. }
  1234. }
  1235. size = mtd->oobsize - (oob - chip->oob_poi);
  1236. if (size)
  1237. chip->read_buf(mtd, oob, size);
  1238. return 0;
  1239. }
  1240. /**
  1241. * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
  1242. * @mtd: mtd info structure
  1243. * @chip: nand chip info structure
  1244. * @buf: buffer to store read data
  1245. * @oob_required: caller requires OOB data read to chip->oob_poi
  1246. * @page: page number to read
  1247. */
  1248. static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
  1249. uint8_t *buf, int oob_required, int page)
  1250. {
  1251. int i, eccsize = chip->ecc.size;
  1252. int eccbytes = chip->ecc.bytes;
  1253. int eccsteps = chip->ecc.steps;
  1254. uint8_t *p = buf;
  1255. uint8_t *ecc_calc = chip->buffers->ecccalc;
  1256. uint8_t *ecc_code = chip->buffers->ecccode;
  1257. uint32_t *eccpos = chip->ecc.layout->eccpos;
  1258. unsigned int max_bitflips = 0;
  1259. chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
  1260. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  1261. chip->ecc.calculate(mtd, p, &ecc_calc[i]);
  1262. for (i = 0; i < chip->ecc.total; i++)
  1263. ecc_code[i] = chip->oob_poi[eccpos[i]];
  1264. eccsteps = chip->ecc.steps;
  1265. p = buf;
  1266. for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  1267. int stat;
  1268. stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
  1269. if (stat < 0) {
  1270. mtd->ecc_stats.failed++;
  1271. } else {
  1272. mtd->ecc_stats.corrected += stat;
  1273. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  1274. }
  1275. }
  1276. return max_bitflips;
  1277. }
  1278. /**
  1279. * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
  1280. * @mtd: mtd info structure
  1281. * @chip: nand chip info structure
  1282. * @data_offs: offset of requested data within the page
  1283. * @readlen: data length
  1284. * @bufpoi: buffer to store read data
  1285. * @page: page number to read
  1286. */
  1287. static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
  1288. uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
  1289. int page)
  1290. {
  1291. int start_step, end_step, num_steps;
  1292. uint32_t *eccpos = chip->ecc.layout->eccpos;
  1293. uint8_t *p;
  1294. int data_col_addr, i, gaps = 0;
  1295. int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
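/* busw: bus width in bytes (2 for x16 devices, 1 for x8); used below for read alignment */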
  1296. int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
  1297. int index;
  1298. unsigned int max_bitflips = 0;
1299. /* Column address within the page, aligned to ECC size (256 bytes) */
  1300. start_step = data_offs / chip->ecc.size;
  1301. end_step = (data_offs + readlen - 1) / chip->ecc.size;
  1302. num_steps = end_step - start_step + 1;
  1303. index = start_step * chip->ecc.bytes;
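/* index: offset of this fragment's first ECC byte within ecc.layout->eccpos */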
1304. /* Data size aligned to ecc.size */
  1305. datafrag_len = num_steps * chip->ecc.size;
  1306. eccfrag_len = num_steps * chip->ecc.bytes;
  1307. data_col_addr = start_step * chip->ecc.size;
1308. /* If the read is not page aligned */
  1309. if (data_col_addr != 0)
  1310. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
  1311. p = bufpoi + data_col_addr;
  1312. chip->read_buf(mtd, p, datafrag_len);
  1313. /* Calculate ECC */
  1314. for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
  1315. chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1316. /*
1317. * Performance is better if we fetch the ECC bytes at the offsets given
1318. * by eccpos directly. Make sure there are no gaps in the ECC positions.
1319. */
  1320. for (i = 0; i < eccfrag_len - 1; i++) {
  1321. if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
  1322. gaps = 1;
  1323. break;
  1324. }
  1325. }
  1326. if (gaps) {
  1327. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
  1328. chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
  1329. } else {
1330. /*
1331. * Send the command to read the particular ECC bytes, taking care
1332. * of buswidth alignment in read_buf.
1333. */
  1334. aligned_pos = eccpos[index] & ~(busw - 1);
  1335. aligned_len = eccfrag_len;
  1336. if (eccpos[index] & (busw - 1))
  1337. aligned_len++;
  1338. if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
  1339. aligned_len++;
  1340. chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
  1341. mtd->writesize + aligned_pos, -1);
  1342. chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
  1343. }
  1344. for (i = 0; i < eccfrag_len; i++)
  1345. chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
  1346. p = bufpoi + data_col_addr;
  1347. for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
  1348. int stat;
  1349. stat = chip->ecc.correct(mtd, p,
  1350. &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
  1351. if (stat < 0) {
  1352. mtd->ecc_stats.failed++;
  1353. } else {
  1354. mtd->ecc_stats.corrected += stat;
  1355. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  1356. }
  1357. }
  1358. return max_bitflips;
  1359. }
  1360. /**
  1361. * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
  1362. * @mtd: mtd info structure
  1363. * @chip: nand chip info structure
  1364. * @buf: buffer to store read data
  1365. * @oob_required: caller requires OOB data read to chip->oob_poi
  1366. * @page: page number to read
  1367. *
  1368. * Not for syndrome calculating ECC controllers which need a special oob layout.
  1369. */
  1370. static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  1371. uint8_t *buf, int oob_required, int page)
  1372. {
  1373. int i, eccsize = chip->ecc.size;
  1374. int eccbytes = chip->ecc.bytes;
  1375. int eccsteps = chip->ecc.steps;
  1376. uint8_t *p = buf;
  1377. uint8_t *ecc_calc = chip->buffers->ecccalc;
  1378. uint8_t *ecc_code = chip->buffers->ecccode;
  1379. uint32_t *eccpos = chip->ecc.layout->eccpos;
  1380. unsigned int max_bitflips = 0;
  1381. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  1382. chip->ecc.hwctl(mtd, NAND_ECC_READ);
  1383. chip->read_buf(mtd, p, eccsize);
  1384. chip->ecc.calculate(mtd, p, &ecc_calc[i]);
  1385. }
  1386. chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
  1387. for (i = 0; i < chip->ecc.total; i++)
  1388. ecc_code[i] = chip->oob_poi[eccpos[i]];
  1389. eccsteps = chip->ecc.steps;
  1390. p = buf;
  1391. for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  1392. int stat;
  1393. stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
  1394. if (stat < 0) {
  1395. mtd->ecc_stats.failed++;
  1396. } else {
  1397. mtd->ecc_stats.corrected += stat;
  1398. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  1399. }
  1400. }
  1401. return max_bitflips;
  1402. }
  1403. /**
  1404. * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
  1405. * @mtd: mtd info structure
  1406. * @chip: nand chip info structure
  1407. * @buf: buffer to store read data
  1408. * @oob_required: caller requires OOB data read to chip->oob_poi
  1409. * @page: page number to read
  1410. *
1411. * Hardware ECC for large page chips which require the OOB to be read first.
1412. * For this ECC mode, the write_page method is re-used from ECC_HW. These
1413. * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
1414. * support with multiple ECC steps, which follows the "infix ECC" scheme and
1415. * reads/writes ECC from the data area, overwriting the manufacturer bad block markings.
  1416. */
  1417. static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
  1418. struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
  1419. {
  1420. int i, eccsize = chip->ecc.size;
  1421. int eccbytes = chip->ecc.bytes;
  1422. int eccsteps = chip->ecc.steps;
  1423. uint8_t *p = buf;
  1424. uint8_t *ecc_code = chip->buffers->ecccode;
  1425. uint32_t *eccpos = chip->ecc.layout->eccpos;
  1426. uint8_t *ecc_calc = chip->buffers->ecccalc;
  1427. unsigned int max_bitflips = 0;
  1428. /* Read the OOB area first */
  1429. chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
  1430. chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
  1431. chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
  1432. for (i = 0; i < chip->ecc.total; i++)
  1433. ecc_code[i] = chip->oob_poi[eccpos[i]];
  1434. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  1435. int stat;
  1436. chip->ecc.hwctl(mtd, NAND_ECC_READ);
  1437. chip->read_buf(mtd, p, eccsize);
  1438. chip->ecc.calculate(mtd, p, &ecc_calc[i]);
  1439. stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
  1440. if (stat < 0) {
  1441. mtd->ecc_stats.failed++;
  1442. } else {
  1443. mtd->ecc_stats.corrected += stat;
  1444. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  1445. }
  1446. }
  1447. return max_bitflips;
  1448. }
  1449. /**
  1450. * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
  1451. * @mtd: mtd info structure
  1452. * @chip: nand chip info structure
  1453. * @buf: buffer to store read data
  1454. * @oob_required: caller requires OOB data read to chip->oob_poi
  1455. * @page: page number to read
  1456. *
  1457. * The hw generator calculates the error syndrome automatically. Therefore we
  1458. * need a special oob layout and handling.
  1459. */
  1460. static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
  1461. uint8_t *buf, int oob_required, int page)
  1462. {
  1463. int i, eccsize = chip->ecc.size;
  1464. int eccbytes = chip->ecc.bytes;
  1465. int eccsteps = chip->ecc.steps;
  1466. uint8_t *p = buf;
  1467. uint8_t *oob = chip->oob_poi;
  1468. unsigned int max_bitflips = 0;
  1469. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  1470. int stat;
  1471. chip->ecc.hwctl(mtd, NAND_ECC_READ);
  1472. chip->read_buf(mtd, p, eccsize);
  1473. if (chip->ecc.prepad) {
  1474. chip->read_buf(mtd, oob, chip->ecc.prepad);
  1475. oob += chip->ecc.prepad;
  1476. }
  1477. chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
  1478. chip->read_buf(mtd, oob, eccbytes);
  1479. stat = chip->ecc.correct(mtd, p, oob, NULL);
  1480. if (stat < 0) {
  1481. mtd->ecc_stats.failed++;
  1482. } else {
  1483. mtd->ecc_stats.corrected += stat;
  1484. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  1485. }
  1486. oob += eccbytes;
  1487. if (chip->ecc.postpad) {
  1488. chip->read_buf(mtd, oob, chip->ecc.postpad);
  1489. oob += chip->ecc.postpad;
  1490. }
  1491. }
  1492. /* Calculate remaining oob bytes */
  1493. i = mtd->oobsize - (oob - chip->oob_poi);
  1494. if (i)
  1495. chip->read_buf(mtd, oob, i);
  1496. return max_bitflips;
  1497. }
  1498. /**
  1499. * nand_transfer_oob - [INTERN] Transfer oob to client buffer
  1500. * @chip: nand chip structure
  1501. * @oob: oob destination address
  1502. * @ops: oob ops structure
  1503. * @len: size of oob to transfer
  1504. */
  1505. static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
  1506. struct mtd_oob_ops *ops, size_t len)
  1507. {
  1508. switch (ops->mode) {
  1509. case MTD_OPS_PLACE_OOB:
  1510. case MTD_OPS_RAW:
  1511. memcpy(oob, chip->oob_poi + ops->ooboffs, len);
  1512. return oob + len;
  1513. case MTD_OPS_AUTO_OOB: {
  1514. struct nand_oobfree *free = chip->ecc.layout->oobfree;
  1515. uint32_t boffs = 0, roffs = ops->ooboffs;
  1516. size_t bytes = 0;
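/*
 * Walk the free OOB regions of the ECC layout, skip the first ooboffs
 * bytes, and copy up to len bytes into the client buffer.
 */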
  1517. for (; free->length && len; free++, len -= bytes) {
  1518. /* Read request not from offset 0? */
  1519. if (unlikely(roffs)) {
  1520. if (roffs >= free->length) {
  1521. roffs -= free->length;
  1522. continue;
  1523. }
  1524. boffs = free->offset + roffs;
  1525. bytes = min_t(size_t, len,
  1526. (free->length - roffs));
  1527. roffs = 0;
  1528. } else {
  1529. bytes = min_t(size_t, len, free->length);
  1530. boffs = free->offset;
  1531. }
  1532. memcpy(oob, chip->oob_poi + boffs, bytes);
  1533. oob += bytes;
  1534. }
  1535. return oob;
  1536. }
  1537. default:
  1538. BUG();
  1539. }
  1540. return NULL;
  1541. }
  1542. #ifdef CONFIG_MTK_MTD_NAND
  1543. struct mtd_perf_log g_MtdPerfLog = { 0 };
  1544. #endif
  1545. /**
  1546. * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
  1547. * @mtd: MTD device structure
  1548. * @retry_mode: the retry mode to use
  1549. *
  1550. * Some vendors supply a special command to shift the Vt threshold, to be used
  1551. * when there are too many bitflips in a page (i.e., ECC error). After setting
  1552. * a new threshold, the host should retry reading the page.
  1553. */
  1554. static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
  1555. {
  1556. struct nand_chip *chip = mtd->priv;
  1557. pr_debug("setting READ RETRY mode %d\n", retry_mode);
  1558. if (retry_mode >= chip->read_retries)
  1559. return -EINVAL;
  1560. if (!chip->setup_read_retry)
  1561. return -EOPNOTSUPP;
  1562. return chip->setup_read_retry(mtd, retry_mode);
  1563. }
  1564. /**
  1565. * nand_do_read_ops - [INTERN] Read data with ECC
  1566. * @mtd: MTD device structure
  1567. * @from: offset to read from
  1568. * @ops: oob ops structure
  1569. *
  1570. * Internal function. Called with chip held.
  1571. */
  1572. static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
  1573. struct mtd_oob_ops *ops)
  1574. {
  1575. int chipnr, page, realpage, col, bytes, aligned, oob_required;
  1576. struct nand_chip *chip = mtd->priv;
  1577. int ret = 0;
  1578. uint32_t readlen = ops->len;
  1579. uint32_t oobreadlen = ops->ooblen;
  1580. uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
  1581. mtd->oobavail : mtd->oobsize;
  1582. uint8_t *bufpoi, *oob, *buf;
  1583. int use_bufpoi;
  1584. unsigned int max_bitflips = 0;
  1585. int retry_mode = 0;
  1586. bool ecc_fail = false;
  1587. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1588. loff_t temp;
  1589. #endif
  1590. #ifdef CONFIG_MTK_MTD_NAND
  1591. if (readlen < 512) {
  1592. g_MtdPerfLog.read_size_0_512++;
  1593. } else if ((512 <= readlen) && (readlen < 1024)) {
  1594. g_MtdPerfLog.read_size_512_1K++;
  1595. } else if ((1024 <= readlen) && (readlen < 2048)) {
  1596. g_MtdPerfLog.read_size_1K_2K++;
  1597. } else if ((2048 <= readlen) && (readlen < 3072)) {
  1598. g_MtdPerfLog.read_size_2K_3K++;
  1599. } else if ((3072 <= readlen) && (readlen < 4096)) {
  1600. g_MtdPerfLog.read_size_3K_4K++;
  1601. } else {
  1602. g_MtdPerfLog.read_size_Above_4K +=
  1603. ((readlen + mtd->writesize - 1) / mtd->writesize);
  1604. }
  1605. #endif
  1606. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1607. temp = mtk_nand_device_size();
  1608. if (from >= temp)
  1609. chipnr = 1;
  1610. else
  1611. chipnr = 0;
  1612. #else
  1613. chipnr = (int)(from >> chip->chip_shift);
  1614. #endif
  1615. chip->select_chip(mtd, chipnr);
  1616. realpage = (int)(from >> chip->page_shift);
  1617. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1618. page = realpage % (chip->pagemask + 1);
  1619. #else
  1620. page = realpage & chip->pagemask;
  1621. #endif
  1622. col = (int)(from & (mtd->writesize - 1));
  1623. buf = ops->datbuf;
  1624. oob = ops->oobbuf;
  1625. oob_required = oob ? 1 : 0;
  1626. while (1) {
  1627. unsigned int ecc_failures = mtd->ecc_stats.failed;
  1628. bytes = min(mtd->writesize - col, readlen);
  1629. aligned = (bytes == mtd->writesize);
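/*
 * Use the internal bounce buffer for partial-page reads, or when the
 * caller's buffer is not suitable for DMA (NAND_USE_BOUNCE_BUFFER).
 */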
  1630. if (!aligned)
  1631. use_bufpoi = 1;
  1632. else if (chip->options & NAND_USE_BOUNCE_BUFFER)
  1633. use_bufpoi = !virt_addr_valid(buf);
  1634. else
  1635. use_bufpoi = 0;
  1636. /* Is the current page in the buffer? */
  1637. if (realpage != chip->pagebuf || oob) {
  1638. bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
  1639. if (use_bufpoi && aligned)
  1640. pr_debug("%s: using read bounce buffer for buf@%p\n",
  1641. __func__, buf);
  1642. read_retry:
  1643. #ifdef CONFIG_MTK_MTD_NAND
  1644. ret = chip->read_page(mtd, chip, bufpoi, page);
  1645. #else
  1646. chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
  1647. #endif
  1648. /*
  1649. * Now read the page into the buffer. Absent an error,
  1650. * the read methods return max bitflips per ecc step.
  1651. */
  1652. if (unlikely(ops->mode == MTD_OPS_RAW))
  1653. ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
  1654. oob_required,
  1655. page);
  1656. else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
  1657. !oob)
  1658. ret = chip->ecc.read_subpage(mtd, chip,
  1659. col, bytes, bufpoi,
  1660. page);
  1661. else
  1662. ret = chip->ecc.read_page(mtd, chip, bufpoi,
  1663. oob_required, page);
  1664. if (ret < 0) {
  1665. if (use_bufpoi)
  1666. /* Invalidate page cache */
  1667. chip->pagebuf = -1;
  1668. break;
  1669. }
  1670. max_bitflips = max_t(unsigned int, max_bitflips, ret);
  1671. /* Transfer not aligned data */
  1672. if (use_bufpoi) {
  1673. if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
  1674. !(mtd->ecc_stats.failed - ecc_failures) &&
  1675. (ops->mode != MTD_OPS_RAW)) {
  1676. chip->pagebuf = realpage;
  1677. chip->pagebuf_bitflips = ret;
  1678. } else {
  1679. /* Invalidate page cache */
  1680. chip->pagebuf = -1;
  1681. }
  1682. memcpy(buf, chip->buffers->databuf + col, bytes);
  1683. }
  1684. if (unlikely(oob)) {
  1685. int toread = min(oobreadlen, max_oobsize);
  1686. if (toread) {
  1687. oob = nand_transfer_oob(chip,
  1688. oob, ops, toread);
  1689. oobreadlen -= toread;
  1690. }
  1691. }
  1692. if (chip->options & NAND_NEED_READRDY) {
  1693. /* Apply delay or wait for ready/busy pin */
  1694. if (!chip->dev_ready)
  1695. udelay(chip->chip_delay);
  1696. else
  1697. nand_wait_ready(mtd);
  1698. }
  1699. if (mtd->ecc_stats.failed - ecc_failures) {
  1700. if (retry_mode + 1 < chip->read_retries) {
  1701. retry_mode++;
  1702. ret = nand_setup_read_retry(mtd,
  1703. retry_mode);
  1704. if (ret < 0)
  1705. break;
  1706. /* Reset failures; retry */
  1707. mtd->ecc_stats.failed = ecc_failures;
  1708. goto read_retry;
  1709. } else {
  1710. /* No more retry modes; real failure */
  1711. ecc_fail = true;
  1712. }
  1713. }
  1714. buf += bytes;
  1715. } else {
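/* Page is already cached in chip->buffers->databuf: serve the request from it. */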
  1716. memcpy(buf, chip->buffers->databuf + col, bytes);
  1717. buf += bytes;
  1718. max_bitflips = max_t(unsigned int, max_bitflips,
  1719. chip->pagebuf_bitflips);
  1720. }
  1721. readlen -= bytes;
  1722. /* Reset to retry mode 0 */
  1723. if (retry_mode) {
  1724. ret = nand_setup_read_retry(mtd, 0);
  1725. if (ret < 0)
  1726. break;
  1727. retry_mode = 0;
  1728. }
  1729. if (!readlen)
  1730. break;
  1731. /* For subsequent reads align to page boundary */
  1732. col = 0;
  1733. /* Increment page address */
  1734. realpage++;
  1735. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1736. page = realpage % (chip->pagemask + 1);
  1737. #else
  1738. page = realpage & chip->pagemask;
  1739. #endif
  1740. /* Check, if we cross a chip boundary */
  1741. if (!page) {
  1742. chipnr++;
  1743. chip->select_chip(mtd, -1);
  1744. chip->select_chip(mtd, chipnr);
  1745. }
  1746. }
  1747. chip->select_chip(mtd, -1);
  1748. ops->retlen = ops->len - (size_t) readlen;
  1749. if (oob)
  1750. ops->oobretlen = ops->ooblen - oobreadlen;
  1751. if (ret < 0)
  1752. return ret;
  1753. if (ecc_fail)
  1754. return -EBADMSG;
  1755. return max_bitflips;
  1756. }
  1757. #ifdef CONFIG_MTK_MTD_NAND
  1758. /* #define _SNAND_SUBPAGE_READ_DBG */
  1759. #define MTK_NSS_CACHEV_MAX_CNT (CONFIG_MTK_NSS_CACHEV_MAX_CNT)
  1760. int g_mtk_nss_cachev_cnt = 0;
  1761. u32 g_mtk_nss_timestamp = 0;
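/*
 * Per-slot sub-page read cache (MTK "NSS"): val holds the cached page
 * number (-1 = empty), timestamp is used for least-recently-used
 * eviction, and part_begin/part_end delimit the sub-page range of buf
 * that currently holds valid data.
 */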
  1762. struct mtk_nss_cachev_struct {
  1763. int val;
  1764. unsigned int timestamp;
  1765. int part_begin;
  1766. int part_end;
  1767. u8 *buf;
  1768. };
  1769. struct mtk_nss_cachev_struct g_mtk_nss_cachev[MTK_NSS_CACHEV_MAX_CNT];
  1770. void mtk_nss_add_cache(struct mtk_nss_cachev_struct *cache, int cache_idx, int target_val, int part_begin,
  1771. int part_end, u32 *timestamp)
  1772. {
  1773. cache[cache_idx].val = target_val;
  1774. cache[cache_idx].part_begin = part_begin;
  1775. cache[cache_idx].part_end = part_end;
  1776. cache[cache_idx].timestamp = *timestamp;
  1777. *timestamp = *timestamp + 1;
  1778. #ifdef _SNAND_SUBPAGE_READ_DBG
  1779. pr_debug("[CacheV-I] Add, idx %d, page %d, begin %d, end %d, stamp %d\n", cache_idx, target_val,
  1780. part_begin, part_end, *timestamp - 1);
  1781. #endif
  1782. }
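/* Pick a slot to reuse: an empty one if present, otherwise the least recently used entry. */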
  1783. int mtk_nss_get_victim(struct mtk_nss_cachev_struct *cache)
  1784. {
  1785. int i;
  1786. unsigned int min_timestamp = 0xFFFFFFFF;
  1787. int min_idx = -1;
  1788. for (i = 0; i < g_mtk_nss_cachev_cnt; i++) {
  1789. if (cache[i].val == -1)
  1790. return i;
  1791. if (cache[i].timestamp < min_timestamp) {
  1792. min_timestamp = cache[i].timestamp;
  1793. min_idx = i;
  1794. }
  1795. }
  1796. #ifdef _SNAND_SUBPAGE_READ_DBG
  1797. pr_debug("[CacheV-I] GetVic, idx %d\n", min_idx);
  1798. #endif
  1799. return min_idx;
  1800. }
  1801. void mtk_nss_invalidate_cache_by_val(struct mtk_nss_cachev_struct *cache, int target_val)
  1802. {
  1803. int i;
  1804. for (i = 0; i < g_mtk_nss_cachev_cnt; i++) {
  1805. if (cache[i].val == target_val)
  1806. cache[i].val = -1;
  1807. }
  1808. #ifdef _SNAND_SUBPAGE_READ_DBG
  1809. pr_debug("[CacheV-I] Invalidate, idx %d, val %d\n", i, target_val);
  1810. #endif
  1811. }
  1812. void mtk_nss_invalidate_cache_by_idx(struct mtk_nss_cachev_struct *cache, int idx)
  1813. {
  1814. cache[idx].val = -1;
  1815. #ifdef _SNAND_SUBPAGE_READ_DBG
  1816. pr_debug("[CacheV-I] Invalidate, idx %d\n", idx);
  1817. #endif
  1818. }
  1819. void mtk_nss_invalidate_all_cache(struct mtk_nss_cachev_struct *cache)
  1820. {
  1821. int i;
  1822. for (i = 0; i < g_mtk_nss_cachev_cnt; i++)
  1823. cache[i].val = -1;
  1824. #ifdef _SNAND_SUBPAGE_READ_DBG
  1825. pr_debug("[CacheV-I] Invalidate all\n");
  1826. #endif
  1827. }
  1828. void mtk_nss_invalidate_cache_by_range(struct mtk_nss_cachev_struct *cache, int val_min, int val_max)
  1829. {
  1830. int i;
  1831. for (i = 0; i < g_mtk_nss_cachev_cnt; i++) {
  1832. if ((cache[i].val >= val_min) && (cache[i].val <= val_max)) {
  1833. cache[i].val = -1;
  1834. #ifdef _SNAND_SUBPAGE_READ_DBG
  1835. pr_debug("[CacheV-I] Invalidate idx %d (by range)\n", i);
  1836. #endif
  1837. }
  1838. }
  1839. }
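/*
 * Look up target_val in the cache. On a hit, *hit_type reports how the
 * requested sub-page range relates to the cached one: 0 = fully covered,
 * 1 = extends before it, 2 = extends after it, 3 = extends on both sides.
 * Returns the slot index, or -1 (and *hit_type = -1) on a miss.
 */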
  1840. int mtk_nss_if_cache_hit(struct mtk_nss_cachev_struct *cache, int target_val, int part_begin,
  1841. int part_end, uint32_t *timestamp, int *hit_type)
  1842. {
  1843. int i;
  1844. for (i = 0; i < g_mtk_nss_cachev_cnt; i++) {
  1845. if (cache[i].val == target_val) {
  1846. if ((part_begin >= cache[i].part_begin) && (part_end <= cache[i].part_end))
  1847. *hit_type = 0;
  1848. else if (part_end <= cache[i].part_end)
  1849. *hit_type = 1;
  1850. else if (part_begin >= cache[i].part_begin)
  1851. *hit_type = 2;
  1852. else
  1853. *hit_type = 3;
  1854. *timestamp = *timestamp + 1;
  1855. cache[i].timestamp = *timestamp;
  1856. #ifdef _SNAND_SUBPAGE_READ_DBG
  1857. pr_debug("[CacheV-I] Hit, idx %d, page %d, blk %d, begin %d, end %d, hit_type %d\n",
  1858. i, cache[i].val, cache[i].val / 64, cache[i].part_begin,
  1859. cache[i].part_end, *hit_type);
  1860. #endif
  1861. return i;
  1862. }
  1863. }
  1864. #ifdef _SNAND_SUBPAGE_READ_DBG
  1865. pr_debug("[CacheV-I] Miss, tar_page %d, begin %d, end %d\n", target_val, part_begin, part_end);
  1866. #endif
  1867. *hit_type = -1;
  1868. return -1;
  1869. }
  1870. void mtk_nss_init_cache(struct mtd_info *mtd)
  1871. {
  1872. struct nand_chip *chip = mtd->priv;
  1873. int j;
  1874. if (NULL != chip->read_subpage) {
  1875. for (j = 0; j < MTK_NSS_CACHEV_MAX_CNT; j++) {
  1876. g_mtk_nss_cachev[j].buf =
  1877. kmalloc(mtd->writesize + ARCH_DMA_MINALIGN, GFP_KERNEL);
  1878. if (NULL != g_mtk_nss_cachev[j].buf) {
  1879. if ((unsigned long)(g_mtk_nss_cachev[j].buf) & (ARCH_DMA_MINALIGN -
  1880. 1)) {
  1881. g_mtk_nss_cachev[j].buf =
  1882. (u8 *) ((unsigned long)(g_mtk_nss_cachev[j].buf) +
  1883. (ARCH_DMA_MINALIGN -
  1884. ((unsigned long)(g_mtk_nss_cachev[j].
  1885. buf) & (ARCH_DMA_MINALIGN -
  1886. 1))));
  1887. }
  1888. g_mtk_nss_cachev[j].val = -1; /* initialize each CacheV */
  1889. g_mtk_nss_cachev[j].timestamp = 0;
  1890. pr_debug("[NSS] idx: %d, buf: 0x%lX\n", j,
  1891. (unsigned long)(g_mtk_nss_cachev[j].buf));
  1892. } else {
  1893. break; /* can't malloc, just leave */
  1894. }
  1895. }
  1896. g_mtk_nss_cachev_cnt = j;
  1897. pr_debug("[NSS] Init OK, cnt: %d, aligned size: %d\n", g_mtk_nss_cachev_cnt,
  1898. ARCH_DMA_MINALIGN);
  1899. } else {
  1900. g_mtk_nss_cachev_cnt = 0;
  1901. pr_debug("[NSS] Disabled\n");
  1902. }
  1903. }
  1904. static int nand_do_read_ops_ex(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
  1905. {
  1906. int chipnr, page, realpage, col, bytes, aligned;
  1907. struct nand_chip *chip = mtd->priv;
  1908. struct mtd_ecc_stats stats;
  1909. int ret = 0;
  1910. uint32_t readlen = ops->len;
  1911. uint32_t oobreadlen = ops->ooblen;
  1912. uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
  1913. uint8_t *bufpoi, *oob, *buf;
  1914. uint32_t total_byte_read = 0;
  1915. static int last_page = -1;
  1916. int subpage_begin, subpage_end;
  1917. int toread;
  1918. int hit_idx;
  1919. int victim_idx;
  1920. int hit_type;
  1921. int temp_int;
  1922. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1923. loff_t temp;
  1924. #endif
  1925. stats = mtd->ecc_stats;
  1926. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1927. temp = mtk_nand_device_size();
  1928. if (from >= temp)
  1929. chipnr = 1;
  1930. else
  1931. chipnr = 0;
  1932. #else
  1933. chipnr = (int)(from >> chip->chip_shift);
  1934. #endif
  1935. chip->select_chip(mtd, chipnr);
  1936. realpage = (int)(from >> chip->page_shift);
  1937. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  1938. page = realpage % (chip->pagemask + 1);
  1939. #else
  1940. page = realpage & chip->pagemask;
  1941. #endif
  1942. col = (int)(from & (mtd->writesize - 1));
  1943. buf = ops->datbuf;
  1944. oob = ops->oobbuf;
  1945. #ifdef _SNAND_SUBPAGE_READ_DBG
  1946. pr_debug
  1947. ("[CacheV] -MTD1, from %u, readlen %d, page %i, last_page %d, blk %d, col %d, buf %X, oob %X\n",
  1948. (u32) from, readlen, page, last_page, page / 64, col, buf, oob);
  1949. #endif
  1950. while (1) {
  1951. bytes = min(mtd->writesize - col, readlen);
  1952. aligned = (bytes == mtd->writesize);
  1953. if (likely(!oob)) {
  1954. col = (int)((from + total_byte_read) & (mtd->writesize - 1));
  1955. subpage_begin = col / chip->subpage_size;
  1956. subpage_end = (col + bytes - 1) / chip->subpage_size;
  1957. hit_idx =
  1958. mtk_nss_if_cache_hit(g_mtk_nss_cachev, realpage, subpage_begin,
  1959. subpage_end, &g_mtk_nss_timestamp, &hit_type);
  1960. #ifdef _SNAND_SUBPAGE_READ_DBG
  1961. /* pr_debug
  1962. ("[CacheV] -MTD2-1, from %u, page %i, blk %d, col %d,
  1963. bytes %d, subbegin %d, subend %d, hit_idx %d, hit_type %d\n",
  1964. (u32) (from + total_byte_read), page, page / 64, col, bytes,
  1965. subpage_begin, subpage_end, hit_idx, hit_type);
  1966. */
  1967. #endif
  1968. if (-1 != hit_idx) {
  1969. if (0 != hit_type) {
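/*
 * Partial hit: fill in the missing sub-pages (or re-read the whole page
 * if the request extends on both sides) and widen the cached range.
 */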
  1970. if (1 == hit_type) {
  1971. temp_int =
  1972. g_mtk_nss_cachev[hit_idx].part_begin -
  1973. subpage_begin;
  1974. ret =
  1975. chip->read_subpage(mtd, chip,
  1976. g_mtk_nss_cachev[hit_idx].
  1977. buf +
  1978. (subpage_begin *
  1979. chip->subpage_size), page,
  1980. subpage_begin, temp_int);
  1981. g_mtk_nss_cachev[hit_idx].part_begin =
  1982. subpage_begin;
  1983. } else if (hit_type == 2) {
  1984. temp_int =
  1985. subpage_end -
  1986. g_mtk_nss_cachev[hit_idx].part_end;
  1987. ret =
  1988. chip->read_subpage(mtd, chip,
  1989. g_mtk_nss_cachev[hit_idx].
  1990. buf +
  1991. ((g_mtk_nss_cachev[hit_idx].
  1992. part_end +
  1993. 1) * chip->subpage_size),
  1994. page,
  1995. g_mtk_nss_cachev[hit_idx].
  1996. part_end + 1, temp_int);
  1997. g_mtk_nss_cachev[hit_idx].part_end = subpage_end;
  1998. } else {
  1999. ret =
  2000. chip->read_page(mtd, chip,
  2001. g_mtk_nss_cachev[hit_idx].buf,
  2002. page);
  2003. g_mtk_nss_cachev[hit_idx].part_begin = 0;
  2004. g_mtk_nss_cachev[hit_idx].part_end =
  2005. ((mtd->writesize / chip->subpage_size) - 1);
  2006. }
  2007. }
2008. /* NOTE: a perfect match needs no further read; just copy from the cache below */
  2009. memcpy(buf, g_mtk_nss_cachev[hit_idx].buf + col, bytes);
  2010. } else {
  2011. victim_idx = mtk_nss_get_victim(g_mtk_nss_cachev);
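/*
 * Cache miss: reuse the victim slot. Whole-page reads are preferred for
 * sequential access or full-page requests; otherwise only the needed
 * sub-pages are read and cached.
 */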
  2012. if ((realpage == (last_page + 1)) || /* favor sequential read */
  2013. ((subpage_end - subpage_begin + 1) ==
  2014. (mtd->writesize / chip->subpage_size))) {
  2015. ret =
  2016. chip->read_page(mtd, chip,
  2017. g_mtk_nss_cachev[victim_idx].buf, page);
  2018. g_mtk_nss_cachev[victim_idx].part_begin = 0;
  2019. g_mtk_nss_cachev[victim_idx].part_end =
  2020. ((mtd->writesize / chip->subpage_size) - 1);
  2021. mtk_nss_add_cache(g_mtk_nss_cachev, victim_idx, realpage, 0,
  2022. (mtd->writesize / chip->subpage_size) - 1,
  2023. &g_mtk_nss_timestamp);
  2024. } else {
  2025. temp_int = (subpage_end - subpage_begin + 1);
  2026. ret =
  2027. chip->read_subpage(mtd, chip,
  2028. g_mtk_nss_cachev[victim_idx].buf +
  2029. (subpage_begin * chip->subpage_size),
  2030. page, subpage_begin, temp_int);
  2031. mtk_nss_add_cache(g_mtk_nss_cachev, victim_idx, realpage,
  2032. subpage_begin, subpage_end,
  2033. &g_mtk_nss_timestamp);
  2034. }
  2035. memcpy(buf, g_mtk_nss_cachev[victim_idx].buf + col, bytes);
  2036. }
  2037. } else {
  2038. victim_idx = -1;
  2039. if (aligned)
  2040. bufpoi = buf;
  2041. else {
  2042. victim_idx = mtk_nss_get_victim(g_mtk_nss_cachev);
  2043. mtk_nss_invalidate_cache_by_idx(g_mtk_nss_cachev, victim_idx);
  2044. bufpoi = g_mtk_nss_cachev[victim_idx].buf;
  2045. }
  2046. #ifdef _SNAND_SUBPAGE_READ_DBG
  2047. pr_debug("[CacheV] -MTD2-2, aligned %d, victim_idx %d\n", aligned, victim_idx);
  2048. #endif
  2049. ret = chip->read_page(mtd, chip, bufpoi, page);
  2050. /* for data part */
  2051. if (!aligned && bytes)
  2052. memcpy(buf, bufpoi + col, bytes);
  2053. /* for oob */
  2054. {
  2055. toread = min(oobreadlen, max_oobsize);
  2056. if (toread) {
  2057. oob = nand_transfer_oob(chip, oob, ops, toread);
  2058. oobreadlen -= toread;
  2059. }
  2060. }
  2061. if (-1 != victim_idx)
  2062. mtk_nss_invalidate_cache_by_idx(g_mtk_nss_cachev, victim_idx);
  2063. }
  2064. /* read error, discard cache and leave */
  2065. if (ret < 0) {
  2066. mtk_nss_invalidate_cache_by_val(g_mtk_nss_cachev, realpage);
  2067. break;
  2068. }
  2069. /* discard cache in these cases */
  2070. if (!oob & /* NOTE. oob will not occupy CacheV here */
  2071. (0 != (mtd->ecc_stats.failed - stats.failed)) || (ops->mode == MTD_OPS_RAW)) {
  2072. mtk_nss_invalidate_cache_by_val(g_mtk_nss_cachev, realpage);
  2073. }
  2074. if (!(chip->options & NAND_NEED_READRDY)) {
  2075. /*
  2076. * Apply delay or wait for ready/busy pin. Do
  2077. * this before the AUTOINCR check, so no
  2078. * problems arise if a chip which does auto
  2079. * increment is marked as NOAUTOINCR by the
  2080. * board driver.
  2081. */
  2082. if (!chip->dev_ready)
  2083. udelay(chip->chip_delay);
  2084. else
  2085. nand_wait_ready(mtd);
  2086. }
  2087. buf += bytes;
  2088. readlen -= bytes;
  2089. total_byte_read += bytes;
  2090. last_page = realpage;
  2091. if (!readlen)
  2092. break;
  2093. /* For subsequent reads align to page boundary */
  2094. col = 0;
  2095. /* Increment page address */
  2096. realpage++;
  2097. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2098. page = realpage % (chip->pagemask + 1);
  2099. #else
  2100. page = realpage & chip->pagemask;
  2101. #endif
  2102. /* Check, if we cross a chip boundary */
  2103. if (!page) {
  2104. chipnr++;
  2105. chip->select_chip(mtd, -1);
  2106. chip->select_chip(mtd, chipnr);
  2107. }
  2108. }
  2109. ops->retlen = ops->len - (size_t) readlen;
  2110. if (oob)
  2111. ops->oobretlen = ops->ooblen - oobreadlen;
  2112. if (ret)
  2113. return ret;
  2114. if (mtd->ecc_stats.failed - stats.failed)
  2115. return -EBADMSG;
  2116. return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
  2117. }
  2118. #endif /* CONFIG_MTK_MTD_NAND */
  2119. /**
  2120. * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
  2121. * @mtd: MTD device structure
  2122. * @from: offset to read from
  2123. * @len: number of bytes to read
  2124. * @retlen: pointer to variable to store the number of read bytes
  2125. * @buf: the databuffer to put data
  2126. *
  2127. * Get hold of the chip and call nand_do_read.
  2128. */
  2129. static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
  2130. size_t *retlen, uint8_t *buf)
  2131. {
  2132. struct mtd_oob_ops ops;
  2133. int ret;
  2134. #ifdef CONFIG_MTK_MTD_NAND
  2135. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2136. struct nand_chip *chip = mtd->priv;
  2137. int page;
  2138. int offset;
  2139. int page_per_block;
  2140. #endif
  2141. #endif
  2142. #ifdef MTD_NAND_PFM
  2143. struct timeval pfm_time_read;
  2144. PFM_BEGIN(pfm_time_read);
  2145. #endif
  2146. nand_get_device(mtd, FL_READING);
  2147. ops.len = len;
  2148. ops.datbuf = buf;
  2149. ops.oobbuf = NULL;
  2150. ops.mode = MTD_OPS_PLACE_OOB;
  2151. #ifdef CONFIG_MTK_MTD_NAND
  2152. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
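/*
 * Fast path: if a multi-page read is page-aligned and stays within one
 * block ('offset' below is the request length in pages), hand it to
 * mtk_nand_read(); fall back to the generic read paths on failure.
 */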
  2153. if (likely(len > mtd->writesize)) {
  2154. page = (int)(from >> chip->page_shift);
  2155. offset = (int)(len >> chip->page_shift);
  2156. page_per_block = mtd->erasesize / mtd->writesize;
  2157. if (likely(!mtk_block_istlc((u64)from)))
  2158. page_per_block = page_per_block / 3;
  2159. if (likely((page / page_per_block) == ((page + offset - 1) / page_per_block)
  2160. && (from & (mtd->writesize-1)) == 0)) {
  2161. ret = mtk_nand_read(mtd, chip, buf, page, len);
  2162. if (likely(!ret))
  2163. ops.retlen = len;
  2164. else {
  2165. if (g_mtk_nss_cachev_cnt)
  2166. ret = nand_do_read_ops_ex(mtd, from, &ops);
  2167. else
  2168. ret = nand_do_read_ops(mtd, from, &ops);
  2169. }
  2170. } else {
  2171. if (g_mtk_nss_cachev_cnt)
  2172. ret = nand_do_read_ops_ex(mtd, from, &ops);
  2173. else
  2174. ret = nand_do_read_ops(mtd, from, &ops);
  2175. }
  2176. } else
  2177. #endif
  2178. {
  2179. if (g_mtk_nss_cachev_cnt)
  2180. ret = nand_do_read_ops_ex(mtd, from, &ops);
  2181. else
  2182. ret = nand_do_read_ops(mtd, from, &ops);
  2183. }
  2184. #else
  2185. ret = nand_do_read_ops(mtd, from, &ops);
  2186. #endif
  2187. *retlen = ops.retlen;
  2188. nand_release_device(mtd);
  2189. #ifdef MTD_NAND_PFM
  2190. if (mtk_block_istlc((u64)from))
  2191. PFM_END_R_TLC(pfm_time_read, (*retlen));
  2192. else
  2193. PFM_END_R_SLC(pfm_time_read, (*retlen));
  2194. #endif
  2195. return ret;
  2196. }
  2197. /**
  2198. * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
  2199. * @mtd: mtd info structure
  2200. * @chip: nand chip info structure
  2201. * @page: page number to read
  2202. */
  2203. static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
  2204. int page)
  2205. {
  2206. chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
  2207. chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
  2208. return 0;
  2209. }
  2210. /**
  2211. * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
  2212. * with syndromes
  2213. * @mtd: mtd info structure
  2214. * @chip: nand chip info structure
  2215. * @page: page number to read
  2216. */
  2217. static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
  2218. int page)
  2219. {
  2220. uint8_t *buf = chip->oob_poi;
  2221. int length = mtd->oobsize;
  2222. int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
  2223. int eccsize = chip->ecc.size;
  2224. uint8_t *bufpoi = buf;
  2225. int i, toread, sndrnd = 0, pos;
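/*
 * With syndrome ECC the OOB chunks are interleaved with the data:
 * each step is laid out as <data><prepad><ecc><postpad>, so seek to
 * every chunk in turn and read it.
 */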
  2226. chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
  2227. for (i = 0; i < chip->ecc.steps; i++) {
  2228. if (sndrnd) {
  2229. pos = eccsize + i * (eccsize + chunk);
  2230. if (mtd->writesize > 512)
  2231. chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
  2232. else
  2233. chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
  2234. } else
  2235. sndrnd = 1;
  2236. toread = min_t(int, length, chunk);
  2237. chip->read_buf(mtd, bufpoi, toread);
  2238. bufpoi += toread;
  2239. length -= toread;
  2240. }
  2241. if (length > 0)
  2242. chip->read_buf(mtd, bufpoi, length);
  2243. return 0;
  2244. }
  2245. /**
  2246. * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
  2247. * @mtd: mtd info structure
  2248. * @chip: nand chip info structure
  2249. * @page: page number to write
  2250. */
  2251. static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
  2252. int page)
  2253. {
  2254. int status = 0;
  2255. const uint8_t *buf = chip->oob_poi;
  2256. int length = mtd->oobsize;
  2257. chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
  2258. chip->write_buf(mtd, buf, length);
  2259. /* Send command to program the OOB data */
  2260. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  2261. status = chip->waitfunc(mtd, chip);
  2262. return status & NAND_STATUS_FAIL ? -EIO : 0;
  2263. }
  2264. /**
  2265. * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
  2266. * with syndrome - only for large page flash
  2267. * @mtd: mtd info structure
  2268. * @chip: nand chip info structure
  2269. * @page: page number to write
  2270. */
  2271. static int nand_write_oob_syndrome(struct mtd_info *mtd,
  2272. struct nand_chip *chip, int page)
  2273. {
  2274. int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
  2275. int eccsize = chip->ecc.size, length = mtd->oobsize;
  2276. int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
  2277. const uint8_t *bufpoi = chip->oob_poi;
  2278. /*
  2279. * data-ecc-data-ecc ... ecc-oob
  2280. * or
  2281. * data-pad-ecc-pad-data-pad .... ecc-pad-oob
  2282. */
  2283. if (!chip->ecc.prepad && !chip->ecc.postpad) {
  2284. pos = steps * (eccsize + chunk);
  2285. steps = 0;
  2286. } else
  2287. pos = eccsize;
  2288. chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
  2289. for (i = 0; i < steps; i++) {
  2290. if (sndcmd) {
  2291. if (mtd->writesize <= 512) {
  2292. uint32_t fill = 0xFFFFFFFF;
  2293. len = eccsize;
  2294. while (len > 0) {
  2295. int num = min_t(int, len, 4);
  2296. chip->write_buf(mtd, (uint8_t *)&fill,
  2297. num);
  2298. len -= num;
  2299. }
  2300. } else {
  2301. pos = eccsize + i * (eccsize + chunk);
  2302. chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
  2303. }
  2304. } else
  2305. sndcmd = 1;
  2306. len = min_t(int, length, chunk);
  2307. chip->write_buf(mtd, bufpoi, len);
  2308. bufpoi += len;
  2309. length -= len;
  2310. }
  2311. if (length > 0)
  2312. chip->write_buf(mtd, bufpoi, length);
  2313. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  2314. status = chip->waitfunc(mtd, chip);
  2315. return status & NAND_STATUS_FAIL ? -EIO : 0;
  2316. }
  2317. /**
  2318. * nand_do_read_oob - [INTERN] NAND read out-of-band
  2319. * @mtd: MTD device structure
  2320. * @from: offset to read from
  2321. * @ops: oob operations description structure
  2322. *
  2323. * NAND read out-of-band data from the spare area.
  2324. */
  2325. static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
  2326. struct mtd_oob_ops *ops)
  2327. {
  2328. int page, realpage, chipnr;
  2329. struct nand_chip *chip = mtd->priv;
  2330. struct mtd_ecc_stats stats;
  2331. int readlen = ops->ooblen;
  2332. int len;
  2333. uint8_t *buf = ops->oobbuf;
  2334. int ret = 0;
  2335. #if (defined(CONFIG_MTK_TLC_NAND_SUPPORT))
  2336. loff_t temp;
  2337. #endif
  2338. #ifdef CONFIG_MTK_MTD_NAND
2339. /* variables we need for the checksum */
  2340. u8 oob_checksum = 0;
  2341. u8 i, j;
  2342. bool empty = true;
  2343. struct nand_oobfree *free_entry;
  2344. g_MtdPerfLog.read_size_0_512++;
  2345. #endif
  2346. pr_debug("%s: from = 0x%08Lx, len = %i\n",
  2347. __func__, (unsigned long long)from, readlen);
  2348. stats = mtd->ecc_stats;
  2349. if (ops->mode == MTD_OPS_AUTO_OOB)
  2350. len = chip->ecc.layout->oobavail;
  2351. else
  2352. len = mtd->oobsize;
  2353. if (unlikely(ops->ooboffs >= len)) {
  2354. pr_debug("%s: attempt to start read outside oob\n",
  2355. __func__);
  2356. return -EINVAL;
  2357. }
  2358. /* Do not allow reads past end of device */
  2359. if (unlikely(from >= mtd->size ||
  2360. ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
  2361. (from >> chip->page_shift)) * len)) {
  2362. pr_debug("%s: attempt to read beyond end of device\n",
  2363. __func__);
  2364. return -EINVAL;
  2365. }
  2366. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2367. temp = mtk_nand_device_size();
  2368. if (from >= temp)
  2369. chipnr = 1;
  2370. else
  2371. chipnr = 0;
  2372. #else
  2373. chipnr = (int)(from >> chip->chip_shift);
  2374. #endif
  2375. chip->select_chip(mtd, chipnr);
  2376. /* Shift to get page */
  2377. realpage = (int)(from >> chip->page_shift);
  2378. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2379. page = realpage % (chip->pagemask + 1);
  2380. #else
  2381. page = realpage & chip->pagemask;
  2382. #endif
  2383. while (1) {
  2384. if (ops->mode == MTD_OPS_RAW)
  2385. ret = chip->ecc.read_oob_raw(mtd, chip, page);
  2386. else
  2387. ret = chip->ecc.read_oob(mtd, chip, page);
  2388. if (ret < 0)
  2389. break;
  2390. #ifdef CONFIG_MTK_MTD_NAND
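/*
 * MTK extension: XOR all bytes in the free OOB regions and compare the
 * result with the checksum byte stored right after the last free region;
 * the check is skipped when the free area is all 0xFF (erased).
 */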
  2391. oob_checksum = 0;
  2392. for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && chip->ecc.layout->oobfree[i].length; i++) {
  2393. free_entry = (struct nand_oobfree *)(chip->ecc.layout->oobfree) + i;
  2394. for (j = 0; j < free_entry->length; j++) {
  2395. oob_checksum ^= chip->oob_poi[free_entry->offset + j];
  2396. if (chip->oob_poi[free_entry->offset + j] != 0xFF)
  2397. empty = false;
  2398. }
  2399. }
  2400. if (!empty
  2401. && (oob_checksum != chip->oob_poi[free_entry->offset + free_entry->length]))
  2402. return -EIO;
  2403. #endif
  2404. len = min(len, readlen);
  2405. buf = nand_transfer_oob(chip, buf, ops, len);
  2406. if (chip->options & NAND_NEED_READRDY) {
  2407. /* Apply delay or wait for ready/busy pin */
  2408. if (!chip->dev_ready)
  2409. udelay(chip->chip_delay);
  2410. else
  2411. nand_wait_ready(mtd);
  2412. }
  2413. readlen -= len;
  2414. if (!readlen)
  2415. break;
  2416. /* Increment page address */
  2417. realpage++;
  2418. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2419. page = realpage % (chip->pagemask + 1);
  2420. #else
  2421. page = realpage & chip->pagemask;
  2422. #endif
  2423. /* Check, if we cross a chip boundary */
  2424. if (!page) {
  2425. chipnr++;
  2426. chip->select_chip(mtd, -1);
  2427. chip->select_chip(mtd, chipnr);
  2428. }
  2429. }
  2430. chip->select_chip(mtd, -1);
  2431. ops->oobretlen = ops->ooblen - readlen;
  2432. if (ret < 0)
  2433. return ret;
  2434. if (mtd->ecc_stats.failed - stats.failed)
  2435. return -EBADMSG;
  2436. return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
  2437. }
  2438. /**
  2439. * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
  2440. * @mtd: MTD device structure
  2441. * @from: offset to read from
  2442. * @ops: oob operation description structure
  2443. *
  2444. * NAND read data and/or out-of-band data.
  2445. */
  2446. static int nand_read_oob(struct mtd_info *mtd, loff_t from,
  2447. struct mtd_oob_ops *ops)
  2448. {
  2449. int ret = -ENOTSUPP;
  2450. ops->retlen = 0;
  2451. /* Do not allow reads past end of device */
  2452. #ifdef CONFIG_MTK_MTD_NAND
  2453. if (ops->datbuf && (from + ops->len) > (mtd->size + PMT_POOL_SIZE * mtd->erasesize)) {
  2454. pr_debug("%s: attempt to read beyond end of device\n",
  2455. __func__);
  2456. return -EINVAL;
  2457. }
  2458. #else
  2459. if (ops->datbuf && (from + ops->len) > mtd->size) {
  2460. pr_debug("%s: attempt to read beyond end of device\n",
  2461. __func__);
  2462. return -EINVAL;
  2463. }
  2464. #endif
  2465. nand_get_device(mtd, FL_READING);
  2466. switch (ops->mode) {
  2467. case MTD_OPS_PLACE_OOB:
  2468. case MTD_OPS_AUTO_OOB:
  2469. case MTD_OPS_RAW:
  2470. break;
  2471. default:
  2472. goto out;
  2473. }
  2474. if (!ops->datbuf)
  2475. ret = nand_do_read_oob(mtd, from, ops);
  2476. else {
  2477. #ifdef CONFIG_MTK_MTD_NAND
  2478. if (g_mtk_nss_cachev_cnt)
  2479. ret = nand_do_read_ops_ex(mtd, from, ops);
  2480. else
  2481. ret = nand_do_read_ops(mtd, from, ops);
  2482. #else
  2483. ret = nand_do_read_ops(mtd, from, ops);
  2484. #endif
  2485. }
  2486. out:
  2487. nand_release_device(mtd);
  2488. return ret;
  2489. }
  2490. /**
  2491. * nand_write_page_raw - [INTERN] raw page write function
  2492. * @mtd: mtd info structure
  2493. * @chip: nand chip info structure
  2494. * @buf: data buffer
  2495. * @oob_required: must write chip->oob_poi to OOB
  2496. *
  2497. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  2498. */
  2499. static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
  2500. const uint8_t *buf, int oob_required)
  2501. {
  2502. chip->write_buf(mtd, buf, mtd->writesize);
  2503. if (oob_required)
  2504. chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
  2505. return 0;
  2506. }
  2507. /**
  2508. * nand_write_page_raw_syndrome - [INTERN] raw page write function
  2509. * @mtd: mtd info structure
  2510. * @chip: nand chip info structure
  2511. * @buf: data buffer
  2512. * @oob_required: must write chip->oob_poi to OOB
  2513. *
  2514. * We need a special oob layout and handling even when ECC isn't checked.
  2515. */
  2516. static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
  2517. struct nand_chip *chip,
  2518. const uint8_t *buf, int oob_required)
  2519. {
  2520. int eccsize = chip->ecc.size;
  2521. int eccbytes = chip->ecc.bytes;
  2522. uint8_t *oob = chip->oob_poi;
  2523. int steps, size;
  2524. for (steps = chip->ecc.steps; steps > 0; steps--) {
  2525. chip->write_buf(mtd, buf, eccsize);
  2526. buf += eccsize;
  2527. if (chip->ecc.prepad) {
  2528. chip->write_buf(mtd, oob, chip->ecc.prepad);
  2529. oob += chip->ecc.prepad;
  2530. }
  2531. chip->write_buf(mtd, oob, eccbytes);
  2532. oob += eccbytes;
  2533. if (chip->ecc.postpad) {
  2534. chip->write_buf(mtd, oob, chip->ecc.postpad);
  2535. oob += chip->ecc.postpad;
  2536. }
  2537. }
  2538. size = mtd->oobsize - (oob - chip->oob_poi);
  2539. if (size)
  2540. chip->write_buf(mtd, oob, size);
  2541. return 0;
  2542. }
  2543. /**
  2544. * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
  2545. * @mtd: mtd info structure
  2546. * @chip: nand chip info structure
  2547. * @buf: data buffer
  2548. * @oob_required: must write chip->oob_poi to OOB
  2549. */
  2550. static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
  2551. const uint8_t *buf, int oob_required)
  2552. {
  2553. int i, eccsize = chip->ecc.size;
  2554. int eccbytes = chip->ecc.bytes;
  2555. int eccsteps = chip->ecc.steps;
  2556. uint8_t *ecc_calc = chip->buffers->ecccalc;
  2557. const uint8_t *p = buf;
  2558. uint32_t *eccpos = chip->ecc.layout->eccpos;
  2559. /* Software ECC calculation */
  2560. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  2561. chip->ecc.calculate(mtd, p, &ecc_calc[i]);
  2562. for (i = 0; i < chip->ecc.total; i++)
  2563. chip->oob_poi[eccpos[i]] = ecc_calc[i];
  2564. return chip->ecc.write_page_raw(mtd, chip, buf, 1);
  2565. }
  2566. /**
  2567. * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
  2568. * @mtd: mtd info structure
  2569. * @chip: nand chip info structure
  2570. * @buf: data buffer
  2571. * @oob_required: must write chip->oob_poi to OOB
  2572. */
  2573. static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
  2574. const uint8_t *buf, int oob_required)
  2575. {
  2576. int i, eccsize = chip->ecc.size;
  2577. int eccbytes = chip->ecc.bytes;
  2578. int eccsteps = chip->ecc.steps;
  2579. uint8_t *ecc_calc = chip->buffers->ecccalc;
  2580. const uint8_t *p = buf;
  2581. uint32_t *eccpos = chip->ecc.layout->eccpos;
  2582. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2583. chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
  2584. chip->write_buf(mtd, p, eccsize);
  2585. chip->ecc.calculate(mtd, p, &ecc_calc[i]);
  2586. }
  2587. for (i = 0; i < chip->ecc.total; i++)
  2588. chip->oob_poi[eccpos[i]] = ecc_calc[i];
  2589. chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
  2590. return 0;
  2591. }
  2592. /**
2593. * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
  2594. * @mtd: mtd info structure
  2595. * @chip: nand chip info structure
  2596. * @offset: column address of subpage within the page
  2597. * @data_len: data length
  2598. * @buf: data buffer
  2599. * @oob_required: must write chip->oob_poi to OOB
  2600. */
  2601. static int nand_write_subpage_hwecc(struct mtd_info *mtd,
  2602. struct nand_chip *chip, uint32_t offset,
  2603. uint32_t data_len, const uint8_t *buf,
  2604. int oob_required)
  2605. {
  2606. uint8_t *oob_buf = chip->oob_poi;
  2607. uint8_t *ecc_calc = chip->buffers->ecccalc;
  2608. int ecc_size = chip->ecc.size;
  2609. int ecc_bytes = chip->ecc.bytes;
  2610. int ecc_steps = chip->ecc.steps;
  2611. uint32_t *eccpos = chip->ecc.layout->eccpos;
  2612. uint32_t start_step = offset / ecc_size;
  2613. uint32_t end_step = (offset + data_len - 1) / ecc_size;
  2614. int oob_bytes = mtd->oobsize / ecc_steps;
  2615. int step, i;
  2616. for (step = 0; step < ecc_steps; step++) {
  2617. /* configure controller for WRITE access */
  2618. chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
  2619. /* write data (untouched subpages already masked by 0xFF) */
  2620. chip->write_buf(mtd, buf, ecc_size);
  2621. /* mask ECC of un-touched subpages by padding 0xFF */
  2622. if ((step < start_step) || (step > end_step))
  2623. memset(ecc_calc, 0xff, ecc_bytes);
  2624. else
  2625. chip->ecc.calculate(mtd, buf, ecc_calc);
  2626. /* mask OOB of un-touched subpages by padding 0xFF */
  2627. /* if oob_required, preserve OOB metadata of written subpage */
  2628. if (!oob_required || (step < start_step) || (step > end_step))
  2629. memset(oob_buf, 0xff, oob_bytes);
  2630. buf += ecc_size;
  2631. ecc_calc += ecc_bytes;
  2632. oob_buf += oob_bytes;
  2633. }
2634. /* copy calculated ECC for the whole page to chip->oob_poi */
2635. /* this includes the masked value (0xFF) for unwritten subpages */
  2636. ecc_calc = chip->buffers->ecccalc;
  2637. for (i = 0; i < chip->ecc.total; i++)
  2638. chip->oob_poi[eccpos[i]] = ecc_calc[i];
  2639. /* write OOB buffer to NAND device */
  2640. chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
  2641. return 0;
  2642. }
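/*
 * Worked example for the subpage path above (hypothetical numbers): with
 * ecc.size = 512 on a 2048-byte page, writing 512 bytes at offset 1024
 * touches only ECC step 2.
 */
#if 0
	uint32_t ecc_size = 512, offset = 1024, data_len = 512;
	uint32_t start_step = offset / ecc_size;			/* 2 */
	uint32_t end_step = (offset + data_len - 1) / ecc_size;		/* 2 */

	/*
	 * Steps 0, 1 and 3 fall outside [start_step, end_step], so their ECC
	 * bytes and OOB slices are padded with 0xFF; the caller's data for
	 * those subpages is already 0xFF, so they remain unprogrammed.
	 */
#endif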
  2643. /**
  2644. * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
  2645. * @mtd: mtd info structure
  2646. * @chip: nand chip info structure
  2647. * @buf: data buffer
  2648. * @oob_required: must write chip->oob_poi to OOB
  2649. *
  2650. * The hw generator calculates the error syndrome automatically. Therefore we
  2651. * need a special oob layout and handling.
  2652. */
  2653. static int nand_write_page_syndrome(struct mtd_info *mtd,
  2654. struct nand_chip *chip,
  2655. const uint8_t *buf, int oob_required)
  2656. {
  2657. int i, eccsize = chip->ecc.size;
  2658. int eccbytes = chip->ecc.bytes;
  2659. int eccsteps = chip->ecc.steps;
  2660. const uint8_t *p = buf;
  2661. uint8_t *oob = chip->oob_poi;
  2662. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2663. chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
  2664. chip->write_buf(mtd, p, eccsize);
  2665. if (chip->ecc.prepad) {
  2666. chip->write_buf(mtd, oob, chip->ecc.prepad);
  2667. oob += chip->ecc.prepad;
  2668. }
  2669. chip->ecc.calculate(mtd, p, oob);
  2670. chip->write_buf(mtd, oob, eccbytes);
  2671. oob += eccbytes;
  2672. if (chip->ecc.postpad) {
  2673. chip->write_buf(mtd, oob, chip->ecc.postpad);
  2674. oob += chip->ecc.postpad;
  2675. }
  2676. }
  2677. /* Calculate remaining oob bytes */
  2678. i = mtd->oobsize - (oob - chip->oob_poi);
  2679. if (i)
  2680. chip->write_buf(mtd, oob, i);
  2681. return 0;
  2682. }
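/*
 * Layout sketch for the syndrome write above (illustrative only): data and
 * OOB bytes are interleaved on the flash, one chunk per ECC step:
 *
 *	| data (ecc.size) | prepad | ECC (ecc.bytes) | postpad | data ... |
 *
 * with any remaining OOB bytes written after the last step. This is why the
 * syndrome path needs its own OOB layout instead of the usual
 * "all data first, OOB last" arrangement used by nand_write_page_hwecc().
 */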
  2683. /**
  2684. * nand_write_page - [REPLACEABLE] write one page
  2685. * @mtd: MTD device structure
  2686. * @chip: NAND chip descriptor
  2687. * @offset: address offset within the page
  2688. * @data_len: length of actual data to be written
  2689. * @buf: the data to write
  2690. * @oob_required: must write chip->oob_poi to OOB
  2691. * @page: page number to write
  2692. * @cached: cached programming
  2693. * @raw: use _raw version of write_page
  2694. */
  2695. static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
  2696. uint32_t offset, int data_len, const uint8_t *buf,
  2697. int oob_required, int page, int cached, int raw)
  2698. {
  2699. int status, subpage;
  2700. if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
  2701. chip->ecc.write_subpage)
  2702. subpage = offset || (data_len < mtd->writesize);
  2703. else
  2704. subpage = 0;
  2705. chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
  2706. if (unlikely(raw))
  2707. status = chip->ecc.write_page_raw(mtd, chip, buf,
  2708. oob_required);
  2709. else if (subpage)
  2710. status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
  2711. buf, oob_required);
  2712. else
  2713. status = chip->ecc.write_page(mtd, chip, buf, oob_required);
  2714. if (status < 0)
  2715. return status;
  2716. /*
2717. * Cached programming is disabled for now. Not sure if it's worth the
2718. * trouble. The speed gain is not very impressive (2.3 -> 2.6 MiB/s).
  2719. */
  2720. cached = 0;
  2721. if (!cached || !NAND_HAS_CACHEPROG(chip)) {
  2722. chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
  2723. status = chip->waitfunc(mtd, chip);
  2724. /*
  2725. * See if operation failed and additional status checks are
  2726. * available.
  2727. */
  2728. if ((status & NAND_STATUS_FAIL) && (chip->errstat))
  2729. status = chip->errstat(mtd, chip, FL_WRITING, status,
  2730. page);
  2731. if (status & NAND_STATUS_FAIL)
  2732. return -EIO;
  2733. } else {
  2734. chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
  2735. status = chip->waitfunc(mtd, chip);
  2736. }
  2737. return 0;
  2738. }
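/*
 * Illustrative note (hypothetical numbers): for a 2048-byte page, writing
 * 2048 bytes at offset 0 takes the full-page ecc.write_page() path, while
 * writing 512 bytes at offset 0, or 1024 bytes at offset 512, sets
 * subpage = 1 and uses ecc.write_subpage() instead - unless the chip has
 * NAND_NO_SUBPAGE_WRITE set.
 */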
  2739. /**
  2740. * nand_fill_oob - [INTERN] Transfer client buffer to oob
  2741. * @mtd: MTD device structure
  2742. * @oob: oob data buffer
  2743. * @len: oob data write length
  2744. * @ops: oob ops structure
  2745. */
  2746. static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
  2747. struct mtd_oob_ops *ops)
  2748. {
  2749. struct nand_chip *chip = mtd->priv;
  2750. /*
  2751. * Initialise to all 0xFF, to avoid the possibility of left over OOB
  2752. * data from a previous OOB read.
  2753. */
  2754. memset(chip->oob_poi, 0xff, mtd->oobsize);
  2755. switch (ops->mode) {
  2756. case MTD_OPS_PLACE_OOB:
  2757. case MTD_OPS_RAW:
  2758. memcpy(chip->oob_poi + ops->ooboffs, oob, len);
  2759. return oob + len;
  2760. case MTD_OPS_AUTO_OOB: {
  2761. struct nand_oobfree *free = chip->ecc.layout->oobfree;
  2762. uint32_t boffs = 0, woffs = ops->ooboffs;
  2763. size_t bytes = 0;
  2764. for (; free->length && len; free++, len -= bytes) {
  2765. /* Write request not from offset 0? */
  2766. if (unlikely(woffs)) {
  2767. if (woffs >= free->length) {
  2768. woffs -= free->length;
  2769. continue;
  2770. }
  2771. boffs = free->offset + woffs;
  2772. bytes = min_t(size_t, len,
  2773. (free->length - woffs));
  2774. woffs = 0;
  2775. } else {
  2776. bytes = min_t(size_t, len, free->length);
  2777. boffs = free->offset;
  2778. }
  2779. memcpy(chip->oob_poi + boffs, oob, bytes);
  2780. oob += bytes;
  2781. }
  2782. return oob;
  2783. }
  2784. default:
  2785. BUG();
  2786. }
  2787. return NULL;
  2788. }
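/*
 * Example of the MTD_OPS_AUTO_OOB placement above. The free-region layout
 * is hypothetical, purely for illustration:
 */
#if 0
	static struct nand_oobfree example_free[] = {
		{ .offset = 2,  .length = 6 },
		{ .offset = 10, .length = 6 },
		{ }
	};

	/*
	 * With this layout and ops->ooboffs == 0, nand_fill_oob(mtd, oob, 8,
	 * ops) copies oob[0..5] into chip->oob_poi[2..7] and oob[6..7] into
	 * chip->oob_poi[10..11]; every other OOB byte stays 0xFF.
	 */
#endif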
  2789. #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
  2790. /**
  2791. * nand_do_write_ops - [INTERN] NAND write with ECC
  2792. * @mtd: MTD device structure
  2793. * @to: offset to write to
  2794. * @ops: oob operations description structure
  2795. *
  2796. * NAND write with ECC.
  2797. */
  2798. static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
  2799. struct mtd_oob_ops *ops)
  2800. {
  2801. int chipnr, realpage, page, blockmask, column;
  2802. struct nand_chip *chip = mtd->priv;
  2803. uint32_t writelen = ops->len;
  2804. uint32_t oobwritelen = ops->ooblen;
  2805. uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
  2806. mtd->oobavail : mtd->oobsize;
  2807. uint8_t *oob = ops->oobbuf;
  2808. uint8_t *buf = ops->datbuf;
  2809. int ret;
  2810. int oob_required = oob ? 1 : 0;
  2811. #if (defined(CONFIG_MTK_TLC_NAND_SUPPORT))
  2812. loff_t temp;
  2813. u32 block_size;
  2814. u32 idx;
  2815. u64 start_addr;
  2816. #endif
  2817. ops->retlen = 0;
  2818. if (!writelen)
  2819. return 0;
  2820. /* Reject writes, which are not page aligned */
  2821. if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
  2822. pr_notice("%s: attempt to write non page aligned data\n",
  2823. __func__);
  2824. return -EINVAL;
  2825. }
  2826. column = to & (mtd->writesize - 1);
  2827. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2828. temp = mtk_nand_device_size();
  2829. if (to >= temp)
  2830. chipnr = 1;
  2831. else
  2832. chipnr = 0;
  2833. #else
  2834. chipnr = (int)(to >> chip->chip_shift);
  2835. #endif
  2836. chip->select_chip(mtd, chipnr);
  2837. /* Check, if it is write protected */
  2838. if (nand_check_wp(mtd)) {
  2839. ret = -EIO;
  2840. goto err_out;
  2841. }
  2842. realpage = (int)(to >> chip->page_shift);
  2843. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2844. page = realpage % (chip->pagemask + 1);
  2845. #else
  2846. page = realpage & chip->pagemask;
  2847. #endif
  2848. blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
  2849. #ifdef CONFIG_MTK_MLC_NAND_SUPPORT
  2850. if (mtk_nand_IsRawPartition(to))
  2851. blockmask = (1ULL << (chip->phys_erase_shift - chip->page_shift - 1)) - 1;
  2852. if (!mtk_block_istlc(to))
  2853. blockmask = (1ULL << (chip->phys_erase_shift - chip->page_shift - 1)) - 1;
  2854. #endif
  2855. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2856. if (mtk_is_normal_tlc_nand() && !mtk_block_istlc(to)) {
  2857. block_size = mtd->erasesize / 3;
  2858. blockmask = (block_size / (1 << chip->page_shift)) - 1;
  2859. } else {
  2860. start_addr = part_get_startaddress(to, &idx);
  2861. block_size = mtd->eraseregions[idx].erasesize;
  2862. blockmask = (block_size / (1 << chip->page_shift)) - 1;
  2863. }
  2864. #endif
  2865. /* Invalidate the page cache, when we write to the cached page */
  2866. if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
  2867. ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
  2868. chip->pagebuf = -1;
  2869. #ifdef CONFIG_MTK_MTD_NAND
  2870. mtk_nss_invalidate_cache_by_val(g_mtk_nss_cachev, realpage);
  2871. #endif
  2872. /* Don't allow multipage oob writes with offset */
  2873. if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
  2874. ret = -EINVAL;
  2875. goto err_out;
  2876. }
  2877. while (1) {
  2878. int bytes = mtd->writesize;
  2879. int cached = writelen > bytes && page != blockmask;
  2880. uint8_t *wbuf = buf;
  2881. int use_bufpoi;
  2882. int part_pagewr = (column || writelen < (mtd->writesize - 1));
  2883. if (part_pagewr)
  2884. use_bufpoi = 1;
  2885. else if (chip->options & NAND_USE_BOUNCE_BUFFER)
  2886. use_bufpoi = !virt_addr_valid(buf);
  2887. else
  2888. use_bufpoi = 0;
2889. /* Partial page write, or need to use the bounce buffer? */
  2890. if (use_bufpoi) {
  2891. pr_debug("%s: using write bounce buffer for buf@%p\n",
  2892. __func__, buf);
  2893. cached = 0;
  2894. if (part_pagewr)
  2895. bytes = min_t(int, bytes - column, writelen);
  2896. chip->pagebuf = -1;
  2897. memset(chip->buffers->databuf, 0xff, mtd->writesize);
  2898. memcpy(&chip->buffers->databuf[column], buf, bytes);
  2899. wbuf = chip->buffers->databuf;
  2900. }
  2901. if (unlikely(oob)) {
  2902. size_t len = min(oobwritelen, oobmaxlen);
  2903. oob = nand_fill_oob(mtd, oob, len, ops);
  2904. oobwritelen -= len;
  2905. } else {
  2906. /* We still need to erase leftover OOB data */
  2907. memset(chip->oob_poi, 0xff, mtd->oobsize);
  2908. }
  2909. ret = chip->write_page(mtd, chip, column, bytes, wbuf,
  2910. oob_required, page, cached,
  2911. (ops->mode == MTD_OPS_RAW));
  2912. if (ret)
  2913. break;
  2914. writelen -= bytes;
  2915. if (!writelen)
  2916. break;
  2917. column = 0;
  2918. buf += bytes;
  2919. realpage++;
  2920. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  2921. page = realpage % (chip->pagemask + 1);
  2922. #else
  2923. page = realpage & chip->pagemask;
  2924. #endif
  2925. /* Check, if we cross a chip boundary */
  2926. if (!page) {
  2927. chipnr++;
  2928. chip->select_chip(mtd, -1);
  2929. chip->select_chip(mtd, chipnr);
  2930. }
  2931. }
  2932. ops->retlen = ops->len - writelen;
  2933. if (unlikely(oob))
  2934. ops->oobretlen = ops->ooblen;
  2935. err_out:
  2936. chip->select_chip(mtd, -1);
  2937. return ret;
  2938. }
  2939. /**
  2940. * panic_nand_write - [MTD Interface] NAND write with ECC
  2941. * @mtd: MTD device structure
  2942. * @to: offset to write to
  2943. * @len: number of bytes to write
  2944. * @retlen: pointer to variable to store the number of written bytes
  2945. * @buf: the data to write
  2946. *
  2947. * NAND write with ECC. Used when performing writes in interrupt context, this
  2948. * may for example be called by mtdoops when writing an oops while in panic.
  2949. */
  2950. static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  2951. size_t *retlen, const uint8_t *buf)
  2952. {
  2953. struct nand_chip *chip = mtd->priv;
  2954. struct mtd_oob_ops ops;
  2955. int ret;
  2956. #ifdef CONFIG_MTK_MTD_NAND
  2957. nand_enable_clock();
  2958. #endif
  2959. /* Wait for the device to get ready */
  2960. panic_nand_wait(mtd, chip, 400);
  2961. /* Grab the device */
  2962. panic_nand_get_device(chip, mtd, FL_WRITING);
  2963. ops.len = len;
  2964. ops.datbuf = (uint8_t *)buf;
  2965. ops.oobbuf = NULL;
  2966. ops.mode = MTD_OPS_PLACE_OOB;
  2967. ret = nand_do_write_ops(mtd, to, &ops);
  2968. *retlen = ops.retlen;
  2969. #ifdef CONFIG_MTK_MTD_NAND
  2970. nand_disable_clock();
  2971. #endif
  2972. return ret;
  2973. }
  2974. /**
  2975. * nand_write - [MTD Interface] NAND write with ECC
  2976. * @mtd: MTD device structure
  2977. * @to: offset to write to
  2978. * @len: number of bytes to write
  2979. * @retlen: pointer to variable to store the number of written bytes
  2980. * @buf: the data to write
  2981. *
  2982. * NAND write with ECC.
  2983. */
  2984. static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  2985. size_t *retlen, const uint8_t *buf)
  2986. {
  2987. struct mtd_oob_ops ops;
  2988. int ret;
  2989. struct nand_chip *chip = (struct nand_chip *)mtd->priv;
  2990. u32 page = (to >> chip->page_shift);
  2991. #ifdef MTD_NAND_PFM
  2992. struct timeval pfm_time_write;
  2993. PFM_BEGIN(pfm_time_write);
  2994. #endif
  2995. nand_get_device(mtd, FL_WRITING);
  2996. if (mtk_is_normal_tlc_nand() && mtk_block_istlc(to)) {
  2997. ret = mtk_nand_write_tlc_block(mtd, chip, (uint8_t *)buf, page);
  2998. if (ret)
  2999. *retlen = 0;
  3000. else
  3001. *retlen = len;
  3002. } else {
  3003. ops.len = len;
  3004. ops.datbuf = (uint8_t *)buf;
  3005. ops.oobbuf = NULL;
  3006. ops.mode = MTD_OPS_PLACE_OOB;
  3007. ret = nand_do_write_ops(mtd, to, &ops);
  3008. *retlen = ops.retlen;
  3009. }
  3010. nand_release_device(mtd);
  3011. #ifdef MTD_NAND_PFM
  3012. if (mtk_block_istlc((u64)to))
  3013. PFM_END_W_TLC(pfm_time_write, (*retlen));
  3014. else
  3015. PFM_END_W_SLC(pfm_time_write, (*retlen));
  3016. #endif
  3017. return ret;
  3018. }
  3019. /**
  3020. * nand_do_write_oob - [MTD Interface] NAND write out-of-band
  3021. * @mtd: MTD device structure
  3022. * @to: offset to write to
  3023. * @ops: oob operation description structure
  3024. *
  3025. * NAND write out-of-band.
  3026. */
  3027. static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
  3028. struct mtd_oob_ops *ops)
  3029. {
  3030. int chipnr, page, status, len;
  3031. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3032. loff_t temp;
  3033. #endif
  3034. struct nand_chip *chip = mtd->priv;
  3035. pr_debug("%s: to = 0x%08x, len = %i\n",
  3036. __func__, (unsigned int)to, (int)ops->ooblen);
  3037. if (ops->mode == MTD_OPS_AUTO_OOB)
  3038. len = chip->ecc.layout->oobavail;
  3039. else
  3040. len = mtd->oobsize;
  3041. /* Do not allow write past end of page */
  3042. if ((ops->ooboffs + ops->ooblen) > len) {
  3043. pr_debug("%s: attempt to write past end of page\n",
  3044. __func__);
  3045. return -EINVAL;
  3046. }
  3047. if (unlikely(ops->ooboffs >= len)) {
  3048. pr_debug("%s: attempt to start write outside oob\n",
  3049. __func__);
  3050. return -EINVAL;
  3051. }
  3052. /* Do not allow write past end of device */
  3053. if (unlikely(to >= mtd->size ||
  3054. ops->ooboffs + ops->ooblen >
  3055. ((mtd->size >> chip->page_shift) -
  3056. (to >> chip->page_shift)) * len)) {
  3057. pr_debug("%s: attempt to write beyond end of device\n",
  3058. __func__);
  3059. return -EINVAL;
  3060. }
  3061. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3062. temp = mtk_nand_device_size();
  3063. if (to >= temp)
  3064. chipnr = 1;
  3065. else
  3066. chipnr = 0;
  3067. #else
  3068. chipnr = (int)(to >> chip->chip_shift);
  3069. #endif
  3070. chip->select_chip(mtd, chipnr);
  3071. /* Shift to get page */
  3072. page = (int)(to >> chip->page_shift);
  3073. /*
  3074. * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
  3075. * of my DiskOnChip 2000 test units) will clear the whole data page too
  3076. * if we don't do this. I have no clue why, but I seem to have 'fixed'
  3077. * it in the doc2000 driver in August 1999. dwmw2.
  3078. */
  3079. chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  3080. /* Check, if it is write protected */
  3081. if (nand_check_wp(mtd)) {
  3082. chip->select_chip(mtd, -1);
  3083. return -EROFS;
  3084. }
  3085. /* Invalidate the page cache, if we write to the cached page */
  3086. if (page == chip->pagebuf)
  3087. chip->pagebuf = -1;
  3088. nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
  3089. if (ops->mode == MTD_OPS_RAW) {
  3090. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3091. status = chip->ecc.write_oob_raw(mtd, chip, page % (chip->pagemask + 1));
  3092. #else
  3093. status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
  3094. #endif
  3095. } else {
  3096. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3097. status = chip->ecc.write_oob(mtd, chip, page % (chip->pagemask + 1));
  3098. #else
  3099. status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
  3100. #endif
  3101. }
  3102. chip->select_chip(mtd, -1);
  3103. if (status)
  3104. return status;
  3105. ops->oobretlen = ops->ooblen;
  3106. return 0;
  3107. }
  3108. /**
  3109. * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
  3110. * @mtd: MTD device structure
  3111. * @to: offset to write to
  3112. * @ops: oob operation description structure
  3113. */
  3114. static int nand_write_oob(struct mtd_info *mtd, loff_t to,
  3115. struct mtd_oob_ops *ops)
  3116. {
  3117. int ret = -ENOTSUPP;
  3118. ops->retlen = 0;
  3119. /* Do not allow writes past end of device */
  3120. #ifdef CONFIG_MTK_MTD_NAND
  3121. if (ops->datbuf && (to + ops->len) > (mtd->size + PMT_POOL_SIZE * mtd->erasesize)) {
  3122. pr_debug("%s: attempt to write beyond end of device\n",
  3123. __func__);
  3124. return -EINVAL;
  3125. }
  3126. #else
  3127. if (ops->datbuf && (to + ops->len) > mtd->size) {
  3128. pr_debug("%s: attempt to write beyond end of device\n",
  3129. __func__);
  3130. return -EINVAL;
  3131. }
  3132. #endif
  3133. nand_get_device(mtd, FL_WRITING);
  3134. switch (ops->mode) {
  3135. case MTD_OPS_PLACE_OOB:
  3136. case MTD_OPS_AUTO_OOB:
  3137. case MTD_OPS_RAW:
  3138. break;
  3139. default:
  3140. goto out;
  3141. }
  3142. if (!ops->datbuf)
  3143. ret = nand_do_write_oob(mtd, to, ops);
  3144. else
  3145. ret = nand_do_write_ops(mtd, to, ops);
  3146. out:
  3147. nand_release_device(mtd);
  3148. return ret;
  3149. }
  3150. /**
  3151. * single_erase - [GENERIC] NAND standard block erase command function
  3152. * @mtd: MTD device structure
  3153. * @page: the page address of the block which will be erased
  3154. *
  3155. * Standard erase command for NAND chips. Returns NAND status.
  3156. */
  3157. static int single_erase(struct mtd_info *mtd, int page)
  3158. {
  3159. struct nand_chip *chip = mtd->priv;
  3160. /* Send commands to erase a block */
  3161. chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
  3162. chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
  3163. return chip->waitfunc(mtd, chip);
  3164. }
  3165. /**
  3166. * nand_erase - [MTD Interface] erase block(s)
  3167. * @mtd: MTD device structure
  3168. * @instr: erase instruction
  3169. *
3170. * Erase one or more blocks.
  3171. */
  3172. static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
  3173. {
  3174. return nand_erase_nand(mtd, instr, 0);
  3175. }
  3176. /**
  3177. * nand_erase_nand - [INTERN] erase block(s)
  3178. * @mtd: MTD device structure
  3179. * @instr: erase instruction
  3180. * @allowbbt: allow erasing the bbt area
  3181. *
3182. * Erase one or more blocks.
  3183. */
  3184. int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
  3185. int allowbbt)
  3186. {
  3187. int page, status, pages_per_block, ret, chipnr;
  3188. struct nand_chip *chip = mtd->priv;
  3189. loff_t len;
  3190. #if (defined(CONFIG_MTK_TLC_NAND_SUPPORT))
  3191. u64 start_addr;
  3192. u64 block_size;
  3193. u32 idx;
  3194. loff_t temp;
  3195. #endif
  3196. #ifdef CONFIG_MTK_MLC_NAND_SUPPORT
  3197. u64 block_size;
  3198. bool raw_partition = false;
  3199. #endif
  3200. pr_debug("%s: start = 0x%012llx, len = %llu\n",
  3201. __func__, (unsigned long long)instr->addr,
  3202. (unsigned long long)instr->len);
  3203. if (check_offs_len(mtd, instr->addr, instr->len))
  3204. return -EINVAL;
  3205. /* Grab the lock and see if the device is available */
  3206. nand_get_device(mtd, FL_ERASING);
  3207. /* Shift to get first page */
  3208. page = (int)(instr->addr >> chip->page_shift);
  3209. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3210. temp = mtk_nand_device_size();
  3211. if (instr->addr >= temp)
  3212. chipnr = 1;
  3213. else
  3214. chipnr = 0;
  3215. #else
  3216. chipnr = (int)(instr->addr >> chip->chip_shift);
  3217. #endif
  3218. /* Calculate pages in each block */
  3219. pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
  3220. #ifdef CONFIG_MTK_MLC_NAND_SUPPORT
  3221. if (mtk_nand_IsRawPartition(instr->addr)) {
  3222. raw_partition = true;
  3223. pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift - 1);
  3224. }
  3225. if (raw_partition)
  3226. block_size = (1 << (chip->phys_erase_shift-1));
  3227. else
  3228. block_size = (1 << chip->phys_erase_shift);
  3229. if (!mtk_block_istlc(instr->addr)) {
  3230. block_size = (1 << (chip->phys_erase_shift-1));
  3231. pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift - 1);
  3232. }
  3233. #endif
  3234. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3235. if (mtk_is_normal_tlc_nand() && !mtk_block_istlc(instr->addr)) {
  3236. block_size = mtd->erasesize / 3;
  3237. pages_per_block = (u32)((u32)(block_size) / (1 << chip->page_shift));
  3238. } else {
  3239. start_addr = part_get_startaddress(instr->addr, &idx);
  3240. block_size = mtd->eraseregions[idx].erasesize;
  3241. pages_per_block = (u32)((u32)(block_size) / (1 << chip->page_shift));
  3242. }
  3243. #endif
  3244. /* Select the NAND device */
  3245. chip->select_chip(mtd, chipnr);
  3246. /* Check, if it is write protected */
  3247. if (nand_check_wp(mtd)) {
  3248. pr_debug("%s: device is write protected!\n",
  3249. __func__);
  3250. instr->state = MTD_ERASE_FAILED;
  3251. goto erase_exit;
  3252. }
  3253. /* Loop through the pages */
  3254. len = instr->len;
  3255. instr->state = MTD_ERASING;
  3256. while (len) {
  3257. /* Check if we have a bad block, we do not erase bad blocks! */
  3258. if (nand_block_checkbad(mtd, ((loff_t) page) <<
  3259. chip->page_shift, 0, allowbbt)) {
  3260. pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
  3261. __func__, page);
  3262. instr->state = MTD_ERASE_FAILED;
  3263. goto erase_exit;
  3264. }
  3265. /*
  3266. * Invalidate the page cache, if we erase the block which
  3267. * contains the current cached page.
  3268. */
  3269. if (page <= chip->pagebuf && chip->pagebuf <
  3270. (page + pages_per_block))
  3271. chip->pagebuf = -1;
  3272. #ifdef CONFIG_MTK_MTD_NAND
  3273. mtk_nss_invalidate_cache_by_range(g_mtk_nss_cachev, page,
  3274. page + pages_per_block - 1);
  3275. #endif
  3276. #ifdef CONFIG_MTK_MTD_NAND
  3277. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3278. status = chip->erase_hw(mtd, page % (chip->pagemask + 1));
  3279. #else
  3280. status = chip->erase_hw(mtd, page & chip->pagemask);
  3281. #endif
  3282. #else
  3283. status = chip->erase(mtd, page & chip->pagemask);
  3284. #endif
  3285. /*
  3286. * See if operation failed and additional status checks are
  3287. * available
  3288. */
  3289. if ((status & NAND_STATUS_FAIL) && (chip->errstat))
  3290. status = chip->errstat(mtd, chip, FL_ERASING,
  3291. status, page);
  3292. /* See if block erase succeeded */
  3293. if (status & NAND_STATUS_FAIL) {
  3294. pr_debug("%s: failed erase, page 0x%08x\n",
  3295. __func__, page);
  3296. instr->state = MTD_ERASE_FAILED;
  3297. instr->fail_addr =
  3298. ((loff_t)page << chip->page_shift);
  3299. goto erase_exit;
  3300. }
  3301. /* Increment page address and decrement length */
  3302. #if (defined(CONFIG_MTK_MLC_NAND_SUPPORT) || defined(CONFIG_MTK_TLC_NAND_SUPPORT))
  3303. len -= block_size;
  3304. #else
  3305. len -= (1ULL << chip->phys_erase_shift);
  3306. #endif
  3307. page += pages_per_block;
  3308. /* Check, if we cross a chip boundary */
  3309. #if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
  3310. if (len && !(page % (chip->pagemask + 1))) {
  3311. #else
  3312. if (len && !(page & chip->pagemask)) {
  3313. #endif
  3314. chipnr++;
  3315. chip->select_chip(mtd, -1);
  3316. chip->select_chip(mtd, chipnr);
  3317. }
  3318. }
  3319. instr->state = MTD_ERASE_DONE;
  3320. erase_exit:
  3321. ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
  3322. /* Deselect and wake up anyone waiting on the device */
  3323. chip->select_chip(mtd, -1);
  3324. nand_release_device(mtd);
  3325. /* Do call back function */
  3326. if (!ret)
  3327. mtd_erase_callback(instr);
  3328. /* Return more or less happy */
  3329. return ret;
  3330. }
  3331. /**
  3332. * nand_sync - [MTD Interface] sync
  3333. * @mtd: MTD device structure
  3334. *
  3335. * Sync is actually a wait for chip ready function.
  3336. */
  3337. static void nand_sync(struct mtd_info *mtd)
  3338. {
  3339. pr_debug("%s: called\n", __func__);
  3340. /* Grab the lock and see if the device is available */
  3341. nand_get_device(mtd, FL_SYNCING);
  3342. /* Release it and go back */
  3343. nand_release_device(mtd);
  3344. }
  3345. /**
  3346. * nand_block_isbad - [MTD Interface] Check if block at offset is bad
  3347. * @mtd: MTD device structure
  3348. * @offs: offset relative to mtd start
  3349. */
  3350. static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
  3351. {
  3352. return nand_block_checkbad(mtd, offs, 1, 0);
  3353. }
  3354. /**
  3355. * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
  3356. * @mtd: MTD device structure
  3357. * @ofs: offset relative to mtd start
  3358. */
  3359. static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs, const uint8_t *buffer)
  3360. {
  3361. int ret;
  3362. #ifdef CONFIG_MTK_MTD_NAND
  3363. struct nand_chip *chip = mtd->priv;
  3364. #endif
  3365. ret = nand_block_isbad(mtd, ofs);
  3366. if (ret) {
  3367. /* If it was bad already, return success and do nothing */
  3368. if (ret > 0)
  3369. return 0;
  3370. return ret;
  3371. }
  3372. #ifdef CONFIG_MTK_MTD_NAND
  3373. return chip->block_markbad(mtd, ofs, buffer);
  3374. #else
  3375. return nand_block_markbad_lowlevel(mtd, ofs);
  3376. #endif
  3377. }
  3378. /**
3379. * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
  3380. * @mtd: MTD device structure
  3381. * @chip: nand chip info structure
  3382. * @addr: feature address.
3383. * @subfeature_param: the subfeature parameters, a four-byte array.
  3384. */
  3385. static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
  3386. int addr, uint8_t *subfeature_param)
  3387. {
  3388. int status;
  3389. int i;
  3390. if (!chip->onfi_version ||
  3391. !(le16_to_cpu(chip->onfi_params.opt_cmd)
  3392. & ONFI_OPT_CMD_SET_GET_FEATURES))
  3393. return -EINVAL;
  3394. chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
  3395. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
  3396. chip->write_byte(mtd, subfeature_param[i]);
  3397. status = chip->waitfunc(mtd, chip);
  3398. if (status & NAND_STATUS_FAIL)
  3399. return -EIO;
  3400. return 0;
  3401. }
  3402. /**
3403. * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
  3404. * @mtd: MTD device structure
  3405. * @chip: nand chip info structure
  3406. * @addr: feature address.
3407. * @subfeature_param: the subfeature parameters, a four-byte array.
  3408. */
  3409. static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
  3410. int addr, uint8_t *subfeature_param)
  3411. {
  3412. int i;
  3413. if (!chip->onfi_version ||
  3414. !(le16_to_cpu(chip->onfi_params.opt_cmd)
  3415. & ONFI_OPT_CMD_SET_GET_FEATURES))
  3416. return -EINVAL;
  3417. /* clear the sub feature parameters */
  3418. memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
  3419. chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
  3420. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
  3421. *subfeature_param++ = chip->read_byte(mtd);
  3422. return 0;
  3423. }
  3424. /**
  3425. * nand_suspend - [MTD Interface] Suspend the NAND flash
  3426. * @mtd: MTD device structure
  3427. */
  3428. static int nand_suspend(struct mtd_info *mtd)
  3429. {
  3430. return nand_get_device(mtd, FL_PM_SUSPENDED);
  3431. }
  3432. /**
  3433. * nand_resume - [MTD Interface] Resume the NAND flash
  3434. * @mtd: MTD device structure
  3435. */
  3436. static void nand_resume(struct mtd_info *mtd)
  3437. {
  3438. struct nand_chip *chip = mtd->priv;
  3439. if (chip->state == FL_PM_SUSPENDED)
  3440. nand_release_device(mtd);
  3441. else
  3442. pr_err("%s called for a chip which is not in suspended state\n",
  3443. __func__);
  3444. }
  3445. /* Set default functions */
  3446. static void nand_set_defaults(struct nand_chip *chip, int busw)
  3447. {
  3448. /* check for proper chip_delay setup, set 20us if not */
  3449. if (!chip->chip_delay)
  3450. chip->chip_delay = 20;
3451. /* check if a user-supplied command function was given */
  3452. if (chip->cmdfunc == NULL)
  3453. chip->cmdfunc = nand_command;
3454. /* check if a user-supplied wait function was given */
  3455. if (chip->waitfunc == NULL)
  3456. chip->waitfunc = nand_wait;
  3457. if (!chip->select_chip)
  3458. chip->select_chip = nand_select_chip;
  3459. /* set for ONFI nand */
  3460. if (!chip->onfi_set_features)
  3461. chip->onfi_set_features = nand_onfi_set_features;
  3462. if (!chip->onfi_get_features)
  3463. chip->onfi_get_features = nand_onfi_get_features;
  3464. /* If called twice, pointers that depend on busw may need to be reset */
  3465. if (!chip->read_byte || chip->read_byte == nand_read_byte)
  3466. chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
  3467. if (!chip->read_word)
  3468. chip->read_word = nand_read_word;
  3469. if (!chip->block_bad)
  3470. chip->block_bad = nand_block_bad;
  3471. if (!chip->block_markbad)
  3472. chip->block_markbad = nand_default_block_markbad;
  3473. if (!chip->write_buf || chip->write_buf == nand_write_buf)
  3474. chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
  3475. if (!chip->write_byte || chip->write_byte == nand_write_byte)
  3476. chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
  3477. if (!chip->read_buf || chip->read_buf == nand_read_buf)
  3478. chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
  3479. if (!chip->scan_bbt)
  3480. chip->scan_bbt = nand_default_bbt;
  3481. if (!chip->controller) {
  3482. chip->controller = &chip->hwcontrol;
  3483. spin_lock_init(&chip->controller->lock);
  3484. init_waitqueue_head(&chip->controller->wq);
  3485. }
  3486. }
  3487. /* Sanitize ONFI strings so we can safely print them */
  3488. static void sanitize_string(uint8_t *s, size_t len)
  3489. {
  3490. ssize_t i;
  3491. /* Null terminate */
  3492. s[len - 1] = 0;
  3493. /* Remove non printable chars */
  3494. for (i = 0; i < len - 1; i++) {
  3495. if (s[i] < ' ' || s[i] > 127)
  3496. s[i] = '?';
  3497. }
  3498. /* Remove trailing spaces */
  3499. strim(s);
  3500. }
  3501. static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
  3502. {
  3503. int i;
  3504. while (len--) {
  3505. crc ^= *p++ << 8;
  3506. for (i = 0; i < 8; i++)
  3507. crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
  3508. }
  3509. return crc;
  3510. }
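/*
 * Usage sketch for onfi_crc16(), mirroring the checks made later in this
 * file: the CRC covers bytes 0..253 of the parameter page and is compared
 * against the little-endian CRC stored in the page itself; ONFI_CRC_BASE is
 * the seed value defined by the ONFI specification.
 */
#if 0
	struct nand_onfi_params *p = &chip->onfi_params;

	if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == le16_to_cpu(p->crc))
		; /* this copy of the parameter page is valid */
#endif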
  3511. /* Parse the Extended Parameter Page. */
  3512. static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
  3513. struct nand_chip *chip, struct nand_onfi_params *p)
  3514. {
  3515. struct onfi_ext_param_page *ep;
  3516. struct onfi_ext_section *s;
  3517. struct onfi_ext_ecc_info *ecc;
  3518. uint8_t *cursor;
  3519. int ret = -EINVAL;
  3520. int len;
  3521. int i;
  3522. len = le16_to_cpu(p->ext_param_page_length) * 16;
  3523. ep = kmalloc(len, GFP_KERNEL);
  3524. if (!ep)
  3525. return -ENOMEM;
  3526. /* Send our own NAND_CMD_PARAM. */
  3527. chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
  3528. /* Use the Change Read Column command to skip the ONFI param pages. */
  3529. chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
3530. sizeof(*p) * p->num_of_param_pages, -1);
  3531. /* Read out the Extended Parameter Page. */
  3532. chip->read_buf(mtd, (uint8_t *)ep, len);
  3533. if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
  3534. != le16_to_cpu(ep->crc))) {
  3535. pr_debug("fail in the CRC.\n");
  3536. goto ext_out;
  3537. }
  3538. /*
  3539. * Check the signature.
3540. * This does not strictly follow the ONFI spec and may change in the future.
  3541. */
  3542. if (strncmp(ep->sig, "EPPS", 4)) {
  3543. pr_debug("The signature is invalid.\n");
  3544. goto ext_out;
  3545. }
  3546. /* find the ECC section. */
  3547. cursor = (uint8_t *)(ep + 1);
  3548. for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
  3549. s = ep->sections + i;
  3550. if (s->type == ONFI_SECTION_TYPE_2)
  3551. break;
  3552. cursor += s->length * 16;
  3553. }
  3554. if (i == ONFI_EXT_SECTION_MAX) {
  3555. pr_debug("We can not find the ECC section.\n");
  3556. goto ext_out;
  3557. }
  3558. /* get the info we want. */
  3559. ecc = (struct onfi_ext_ecc_info *)cursor;
  3560. if (!ecc->codeword_size) {
  3561. pr_debug("Invalid codeword size\n");
  3562. goto ext_out;
  3563. }
  3564. chip->ecc_strength_ds = ecc->ecc_bits;
  3565. chip->ecc_step_ds = 1 << ecc->codeword_size;
  3566. ret = 0;
  3567. ext_out:
  3568. kfree(ep);
  3569. return ret;
  3570. }
  3571. static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
  3572. {
  3573. struct nand_chip *chip = mtd->priv;
  3574. uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
  3575. return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
  3576. feature);
  3577. }
  3578. /*
  3579. * Configure chip properties from Micron vendor-specific ONFI table
  3580. */
  3581. static void nand_onfi_detect_micron(struct nand_chip *chip,
  3582. struct nand_onfi_params *p)
  3583. {
  3584. struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
  3585. if (le16_to_cpu(p->vendor_revision) < 1)
  3586. return;
  3587. chip->read_retries = micron->read_retry_options;
  3588. chip->setup_read_retry = nand_setup_read_retry_micron;
  3589. }
  3590. /*
  3591. * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  3592. */
  3593. static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
  3594. int *busw)
  3595. {
  3596. struct nand_onfi_params *p = &chip->onfi_params;
  3597. int i, j;
  3598. int val;
  3599. /* Try ONFI for unknown chip or LP */
  3600. chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
  3601. if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
  3602. chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
  3603. return 0;
  3604. chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
  3605. for (i = 0; i < 3; i++) {
  3606. for (j = 0; j < sizeof(*p); j++)
  3607. ((uint8_t *)p)[j] = chip->read_byte(mtd);
  3608. if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
  3609. le16_to_cpu(p->crc)) {
  3610. break;
  3611. }
  3612. }
  3613. if (i == 3) {
  3614. pr_err("Could not find valid ONFI parameter page; aborting\n");
  3615. return 0;
  3616. }
  3617. /* Check version */
  3618. val = le16_to_cpu(p->revision);
  3619. if (val & (1 << 5))
  3620. chip->onfi_version = 23;
  3621. else if (val & (1 << 4))
  3622. chip->onfi_version = 22;
  3623. else if (val & (1 << 3))
  3624. chip->onfi_version = 21;
  3625. else if (val & (1 << 2))
  3626. chip->onfi_version = 20;
  3627. else if (val & (1 << 1))
  3628. chip->onfi_version = 10;
  3629. if (!chip->onfi_version) {
  3630. pr_info("unsupported ONFI version: %d\n", val);
  3631. return 0;
  3632. }
  3633. sanitize_string(p->manufacturer, sizeof(p->manufacturer));
  3634. sanitize_string(p->model, sizeof(p->model));
  3635. if (!mtd->name)
  3636. mtd->name = p->model;
  3637. mtd->writesize = le32_to_cpu(p->byte_per_page);
  3638. /*
  3639. * pages_per_block and blocks_per_lun may not be a power-of-2 size
  3640. * (don't ask me who thought of this...). MTD assumes that these
  3641. * dimensions will be power-of-2, so just truncate the remaining area.
  3642. */
  3643. mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
  3644. mtd->erasesize *= mtd->writesize;
  3645. mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
  3646. /* See erasesize comment */
  3647. chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
  3648. chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
  3649. chip->bits_per_cell = p->bits_per_cell;
  3650. if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
  3651. *busw = NAND_BUSWIDTH_16;
  3652. else
  3653. *busw = 0;
  3654. if (p->ecc_bits != 0xff) {
  3655. chip->ecc_strength_ds = p->ecc_bits;
  3656. chip->ecc_step_ds = 512;
  3657. } else if (chip->onfi_version >= 21 &&
  3658. (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
  3659. /*
3660. * nand_flash_detect_ext_param_page() uses the Change Read Column
3661. * command, which may not be supported by the current chip->cmdfunc,
3662. * so try to update chip->cmdfunc now. We do not replace a
3663. * user-supplied command function.
  3664. */
  3665. if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
  3666. chip->cmdfunc = nand_command_lp;
  3667. /* The Extended Parameter Page is supported since ONFI 2.1. */
  3668. if (nand_flash_detect_ext_param_page(mtd, chip, p))
  3669. pr_warn("Failed to detect ONFI extended param page\n");
  3670. } else {
  3671. pr_warn("Could not retrieve ONFI ECC requirements\n");
  3672. }
  3673. if (p->jedec_id == NAND_MFR_MICRON)
  3674. nand_onfi_detect_micron(chip, p);
  3675. return 1;
  3676. }
  3677. /*
  3678. * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
  3679. */
  3680. static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
  3681. int *busw)
  3682. {
  3683. struct nand_jedec_params *p = &chip->jedec_params;
  3684. struct jedec_ecc_info *ecc;
  3685. int val;
  3686. int i, j;
  3687. /* Try JEDEC for unknown chip or LP */
  3688. chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
  3689. if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
  3690. chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
  3691. chip->read_byte(mtd) != 'C')
  3692. return 0;
  3693. chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
  3694. for (i = 0; i < 3; i++) {
  3695. for (j = 0; j < sizeof(*p); j++)
  3696. ((uint8_t *)p)[j] = chip->read_byte(mtd);
  3697. if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
  3698. le16_to_cpu(p->crc))
  3699. break;
  3700. }
  3701. if (i == 3) {
  3702. pr_err("Could not find valid JEDEC parameter page; aborting\n");
  3703. return 0;
  3704. }
  3705. /* Check version */
  3706. val = le16_to_cpu(p->revision);
  3707. if (val & (1 << 2))
  3708. chip->jedec_version = 10;
  3709. else if (val & (1 << 1))
  3710. chip->jedec_version = 1; /* vendor specific version */
  3711. if (!chip->jedec_version) {
  3712. pr_info("unsupported JEDEC version: %d\n", val);
  3713. return 0;
  3714. }
  3715. sanitize_string(p->manufacturer, sizeof(p->manufacturer));
  3716. sanitize_string(p->model, sizeof(p->model));
  3717. if (!mtd->name)
  3718. mtd->name = p->model;
  3719. mtd->writesize = le32_to_cpu(p->byte_per_page);
3720. /* Please refer to the comment in nand_flash_detect_onfi(). */
  3721. mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
  3722. mtd->erasesize *= mtd->writesize;
  3723. mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3724. /* Please refer to the comment in nand_flash_detect_onfi(). */
  3725. chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
  3726. chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
  3727. chip->bits_per_cell = p->bits_per_cell;
  3728. if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
  3729. *busw = NAND_BUSWIDTH_16;
  3730. else
  3731. *busw = 0;
  3732. /* ECC info */
  3733. ecc = &p->ecc_info[0];
  3734. if (ecc->codeword_size >= 9) {
  3735. chip->ecc_strength_ds = ecc->ecc_bits;
  3736. chip->ecc_step_ds = 1 << ecc->codeword_size;
  3737. } else {
  3738. pr_warn("Invalid codeword size\n");
  3739. }
  3740. return 1;
  3741. }
  3742. /*
  3743. * nand_id_has_period - Check if an ID string has a given wraparound period
  3744. * @id_data: the ID string
  3745. * @arrlen: the length of the @id_data array
3746. * @period: the period of repetition
  3747. *
  3748. * Check if an ID string is repeated within a given sequence of bytes at
  3749. * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
  3750. * period of 3). This is a helper function for nand_id_len(). Returns non-zero
  3751. * if the repetition has a period of @period; otherwise, returns zero.
  3752. */
  3753. static int nand_id_has_period(u8 *id_data, int arrlen, int period)
  3754. {
  3755. int i, j;
  3756. for (i = 0; i < period; i++)
  3757. for (j = i + period; j < arrlen; j += period)
  3758. if (id_data[i] != id_data[j])
  3759. return 0;
  3760. return 1;
  3761. }
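/*
 * Worked example for the helper above: with id_data = {0x20, 0x01, 0x7F,
 * 0x20} and arrlen = 4, nand_id_has_period(id_data, 4, 3) returns 1 because
 * id_data[0] == id_data[3], while periods 1 and 2 fail on their first
 * comparison; nand_id_len() therefore reports an ID length of 3.
 */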
  3762. /*
  3763. * nand_id_len - Get the length of an ID string returned by CMD_READID
  3764. * @id_data: the ID string
  3765. * @arrlen: the length of the @id_data array
  3766. * Returns the length of the ID string, according to known wraparound/trailing
  3767. * zero patterns. If no pattern exists, returns the length of the array.
  3768. */
  3769. static int nand_id_len(u8 *id_data, int arrlen)
  3770. {
  3771. int last_nonzero, period;
  3772. /* Find last non-zero byte */
  3773. for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
  3774. if (id_data[last_nonzero])
  3775. break;
  3776. /* All zeros */
  3777. if (last_nonzero < 0)
  3778. return 0;
  3779. /* Calculate wraparound period */
  3780. for (period = 1; period < arrlen; period++)
  3781. if (nand_id_has_period(id_data, arrlen, period))
  3782. break;
  3783. /* There's a repeated pattern */
  3784. if (period < arrlen)
  3785. return period;
  3786. /* There are trailing zeros */
  3787. if (last_nonzero < arrlen - 1)
  3788. return last_nonzero + 1;
  3789. /* No pattern detected */
  3790. return arrlen;
  3791. }
3792. /* Extract the number of bits per cell from the 3rd byte of the extended ID */
  3793. static int nand_get_bits_per_cell(u8 cellinfo)
  3794. {
  3795. int bits;
  3796. bits = cellinfo & NAND_CI_CELLTYPE_MSK;
  3797. bits >>= NAND_CI_CELLTYPE_SHIFT;
  3798. return bits + 1;
  3799. }
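/*
 * Example (assuming the usual NAND_CI_CELLTYPE_MSK/SHIFT definitions, which
 * select bits 3:2 of the 3rd ID byte): a field value of 0 yields 1 bit per
 * cell (SLC), 1 yields 2 bits per cell (MLC) and 2 yields 3 bits per cell
 * (TLC); nand_is_slc() simply checks for bits_per_cell == 1.
 */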
  3800. /*
3801. * Many newer NAND chips share similar device ID codes, which represent the size of the
  3802. * chip. The rest of the parameters must be decoded according to generic or
  3803. * manufacturer-specific "extended ID" decoding patterns.
  3804. */
  3805. static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
  3806. u8 id_data[8], int *busw)
  3807. {
  3808. int extid, id_len;
  3809. /* The 3rd id byte holds MLC / multichip data */
  3810. chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
  3811. /* The 4th id byte is the important one */
  3812. extid = id_data[3];
  3813. id_len = nand_id_len(id_data, 8);
  3814. /*
  3815. * Field definitions are in the following datasheets:
  3816. * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
  3817. * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
  3818. * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
  3819. *
  3820. * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
  3821. * ID to decide what to do.
  3822. */
  3823. if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
  3824. !nand_is_slc(chip) && id_data[5] != 0x00) {
  3825. /* Calc pagesize */
  3826. mtd->writesize = 2048 << (extid & 0x03);
  3827. extid >>= 2;
  3828. /* Calc oobsize */
  3829. switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
  3830. case 1:
  3831. mtd->oobsize = 128;
  3832. break;
  3833. case 2:
  3834. mtd->oobsize = 218;
  3835. break;
  3836. case 3:
  3837. mtd->oobsize = 400;
  3838. break;
  3839. case 4:
  3840. mtd->oobsize = 436;
  3841. break;
  3842. case 5:
  3843. mtd->oobsize = 512;
  3844. break;
  3845. case 6:
  3846. mtd->oobsize = 640;
  3847. break;
  3848. case 7:
  3849. default: /* Other cases are "reserved" (unknown) */
  3850. mtd->oobsize = 1024;
  3851. break;
  3852. }
  3853. extid >>= 2;
  3854. /* Calc blocksize */
  3855. mtd->erasesize = (128 * 1024) <<
  3856. (((extid >> 1) & 0x04) | (extid & 0x03));
  3857. *busw = 0;
  3858. } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
  3859. !nand_is_slc(chip)) {
  3860. unsigned int tmp;
  3861. /* Calc pagesize */
  3862. mtd->writesize = 2048 << (extid & 0x03);
  3863. extid >>= 2;
  3864. /* Calc oobsize */
  3865. switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
  3866. case 0:
  3867. mtd->oobsize = 128;
  3868. break;
  3869. case 1:
  3870. mtd->oobsize = 224;
  3871. break;
  3872. case 2:
  3873. mtd->oobsize = 448;
  3874. break;
  3875. case 3:
  3876. mtd->oobsize = 64;
  3877. break;
  3878. case 4:
  3879. mtd->oobsize = 32;
  3880. break;
  3881. case 5:
  3882. mtd->oobsize = 16;
  3883. break;
  3884. default:
  3885. mtd->oobsize = 640;
  3886. break;
  3887. }
  3888. extid >>= 2;
  3889. /* Calc blocksize */
  3890. tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
  3891. if (tmp < 0x03)
  3892. mtd->erasesize = (128 * 1024) << tmp;
  3893. else if (tmp == 0x03)
  3894. mtd->erasesize = 768 * 1024;
  3895. else
  3896. mtd->erasesize = (64 * 1024) << tmp;
  3897. *busw = 0;
  3898. } else {
  3899. /* Calc pagesize */
  3900. mtd->writesize = 1024 << (extid & 0x03);
  3901. extid >>= 2;
  3902. /* Calc oobsize */
  3903. mtd->oobsize = (8 << (extid & 0x01)) *
  3904. (mtd->writesize >> 9);
  3905. extid >>= 2;
  3906. /* Calc blocksize. Blocksize is multiples of 64KiB */
  3907. mtd->erasesize = (64 * 1024) << (extid & 0x03);
  3908. extid >>= 2;
  3909. /* Get buswidth information */
  3910. *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
  3911. /*
  3912. * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
  3913. * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
  3914. * follows:
  3915. * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
  3916. * 110b -> 24nm
  3917. * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
  3918. */
  3919. if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
  3920. nand_is_slc(chip) &&
  3921. (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
  3922. !(id_data[4] & 0x80) /* !BENAND */) {
  3923. mtd->oobsize = 32 * mtd->writesize >> 9;
  3924. }
  3925. }
  3926. }
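/*
 * Worked example for the generic (non-Samsung/Hynix) branch above, using a
 * hypothetical 4th ID byte of 0x15 (binary 0001 0101):
 */
#if 0
	int extid = 0x15;

	mtd->writesize = 1024 << (extid & 0x03);	/* 1024 << 1 = 2048      */
	extid >>= 2;
	mtd->oobsize = (8 << (extid & 0x01)) *
		       (mtd->writesize >> 9);		/* 16 * 4 = 64           */
	extid >>= 2;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);	/* 64 KiB << 1 = 128 KiB */
	extid >>= 2;
	*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;	/* x8 bus here           */
#endif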
  3927. /*
  3928. * Old devices have chip data hardcoded in the device ID table. nand_decode_id
  3929. * decodes a matching ID table entry and assigns the MTD size parameters for
  3930. * the chip.
  3931. */
  3932. static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
  3933. struct nand_flash_dev *type, u8 id_data[8],
  3934. int *busw)
  3935. {
  3936. int maf_id = id_data[0];
  3937. mtd->erasesize = type->erasesize;
  3938. mtd->writesize = type->pagesize;
  3939. mtd->oobsize = mtd->writesize / 32;
  3940. *busw = type->options & NAND_BUSWIDTH_16;
  3941. /* All legacy ID NAND are small-page, SLC */
  3942. chip->bits_per_cell = 1;
  3943. /*
  3944. * Check for Spansion/AMD ID + repeating 5th, 6th byte since
  3945. * some Spansion chips have erasesize that conflicts with size
  3946. * listed in nand_ids table.
  3947. * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
  3948. */
  3949. if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
  3950. && id_data[6] == 0x00 && id_data[7] == 0x00
  3951. && mtd->writesize == 512) {
  3952. mtd->erasesize = 128 * 1024;
  3953. mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
  3954. }
  3955. }
  3956. /*
  3957. * Set the bad block marker/indicator (BBM/BBI) patterns according to some
  3958. * heuristic patterns using various detected parameters (e.g., manufacturer,
  3959. * page size, cell-type information).
  3960. */
  3961. static void nand_decode_bbm_options(struct mtd_info *mtd,
  3962. struct nand_chip *chip, u8 id_data[8])
  3963. {
  3964. int maf_id = id_data[0];
  3965. /* Set the bad block position */
  3966. if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
  3967. chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
  3968. else
  3969. chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
  3970. /*
  3971. * Bad block marker is stored in the last page of each block on Samsung
  3972. * and Hynix MLC devices; stored in first two pages of each block on
  3973. * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
  3974. * AMD/Spansion, and Macronix. All others scan only the first page.
  3975. */
  3976. if (!nand_is_slc(chip) &&
  3977. (maf_id == NAND_MFR_SAMSUNG ||
  3978. maf_id == NAND_MFR_HYNIX))
  3979. chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
  3980. else if ((nand_is_slc(chip) &&
  3981. (maf_id == NAND_MFR_SAMSUNG ||
  3982. maf_id == NAND_MFR_HYNIX ||
  3983. maf_id == NAND_MFR_TOSHIBA ||
  3984. maf_id == NAND_MFR_AMD ||
  3985. maf_id == NAND_MFR_MACRONIX)) ||
  3986. (mtd->writesize == 2048 &&
  3987. maf_id == NAND_MFR_MICRON))
  3988. chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
  3989. }
  3990. static inline bool is_full_id_nand(struct nand_flash_dev *type)
  3991. {
  3992. return type->id_len;
  3993. }
  3994. static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
  3995. struct nand_flash_dev *type, u8 *id_data, int *busw)
  3996. {
  3997. if (!strncmp(type->id, id_data, type->id_len)) {
  3998. mtd->writesize = type->pagesize;
  3999. mtd->erasesize = type->erasesize;
  4000. mtd->oobsize = type->oobsize;
  4001. chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
  4002. #ifdef CONFIG_MTK_TLC_NAND_SUPPORT
  4003. chip->chipsize = (uint64_t)type->chipsize << 10;
  4004. #else
  4005. chip->chipsize = (uint64_t)type->chipsize << 20;
  4006. #endif
  4007. chip->options |= type->options;
  4008. chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
  4009. chip->ecc_step_ds = NAND_ECC_STEP(type);
  4010. chip->onfi_timing_mode_default =
  4011. type->onfi_timing_mode_default;
  4012. *busw = type->options & NAND_BUSWIDTH_16;
  4013. if (!mtd->name)
  4014. mtd->name = type->name;
  4015. return true;
  4016. }
  4017. return false;
  4018. }
  4019. /*
  4020. * Get the flash and manufacturer id and lookup if the type is supported.
  4021. */
  4022. static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
  4023. struct nand_chip *chip,
  4024. int *maf_id, int *dev_id,
  4025. struct nand_flash_dev *type)
  4026. {
  4027. int busw;
  4028. int i, maf_idx;
  4029. u8 id_data[8];
  4030. /* Select the device */
  4031. chip->select_chip(mtd, 0);
  4032. /*
  4033. * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
  4034. * after power-up.
  4035. */
  4036. chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
  4037. /* Send the command for reading device ID */
  4038. chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
  4039. /* Read manufacturer and device IDs */
  4040. *maf_id = chip->read_byte(mtd);
  4041. *dev_id = chip->read_byte(mtd);
  4042. /*
4043. * Try again to make sure, as on some systems bus-hold or other
  4044. * interface concerns can cause random data which looks like a
  4045. * possibly credible NAND flash to appear. If the two results do
  4046. * not match, ignore the device completely.
  4047. */
  4048. chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
  4049. /* Read entire ID string */
  4050. for (i = 0; i < 8; i++)
  4051. id_data[i] = chip->read_byte(mtd);
  4052. if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
  4053. pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
  4054. *maf_id, *dev_id, id_data[0], id_data[1]);
  4055. return ERR_PTR(-ENODEV);
  4056. }
  4057. if (!type)
  4058. type = nand_flash_ids;
  4059. for (; type->name != NULL; type++) {
  4060. if (is_full_id_nand(type)) {
  4061. if (find_full_id_nand(mtd, chip, type, id_data, &busw))
  4062. goto ident_done;
  4063. } else if (*dev_id == type->dev_id) {
  4064. break;
  4065. }
  4066. }
  4067. chip->onfi_version = 0;
  4068. if (!type->name || !type->pagesize) {
  4069. /* Check if the chip is ONFI compliant */
  4070. if (nand_flash_detect_onfi(mtd, chip, &busw))
  4071. goto ident_done;
  4072. /* Check if the chip is JEDEC compliant */
  4073. if (nand_flash_detect_jedec(mtd, chip, &busw))
  4074. goto ident_done;
  4075. }
  4076. if (!type->name)
  4077. return ERR_PTR(-ENODEV);
  4078. if (!mtd->name)
  4079. mtd->name = type->name;
  4080. #ifdef CONFIG_MTK_TLC_NAND_SUPPORT
  4081. chip->chipsize = (uint64_t)type->chipsize << 10;
  4082. #else
  4083. chip->chipsize = (uint64_t)type->chipsize << 20;
  4084. #endif
  4085. if (!type->pagesize && chip->init_size) {
  4086. /* Set the pagesize, oobsize, erasesize by the driver */
  4087. busw = chip->init_size(mtd, chip, id_data);
  4088. } else if (!type->pagesize) {
  4089. /* Decode parameters from extended ID */
  4090. nand_decode_ext_id(mtd, chip, id_data, &busw);
  4091. } else {
  4092. nand_decode_id(mtd, chip, type, id_data, &busw);
  4093. }
  4094. /* Get chip options */
  4095. chip->options |= type->options;
  4096. /*
  4097. * Check if chip is not a Samsung device. Do not clear the
  4098. * options for chips which do not have an extended id.
  4099. */
  4100. if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
  4101. chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
  4102. ident_done:
  4103. /* Try to identify manufacturer */
  4104. for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
  4105. if (nand_manuf_ids[maf_idx].id == *maf_id)
  4106. break;
  4107. }
  4108. if (chip->options & NAND_BUSWIDTH_AUTO) {
  4109. WARN_ON(chip->options & NAND_BUSWIDTH_16);
  4110. chip->options |= busw;
  4111. nand_set_defaults(chip, busw);
  4112. } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
  4113. /*
4114. * Check if the buswidth is correct. Hardware drivers should set
4115. * the chip up correctly!
  4116. */
  4117. pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
  4118. *maf_id, *dev_id);
  4119. pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
  4120. pr_warn("bus width %d instead %d bit\n",
  4121. (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
  4122. busw ? 16 : 8);
  4123. return ERR_PTR(-EINVAL);
  4124. }
  4125. nand_decode_bbm_options(mtd, chip, id_data);
  4126. /* Calculate the address shift from the page size */
  4127. chip->page_shift = ffs(mtd->writesize) - 1;
  4128. /* Convert chipsize to number of pages per chip -1 */
  4129. chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
  4130. chip->bbt_erase_shift = chip->phys_erase_shift =
  4131. ffs(mtd->erasesize) - 1;
  4132. if (chip->chipsize & 0xffffffff)
  4133. chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
  4134. else {
  4135. chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
  4136. chip->chip_shift += 32 - 1;
  4137. }
	chip->badblockbits = 8;
	chip->erase = single_erase;

	/* Do not replace user supplied command function! */
	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
		chip->cmdfunc = nand_command_lp;

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		*maf_id, *dev_id);

	if (chip->onfi_version)
		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
			chip->onfi_params.model);
	else if (chip->jedec_version)
		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
			chip->jedec_params.model);
	else
		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
			type->name);

	pr_info("%dMiB, %s, page size: %d, OOB size: %d\n",
		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->writesize, mtd->oobsize);

	return type;
}
/**
 * nand_scan_ident - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * The mtd->owner field must be set to the module of the caller.
 */
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
		    struct nand_flash_dev *table)
{
	int i, nand_maf_id, nand_dev_id;
	struct nand_chip *chip = mtd->priv;
	struct nand_flash_dev *type;

	/* Set the default functions */
	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);

	/* Read the flash type */
	type = nand_get_flash_type(mtd, chip, &nand_maf_id,
				   &nand_dev_id, table);

	if (IS_ERR(type)) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		chip->select_chip(mtd, -1);
		return PTR_ERR(type);
	}

	chip->select_chip(mtd, -1);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		chip->select_chip(mtd, i);
		/* See comment in nand_get_flash_type for reset */
		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		/* Send the command for reading device ID */
		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
		/* Read manufacturer and device IDs */
		if (nand_maf_id != chip->read_byte(mtd) ||
		    nand_dev_id != chip->read_byte(mtd)) {
			chip->select_chip(mtd, -1);
			break;
		}
		chip->select_chip(mtd, -1);
	}

	if (i > 1)
		pr_info("%d NAND chips detected\n", i);

#if (defined(CONFIG_MTK_MLC_NAND_SUPPORT))
	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;
	if (g_b2Die_CS && (i > 1)) {
		chip->pagemask = (mtd->size >> chip->page_shift) - 1;
		if (mtd->size & 0xffffffff)
			chip->chip_shift = ffs((unsigned)mtd->size) - 1;
		else {
			chip->chip_shift = ffs((unsigned)(mtd->size >> 32));
			chip->chip_shift += 32 - 1;
		}
	}
#else
	/* Store the number of chips and calc total size for mtd */
	chip->numchips = i;
	mtd->size = i * chip->chipsize;
#endif

	return 0;
}
EXPORT_SYMBOL(nand_scan_ident);
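/*
 * Illustrative two-phase usage (hypothetical driver code, not part of this
 * file): a controller driver that must choose ECC parameters based on the
 * detected chip can call nand_scan_ident() first, adjust chip->ecc between
 * the phases, and then finish with nand_scan_tail():
 *
 *	err = nand_scan_ident(mtd, 1, NULL);
 *	if (err)
 *		return err;
 *	my_setup_ecc(mtd);	(hypothetical helper picking ecc.size/strength)
 *	err = nand_scan_tail(mtd);
 *	if (err)
 *		return err;
 */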
/*
 * Check if the chip configuration meets the datasheet requirements.
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
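/*
 * Worked example with illustrative numbers (not taken from any datasheet):
 * a controller correcting A = 8 bits per B = 1024 bytes against a chip
 * requiring X = 4 bits per Y = 512 bytes satisfies both (1) (8/1024 equals
 * 4/512) and (2) (8 >= 4).  A controller correcting only 4 bits per
 * 1024 bytes would fail requirement (1) despite satisfying (2).
 */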
static bool nand_ecc_strength_good(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int corr, ds_corr;

	if (ecc->size == 0 || chip->ecc_step_ds == 0)
		/* Not enough information */
		return true;

	/*
	 * We get the number of corrected bits per page to compare
	 * the correction density.
	 */
	corr = (mtd->writesize * ecc->strength) / ecc->size;
	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;

	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
/**
 * nand_scan_tail - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
int nand_scan_tail(struct mtd_info *mtd)
{
	int i;
	struct nand_chip *chip = mtd->priv;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct nand_buffers *nbuf;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
	       !(chip->bbt_options & NAND_BBT_USE_FLASH));

	if (!(chip->options & NAND_OWN_BUFFERS)) {
		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
			       + mtd->oobsize * 3, GFP_KERNEL);
		if (!nbuf)
			return -ENOMEM;
		nbuf->ecccalc = (uint8_t *)(nbuf + 1);
		nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
		nbuf->databuf = nbuf->ecccode + mtd->oobsize;

		chip->buffers = nbuf;
	} else {
		if (!chip->buffers)
			return -ENOMEM;
	}
	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->buffers->databuf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 */
	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
		switch (mtd->oobsize) {
		case 8:
			ecc->layout = &nand_oob_8;
			break;
		case 16:
			ecc->layout = &nand_oob_16;
			break;
		case 64:
			ecc->layout = &nand_oob_64;
			break;
		case 128:
			ecc->layout = &nand_oob_128;
			break;
		default:
			pr_warn("No oob scheme defined for oobsize %d\n",
				mtd->oobsize);
			BUG();
		}
	}
	if (!chip->write_page)
		chip->write_page = nand_write_page;

	/*
	 * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected
	 * and we have a 256-byte page size, fall back to software ECC.
	 */
	switch (ecc->mode) {
	case NAND_ECC_HW_OOB_FIRST:
		/* Similar to NAND_ECC_HW, but a separate read_page handle */
		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
			BUG();
		}
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc_oob_first;
		/* fall through - the remaining defaults are shared with NAND_ECC_HW */

	case NAND_ECC_HW:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage)
			ecc->write_subpage = nand_write_subpage_hwecc;
		/* fall through - the sanity checks below apply here as well */

	case NAND_ECC_HW_SYNDROME:
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
			BUG();
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
				BUG();
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->mode = NAND_ECC_SOFT;
		/* fall through - set up the software ECC defaults */
	case NAND_ECC_SOFT:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;
		break;

	case NAND_ECC_SOFT_BCH:
		if (!mtd_nand_has_bch()) {
			pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
			BUG();
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/*
		 * Board driver should supply ecc.size and ecc.bytes values to
		 * select how many bits are correctable; see nand_bch_init()
		 * for details. Otherwise, default to 4 bits for large page
		 * devices.
		 */
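		/*
		 * Illustrative sizing (assumed figures, check nand_bch_init()
		 * against your part): with ecc.size = 512 the BCH code works
		 * over GF(2^13), so each correctable bit costs 13 ECC bits;
		 * ecc.bytes = 7 yields 4-bit correction (the default below),
		 * while ecc.bytes = 13 would yield 8-bit correction.
		 */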
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->bytes = 7;
		}
		ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
					  &ecc->layout);
		if (!ecc->priv) {
			pr_warn("BCH ECC initialization failed!\n");
			BUG();
		}
		ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size);
		break;

	case NAND_ECC_NONE:
		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
		BUG();
	}
	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ecc->layout->oobavail = 0;
	for (i = 0; i < ARRAY_SIZE(ecc->layout->oobfree)
			&& ecc->layout->oobfree[i].length; i++)
		ecc->layout->oobavail += ecc->layout->oobfree[i].length;
	mtd->oobavail = ecc->layout->oobavail;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_strength_good(mtd))
		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
			mtd->name);

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		pr_warn("Invalid ECC parameters\n");
		BUG();
	}
	ecc->total = ecc->steps * ecc->bytes;

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
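	/*
	 * Worked example with illustrative numbers: assuming an SLC chip with
	 * subpage writes enabled, a 2048-byte page and ecc->size = 512 give
	 * ecc->steps = 4, so subpage_sft becomes 2 and chip->subpagesize
	 * becomes 2048 >> 2 = 512 bytes.
	 */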
	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->mode) {
	case NAND_ECC_SOFT:
	case NAND_ECC_SOFT_BCH:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
						  MTD_CAP_NANDFLASH;
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_read = nand_read;
	mtd->_write = nand_write;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = NULL;
	mtd->_unlock = NULL;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->writebufsize = mtd->writesize;

	/* propagate ecc info to mtd_info */
	mtd->ecclayout = ecc->layout;
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Initialize bitflip_threshold to its default prior to the scan_bbt()
	 * call. scan_bbt() might invoke mtd_read(), so bitflip_threshold must
	 * be set properly by then.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	return chip->scan_bbt(mtd);
}
EXPORT_SYMBOL(nand_scan_tail);
/*
 * is_module_text_address() isn't exported, and it's mostly a pointless
 * test if this is a module _anyway_ -- they'd have to try _really_ hard
 * to call us from in-kernel code if the core NAND support is modular.
 */
#ifdef MODULE
#define caller_is_module() (1)
#else
#define caller_is_module() \
	is_module_text_address((unsigned long)__builtin_return_address(0))
#endif

/**
 * nand_scan - [NAND Interface] Scan for the NAND device
 * @mtd: MTD device structure
 * @maxchips: number of chips to scan for
 *
 * This fills out all the uninitialized function pointers with the defaults.
 * The flash ID is read and the mtd/chip structures are filled with the
 * appropriate values. The mtd->owner field must be set to the module of the
 * caller.
 */
int nand_scan(struct mtd_info *mtd, int maxchips)
{
	int ret;

	/* Many callers got this wrong, so check for it for a while... */
	if (!mtd->owner && caller_is_module()) {
		pr_crit("%s called with NULL mtd->owner!\n", __func__);
		BUG();
	}

	ret = nand_scan_ident(mtd, maxchips, NULL);
#ifdef CONFIG_MTK_MTD_NAND
	/* Init CacheV (does nothing if CacheV is disabled by the driver) */
	mtk_nss_init_cache(mtd);
#endif
	if (!ret)
		ret = nand_scan_tail(mtd);
	return ret;
}
EXPORT_SYMBOL(nand_scan);
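/*
 * Illustrative one-shot usage (hypothetical driver code, not part of this
 * file): a simple board driver wires up its nand_chip, points mtd->priv at
 * it, sets mtd->owner, and lets nand_scan() run both phases.  The my_*
 * names below are made up for the sketch.
 *
 *	chip->IO_ADDR_R = my_io_base;
 *	chip->IO_ADDR_W = my_io_base;
 *	chip->ecc.mode = NAND_ECC_SOFT;
 *	mtd->priv = chip;
 *	mtd->owner = THIS_MODULE;
 *
 *	err = nand_scan(mtd, 1);
 *	if (err)
 *		return err;
 *	err = mtd_device_register(mtd, NULL, 0);
 *
 * On teardown the driver calls nand_release(mtd), which unregisters the MTD
 * device and frees the buffers allocated by nand_scan_tail().
 */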
/**
 * nand_release - [NAND Interface] Free resources held by the NAND device
 * @mtd: MTD device structure
 */
void nand_release(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);

	mtd_device_unregister(mtd);

	/* Free bad block table memory */
	kfree(chip->bbt);
	if (!(chip->options & NAND_OWN_BUFFERS))
		kfree(chip->buffers);

	/* Free bad block descriptor memory */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);
}
EXPORT_SYMBOL_GPL(nand_release);
static int __init nand_base_init(void)
{
	led_trigger_register_simple("nand-disk", &nand_led_trigger);
	return 0;
}

static void __exit nand_base_exit(void)
{
	led_trigger_unregister_simple(nand_led_trigger);
}

module_init(nand_base_init);
module_exit(nand_base_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");