/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
        {
                .procname     = "speed_limit_min",
                .data         = &sysctl_speed_limit_min,
                .maxlen       = sizeof(int),
                .mode         = S_IRUGO|S_IWUSR,
                .proc_handler = proc_dointvec,
        },
        {
                .procname     = "speed_limit_max",
                .data         = &sysctl_speed_limit_max,
                .maxlen       = sizeof(int),
                .mode         = S_IRUGO|S_IWUSR,
                .proc_handler = proc_dointvec,
        },
        { }
};

static struct ctl_table raid_dir_table[] = {
        {
                .procname = "raid",
                .maxlen   = 0,
                .mode     = S_IRUGO|S_IXUGO,
                .child    = raid_table,
        },
        { }
};

static struct ctl_table raid_root_table[] = {
        {
                .procname = "dev",
                .maxlen   = 0,
                .mode     = 0555,
                .child    = raid_dir_table,
        },
        { }
};
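/*
 * Illustrative note (a sketch, not from the original source): once the
 * raid_root_table hierarchy above is registered, the two limits surface
 * as /proc/sys/dev/raid/speed_limit_min and speed_limit_max, so an
 * administrator can raise the guaranteed resync rate with, e.g.:
 *
 *      echo 50000 > /proc/sys/dev/raid/speed_limit_min
 *
 * speed_min()/speed_max() prefer a per-array sync_speed_min/max when one
 * has been set, and fall back to these system-wide values otherwise.
 */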
static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
{
        struct bio *b;

        if (!mddev || !mddev->bio_set)
                return bio_alloc(gfp_mask, nr_iovecs);

        b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
        if (!b)
                return NULL;
        return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                            struct mddev *mddev)
{
        if (!mddev || !mddev->bio_set)
                return bio_clone(bio, gfp_mask);

        return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
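/*
 * Sketch of the userspace side (illustrative, not part of this driver):
 * a monitor opens /proc/mdstat, reads it, then polls; the poll returns
 * whenever md_event_count is bumped by md_new_event().
 *
 *      int fd = open("/proc/mdstat", O_RDONLY);
 *      char buf[4096];
 *      read(fd, buf, sizeof(buf));
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *      poll(&pfd, 1, -1);      (blocks until the next md event)
 *
 * Exactly which revents bits are delivered is a detail of the proc and
 * seq_file layers, not something md guarantees here.
 */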
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)                                     \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                _tmp = all_mddevs.next;                                 \
                _mddev = NULL;});                                       \
             ({ if (_tmp != &all_mddevs)                                \
                        mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (_mddev) mddev_put(_mddev);                          \
                _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
                _tmp != &all_mddevs;});                                 \
             ({ spin_lock(&all_mddevs_lock);                            \
                _tmp = _tmp->next;})                                    \
                )
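/*
 * Example use of for_each_mddev() (an illustrative sketch only):
 *
 *      struct mddev *mddev;
 *      struct list_head *tmp;
 *
 *      for_each_mddev(mddev, tmp)
 *              printk(KERN_INFO "md%d\n", mddev->md_minor);
 *
 * The macro manages the refcount itself across iterations; only code
 * that breaks out of the loop early still owns a reference and must
 * call mddev_put() on the current mddev, as the comment above notes.
 */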
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static void md_make_request(struct request_queue *q, struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = q->queuedata;
        int cpu;
        unsigned int sectors;

        if (mddev == NULL || mddev->pers == NULL
            || !mddev->ready) {
                bio_io_error(bio);
                return;
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
                return;
        }
        smp_rmb(); /* Ensure implications of 'active' are visible */
        rcu_read_lock();
        if (mddev->suspended) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!mddev->suspended)
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        /*
         * save the sectors now since our bio can
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
        mddev->pers->make_request(mddev, bio);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
        part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
        part_stat_unlock();

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(struct mddev *mddev)
{
        BUG_ON(mddev->suspended);
        mddev->suspended = 1;
        synchronize_rcu();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);

        del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
        mddev->suspended = 0;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);

        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
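/*
 * Typical pairing (sketch): a caller quiesces the array around a
 * reconfiguration so no IO is in flight while state changes:
 *
 *      mddev_suspend(mddev);
 *      ... modify layout/geometry ...
 *      mddev_resume(mddev);
 */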
int mddev_congested(struct mddev *mddev, int bits)
{
        return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                /* The pre-request flush has finished */
                queue_work(md_wq, &mddev->flush_work);
        }
        bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct md_rdev *rdev;

        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(WRITE_FLUSH, bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;

        if (bio->bi_iter.bi_size == 0)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
                bio->bi_rw &= ~REQ_FLUSH;
                mddev->pers->make_request(mddev, bio);
        }

        mddev->flush_bio = NULL;
        wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
        spin_lock_irq(&mddev->write_lock);
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio,
                            mddev->write_lock);
        mddev->flush_bio = bio;
        spin_unlock_irq(&mddev->write_lock);

        INIT_WORK(&mddev->flush_work, submit_flushes);
        queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
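/*
 * In short, the flush path above is two-staged: md_flush_request()
 * parks the incoming bio in mddev->flush_bio and queues
 * submit_flushes(), which sends an empty WRITE_FLUSH bio to every
 * active, non-faulty rdev. When the last of those completes,
 * md_submit_flush_data() runs: an empty barrier bio is ended directly,
 * otherwise REQ_FLUSH is cleared and the payload is handed to the
 * personality's make_request().
 */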
void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct mddev *mddev = cb->data;
        md_wakeup_thread(mddev->thread);
        kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
        struct bio_set *bs = NULL;

        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
                        /* We did a probe so need to clean up.  Call
                         * queue_work inside the spinlock so that
                         * flush_workqueue() after mddev_find will
                         * succeed in waiting for the work to be done.
                         */
                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                        queue_work(md_misc_wq, &mddev->del_work);
                } else
                        kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
        if (bs)
                bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        init_timer(&mddev->safemode_timer);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->write_lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->reshape_backwards = 0;
        mddev->last_sync_action = "none";
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);
static struct mddev *mddev_find(dev_t unit)
{
        struct mddev *mddev, *new = NULL;

        if (unit && MAJOR(unit) != MD_MAJOR)
                unit &= ~((1<<MdpMinorShift)-1);

 retry:
        spin_lock(&all_mddevs_lock);

        if (unit) {
                list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                        if (mddev->unit == unit) {
                                mddev_get(mddev);
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return mddev;
                        }

                if (new) {
                        list_add(&new->all_mddevs, &all_mddevs);
                        spin_unlock(&all_mddevs_lock);
                        new->hold_active = UNTIL_IOCTL;
                        return new;
                }
        } else if (new) {
                /* find an unused unit number */
                static int next_minor = 512;
                int start = next_minor;
                int is_free = 0;
                int dev = 0;
                while (!is_free) {
                        dev = MKDEV(MD_MAJOR, next_minor);
                        next_minor++;
                        if (next_minor > MINORMASK)
                                next_minor = 0;
                        if (next_minor == start) {
                                /* Oh dear, all in use. */
                                spin_unlock(&all_mddevs_lock);
                                kfree(new);
                                return NULL;
                        }

                        is_free = 1;
                        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                                if (mddev->unit == dev) {
                                        is_free = 0;
                                        break;
                                }
                }
                new->unit = dev;
                new->md_minor = MINOR(dev);
                new->hold_active = UNTIL_STOP;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        mddev_init(new);

        goto retry;
}
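/*
 * mddev_find() always returns with the refcount raised (via mddev_get()
 * for an existing array, or the initial count set by mddev_init() for a
 * new one), so callers pair it with mddev_put(), e.g. (sketch):
 *
 *      struct mddev *mddev = mddev_find(dev);
 *      if (mddev) {
 *              ... use the array ...
 *              mddev_put(mddev);
 *      }
 */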
static inline int __must_check mddev_lock(struct mddev *mddev)
{
        return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
        mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
        return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev *mddev)
{
        return mutex_trylock(&mddev->reconfig_mutex);
}
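/*
 * Usual pattern around a reconfiguration (sketch; mddev_unlock() is
 * defined below):
 *
 *      if (mddev_lock(mddev) == 0) {
 *              ... modify mddev under reconfig_mutex ...
 *              mddev_unlock(mddev);
 *      }
 *
 * mddev_lock() is interruptible and can fail with -EINTR, hence the
 * __must_check; mddev_lock_nointr() is for paths where such a failure
 * is not acceptable.
 */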
static struct attribute_group md_redundancy_group;

static void mddev_unlock(struct mddev *mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So hold sysfs_active set while the removal is happening,
                 * and anything else which might set ->to_remove or may
                 * otherwise change the sysfs namespace will fail with
                 * -EBUSY if sysfs_active is still set.
                 * We set sysfs_active under reconfig_mutex and elsewhere
                 * test it under the same mutex to ensure its correct value
                 * is seen.
                 */
                struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mddev->sysfs_active = 1;
                mutex_unlock(&mddev->reconfig_mutex);

                if (mddev->kobj.sd) {
                        if (to_remove != &md_redundancy_group)
                                sysfs_remove_group(&mddev->kobj, to_remove);
                        if (mddev->pers == NULL ||
                            mddev->pers->sync_request == NULL) {
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
                                mddev->sysfs_action = NULL;
                        }
                }
                mddev->sysfs_active = 0;
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        /* As we've dropped the mutex we need a spinlock to
         * make sure the thread doesn't disappear
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
        spin_unlock(&pers_lock);
}
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
        struct md_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}
/* return the offset of the super block in 512-byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
        sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
        return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -ENOMEM;
        }

        return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
        if (rdev->bb_page) {
                put_page(rdev->bb_page);
                rdev->bb_page = NULL;
        }
        kfree(rdev->badblocks.page);
        rdev->badblocks.page = NULL;
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio, int error)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk("md: super_written gets error=%d, uptodate=%d\n",
                       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
                WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
                md_error(mddev, rdev);
        }

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                    sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

        bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;

        atomic_inc(&mddev->pending_writes);
        submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
        /* wait for all superblock writes that were scheduled to complete */
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}
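/*
 * Superblock writes are asynchronous; a caller queues one write per
 * rdev with md_super_write() and then blocks until all of them have
 * completed, e.g. (sketch):
 *
 *      md_super_write(mddev, rdev, rdev->sb_start, size, rdev->sb_page);
 *      md_super_wait(mddev);
 *
 * md_super_wait() sleeps on sb_wait until pending_writes reaches zero.
 */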
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	int ret;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	submit_bio_wait(rw, bio);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev, b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1), GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2), GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
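
/*
 * Fold a 32-bit sum down to 16 bits by adding the two halves; e.g.
 * 0x12345678 folds to 0x1234 + 0x5678 = 0x68ac.  The second addition
 * absorbs any carry out of bit 16.
 */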
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32 *)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum >> 32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */
struct super_type {
	char			*name;
	struct module		*owner;
	int			(*load_super)(struct md_rdev *rdev,
					      struct md_rdev *refdev,
					      int minor_version);
	int			(*validate_super)(struct mddev *mddev,
						  struct md_rdev *rdev);
	void			(*sync_super)(struct mddev *mddev,
					      struct md_rdev *rdev);
	unsigned long long	(*rdev_size_change)(struct md_rdev *rdev,
						    sector_t num_sectors);
	int			(*allow_new_offset)(struct md_rdev *rdev,
						    unsigned long long new_offset);
};
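
/*
 * Illustrative sketch (not code from this file): a caller assembling an
 * array would typically keep the freshest device as its reference,
 * following the load_super return convention described above:
 *
 *	switch (super_types[ver].load_super(rdev, refdev, minor)) {
 *	case 1:			// rdev is newer: promote it
 *		refdev = rdev;
 *		break;
 *	case 0:			// compatible: keep current refdev
 *		break;
 *	default:		// -EINVAL, -EIO, ...: reject rdev
 *		goto reject;
 *	}
 */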

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (in 512-byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
		       sb->major_version, sb->minor_version,
		       b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
		       b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12, &sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12, 4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */
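
/*
 * Checksum the 256-byte fixed part of the superblock plus two bytes per
 * dev_roles[] entry, summing little-endian 32-bit words (with a 16-bit
 * tail) and folding the 64-bit total back into 32 bits.
 */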
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32 *)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16 *) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512-byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
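		/* step back 8K from the end, then round down to a 4K boundary */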
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret)
		return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev, b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
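		/*
		 * Each entry packs a bad range into a little-endian u64:
		 * bits 63..10 hold the start sector and bits 9..0 the
		 * length, both scaled by bblog_shift; an all-ones entry
		 * terminates the list.
		 */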
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
			       " superblock to %s\n",
			       bdevname(rdev->bdev, b),
			       bdevname(refdev->bdev, b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks */ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;
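			/* snapshot the list under the seqlock; retry if it
			 * changed while we were copying it out */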
retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i = 0; i < max_dev; i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
		.allow_new_offset   = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
		.allow_new_offset   = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}
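
/* do mddev1 and mddev2 share a component on the same underlying whole disk? */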
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
					  rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
				   bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;

	if (!mddev->gendisk)
		return;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
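
/*
 * Attach rdev to mddev: check for duplicates, validate the device size
 * against the array, pick or verify a unique desc_nr, then link the
 * device into the array's list and sysfs hierarchy.
 */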
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			      rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	rcu_read_lock();
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers)
			choice = mddev->raid_disks;
		while (find_rdev_nr_rcu(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
			rcu_read_unlock();
			return -EBUSY;
		}
	}
	rcu_read_unlock();
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev, b);
	while ((s = strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev, b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	rdev->badblocks.count = 0;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
		       __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "md: export_rdev(%s)\n",
	       bdevname(rdev->bdev, b));
	md_rdev_clear(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev;

	while (!list_empty(&mddev->disks)) {
		rdev = list_first_entry(&mddev->disks, struct md_rdev,
					same_set);
		kick_rdev_from_array(rdev);
	}
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	rdev_for_each(rdev, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
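
/*
 * Sync the in-memory superblocks out to all devices, repeating the write
 * if further changes (or a clean/dirty transition) arrive while it is in
 * flight.
 */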
static void md_update_sb(struct mddev *mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

	if (mddev->ro) {
		if (force_change)
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
		return;
	}
repeat:
	/* First make sure individual recovery_offsets are correct */
	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external) {
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			rdev_for_each(rdev, mddev) {
				if (rdev->badblocks.changed) {
					rdev->badblocks.changed = 0;
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock_irq(&mddev->write_lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events++;
		mddev->can_decrease_events = nospares;
	}

	/*
	 * This 64-bit counter should never wrap.
	 * Either we are in around ~1 trillion A.C., assuming
	 * 1 reboot per second, or we have a bug...
	 */
	WARN_ON(mddev->events == 0);

	rdev_for_each(rdev, mddev) {
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}

	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev, rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}

		} else
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	rdev_for_each(rdev, mddev) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			md_ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
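/* e.g. cmd_match("-blocked\n", "-blocked") == 1, cmd_match("block", "blocked") == 0 */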

struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct md_rdev *, char *);
	ssize_t (*store)(struct md_rdev *, const char *, size_t);
};

static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
		len += sprintf(page+len, "%sfaulty", sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync", sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly", sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags) ||
	    (rdev->badblocks.unacked_exist
	     && !test_bit(Faulty, &rdev->flags))) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	if (test_bit(WriteErrorSeen, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_error", sep);
		sep = ",";
	}
	if (test_bit(WantReplacement, &rdev->flags)) {
		len += sprintf(page+len, "%swant_replacement", sep);
		sep = ",";
	}
	if (test_bit(Replacement, &rdev->flags)) {
		len += sprintf(page+len, "%sreplacement", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
  2151. static ssize_t
  2152. state_store(struct md_rdev *rdev, const char *buf, size_t len)
  2153. {
  2154. /* can write
  2155. * faulty - simulates an error
  2156. * remove - disconnects the device
  2157. * writemostly - sets write_mostly
  2158. * -writemostly - clears write_mostly
  2159. * blocked - sets the Blocked flags
  2160. * -blocked - clears the Blocked and possibly simulates an error
  2161. * insync - sets Insync providing device isn't active
  2162. * -insync - clear Insync for a device with a slot assigned,
  2163. * so that it gets rebuilt based on bitmap
  2164. * write_error - sets WriteErrorSeen
  2165. * -write_error - clears WriteErrorSeen
  2166. */
  2167. int err = -EINVAL;
  2168. if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
  2169. md_error(rdev->mddev, rdev);
  2170. if (test_bit(Faulty, &rdev->flags))
  2171. err = 0;
  2172. else
  2173. err = -EBUSY;
  2174. } else if (cmd_match(buf, "remove")) {
  2175. if (rdev->raid_disk >= 0)
  2176. err = -EBUSY;
  2177. else {
  2178. struct mddev *mddev = rdev->mddev;
  2179. kick_rdev_from_array(rdev);
  2180. if (mddev->pers)
  2181. md_update_sb(mddev, 1);
  2182. md_new_event(mddev);
  2183. err = 0;
  2184. }
  2185. } else if (cmd_match(buf, "writemostly")) {
  2186. set_bit(WriteMostly, &rdev->flags);
  2187. err = 0;
  2188. } else if (cmd_match(buf, "-writemostly")) {
  2189. clear_bit(WriteMostly, &rdev->flags);
  2190. err = 0;
  2191. } else if (cmd_match(buf, "blocked")) {
  2192. set_bit(Blocked, &rdev->flags);
  2193. err = 0;
  2194. } else if (cmd_match(buf, "-blocked")) {
  2195. if (!test_bit(Faulty, &rdev->flags) &&
  2196. rdev->badblocks.unacked_exist) {
  2197. /* metadata handler doesn't understand badblocks,
  2198. * so we need to fail the device
  2199. */
  2200. md_error(rdev->mddev, rdev);
  2201. }
  2202. clear_bit(Blocked, &rdev->flags);
  2203. clear_bit(BlockedBadBlocks, &rdev->flags);
  2204. wake_up(&rdev->blocked_wait);
  2205. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2206. md_wakeup_thread(rdev->mddev->thread);
  2207. err = 0;
  2208. } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
  2209. set_bit(In_sync, &rdev->flags);
  2210. err = 0;
  2211. } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
  2212. if (rdev->mddev->pers == NULL) {
  2213. clear_bit(In_sync, &rdev->flags);
  2214. rdev->saved_raid_disk = rdev->raid_disk;
  2215. rdev->raid_disk = -1;
  2216. err = 0;
  2217. }
  2218. } else if (cmd_match(buf, "write_error")) {
  2219. set_bit(WriteErrorSeen, &rdev->flags);
  2220. err = 0;
  2221. } else if (cmd_match(buf, "-write_error")) {
  2222. clear_bit(WriteErrorSeen, &rdev->flags);
  2223. err = 0;
  2224. } else if (cmd_match(buf, "want_replacement")) {
  2225. /* Any non-spare device that is not a replacement can
  2226. * become want_replacement at any time, but we then need to
  2227. * check if recovery is needed.
  2228. */
  2229. if (rdev->raid_disk >= 0 &&
  2230. !test_bit(Replacement, &rdev->flags))
  2231. set_bit(WantReplacement, &rdev->flags);
  2232. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2233. md_wakeup_thread(rdev->mddev->thread);
  2234. err = 0;
  2235. } else if (cmd_match(buf, "-want_replacement")) {
  2236. /* Clearing 'want_replacement' is always allowed.
  2237. * Once replacements starts it is too late though.
  2238. */
  2239. err = 0;
  2240. clear_bit(WantReplacement, &rdev->flags);
  2241. } else if (cmd_match(buf, "replacement")) {
  2242. /* Can only set a device as a replacement when array has not
  2243. * yet been started. Once running, replacement is automatic
  2244. * from spares, or by assigning 'slot'.
  2245. */
  2246. if (rdev->mddev->pers)
  2247. err = -EBUSY;
  2248. else {
  2249. set_bit(Replacement, &rdev->flags);
  2250. err = 0;
  2251. }
  2252. } else if (cmd_match(buf, "-replacement")) {
  2253. /* Similarly, can only clear Replacement before start */
  2254. if (rdev->mddev->pers)
  2255. err = -EBUSY;
  2256. else {
  2257. clear_bit(Replacement, &rdev->flags);
  2258. err = 0;
  2259. }
  2260. }
  2261. if (!err)
  2262. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2263. return err ? err : len;
  2264. }
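
/*
 * Usage sketch (hedged; the array and device names below are assumptions,
 * not taken from this file): the per-device 'state' attribute is driven
 * from userspace through sysfs, e.g.
 *
 *	echo want_replacement > /sys/block/md0/md/dev-sdb/state
 *	echo -blocked > /sys/block/md0/md/dev-sdb/state
 *
 * Writes are matched with cmd_match() above, so the trailing newline that
 * 'echo' appends is accepted.
 */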
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);

static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
        return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);

        if (*buf && (*e == 0 || *e == '\n')) {
                atomic_set(&rdev->corrected_errors, n);
                return len;
        }
        return -EINVAL;
}

static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);

static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
        if (rdev->raid_disk < 0)
                return sprintf(page, "none\n");
        else
                return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        char *e;
        int err;
        int slot = simple_strtoul(buf, &e, 10);

        if (strncmp(buf, "none", 4) == 0)
                slot = -1;
        else if (e == buf || (*e && *e != '\n'))
                return -EINVAL;
        if (rdev->mddev->pers && slot == -1) {
                /* Setting 'slot' on an active array requires also
                 * updating the 'rd%d' link, and communicating
                 * with the personality with ->hot_*_disk.
                 * For now we only support removing
                 * failed/spare devices.  This normally happens automatically,
                 * but not when the metadata is externally managed.
                 */
                if (rdev->raid_disk == -1)
                        return -EEXIST;
                /* personality does all needed checks */
                if (rdev->mddev->pers->hot_remove_disk == NULL)
                        return -EINVAL;
                clear_bit(Blocked, &rdev->flags);
                remove_and_add_spares(rdev->mddev, rdev);
                if (rdev->raid_disk >= 0)
                        return -EBUSY;
                set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
                md_wakeup_thread(rdev->mddev->thread);
        } else if (rdev->mddev->pers) {
                /* Activating a spare .. or possibly reactivating
                 * if we ever get bitmaps working here.
                 */
                if (rdev->raid_disk != -1)
                        return -EBUSY;
                if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
                        return -EBUSY;
                if (rdev->mddev->pers->hot_add_disk == NULL)
                        return -EINVAL;
                if (slot >= rdev->mddev->raid_disks &&
                    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
                        return -ENOSPC;
                rdev->raid_disk = slot;
                if (test_bit(In_sync, &rdev->flags))
                        rdev->saved_raid_disk = slot;
                else
                        rdev->saved_raid_disk = -1;
                clear_bit(In_sync, &rdev->flags);
                clear_bit(Bitmap_sync, &rdev->flags);
                err = rdev->mddev->pers->
                        hot_add_disk(rdev->mddev, rdev);
                if (err) {
                        rdev->raid_disk = -1;
                        return err;
                } else
                        sysfs_notify_dirent_safe(rdev->sysfs_state);
                if (sysfs_link_rdev(rdev->mddev, rdev))
                        /* failure here is OK */;
                /* don't wakeup anyone, leave that to userspace. */
        } else {
                if (slot >= rdev->mddev->raid_disks &&
                    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
                        return -ENOSPC;
                rdev->raid_disk = slot;
                /* assume it is working */
                clear_bit(Faulty, &rdev->flags);
                clear_bit(WriteMostly, &rdev->flags);
                set_bit(In_sync, &rdev->flags);
                sysfs_notify_dirent_safe(rdev->sysfs_state);
        }
        return len;
}
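
/*
 * Usage sketch (names are illustrative): on an inactive array a slot can
 * be assigned directly; on an active array, only removal of a failed or
 * spare device ("none") or activation of an unassigned spare is accepted,
 * per the three branches above:
 *
 *	echo 2 > /sys/block/md0/md/dev-sdc/slot
 *	echo none > /sys/block/md0/md/dev-sdc/slot
 */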
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);

static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
        return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        unsigned long long offset;

        if (kstrtoull(buf, 10, &offset) < 0)
                return -EINVAL;
        if (rdev->mddev->pers && rdev->raid_disk >= 0)
                return -EBUSY;
        if (rdev->sectors && rdev->mddev->external)
                /* Must set offset before size, so overlap checks
                 * can be sane */
                return -EBUSY;
        rdev->data_offset = offset;
        rdev->new_data_offset = offset;
        return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)rdev->new_data_offset);
}

static ssize_t new_offset_store(struct md_rdev *rdev,
                                const char *buf, size_t len)
{
        unsigned long long new_offset;
        struct mddev *mddev = rdev->mddev;

        if (kstrtoull(buf, 10, &new_offset) < 0)
                return -EINVAL;
        if (mddev->sync_thread)
                return -EBUSY;
        if (new_offset == rdev->data_offset)
                /* reset is always permitted */
                ;
        else if (new_offset > rdev->data_offset) {
                /* must not push array size beyond rdev_sectors */
                if (new_offset - rdev->data_offset
                    + mddev->dev_sectors > rdev->sectors)
                        return -E2BIG;
        }
        /* Metadata worries about other space details. */

        /* decreasing the offset is inconsistent with a backwards
         * reshape.
         */
        if (new_offset < rdev->data_offset &&
            mddev->reshape_backwards)
                return -EINVAL;
        /* Increasing offset is inconsistent with forwards
         * reshape.  reshape_direction should be set to
         * 'backwards' first.
         */
        if (new_offset > rdev->data_offset &&
            !mddev->reshape_backwards)
                return -EINVAL;
        if (mddev->pers && mddev->persistent &&
            !super_types[mddev->major_version]
            .allow_new_offset(rdev, new_offset))
                return -E2BIG;
        rdev->new_data_offset = new_offset;
        if (new_offset > rdev->data_offset)
                mddev->reshape_backwards = 1;
        else if (new_offset < rdev->data_offset)
                mddev->reshape_backwards = 0;
        return len;
}
static struct rdev_sysfs_entry rdev_new_offset =
__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
        return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
        /* check if two start/length pairs overlap */
        if (s1+l1 <= s2)
                return 0;
        if (s2+l2 <= s1)
                return 0;
        return 1;
}
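
/*
 * Worked example for overlaps(): two half-open ranges [s1, s1+l1) and
 * [s2, s2+l2) intersect iff neither one ends before the other begins:
 *
 *	overlaps(0, 100, 50, 100) -> 1   (0+100 > 50 and 50+100 > 0)
 *	overlaps(0, 50, 50, 10)   -> 0   (0+50 <= 50: the ranges only touch)
 */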
static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
        unsigned long long blocks;
        sector_t new;

        if (kstrtoull(buf, 10, &blocks) < 0)
                return -EINVAL;

        if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
                return -EINVAL; /* sector conversion overflow */

        new = blocks * 2;
        if (new != blocks * 2)
                return -EINVAL; /* unsigned long long to sector_t overflow */

        *sectors = new;
        return 0;
}
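
/*
 * Worked example: sizes are written in 1KiB blocks but stored in 512-byte
 * sectors, so "1024" (1 MiB) parses to 2048 sectors.  The top-bit test
 * rejects inputs whose doubling would overflow unsigned long long, and the
 * new != blocks * 2 check catches a narrower sector_t (e.g., presumably,
 * a 32-bit sector_t on a kernel built without large-block-device support)
 * that cannot hold the result.
 */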
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        struct mddev *my_mddev = rdev->mddev;
        sector_t oldsectors = rdev->sectors;
        sector_t sectors;

        if (strict_blocks_to_sectors(buf, &sectors) < 0)
                return -EINVAL;
        if (rdev->data_offset != rdev->new_data_offset)
                return -EINVAL; /* too confusing */
        if (my_mddev->pers && rdev->raid_disk >= 0) {
                if (my_mddev->persistent) {
                        sectors = super_types[my_mddev->major_version].
                                rdev_size_change(rdev, sectors);
                        if (!sectors)
                                return -EBUSY;
                } else if (!sectors)
                        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                                rdev->data_offset;
                if (!my_mddev->pers->resize)
                        /* Cannot change size for RAID0 or Linear etc */
                        return -EINVAL;
        }
        if (sectors < my_mddev->dev_sectors)
                return -EINVAL; /* component must fit device */

        rdev->sectors = sectors;
        if (sectors > oldsectors && my_mddev->external) {
                /* Need to check that all other rdevs with the same
                 * ->bdev do not overlap.  'rcu' is sufficient to walk
                 * the rdev lists safely.
                 * This check does not provide a hard guarantee, it
                 * just helps avoid dangerous mistakes.
                 */
                struct mddev *mddev;
                int overlap = 0;
                struct list_head *tmp;

                rcu_read_lock();
                for_each_mddev(mddev, tmp) {
                        struct md_rdev *rdev2;

                        rdev_for_each(rdev2, mddev)
                                if (rdev->bdev == rdev2->bdev &&
                                    rdev != rdev2 &&
                                    overlaps(rdev->data_offset, rdev->sectors,
                                             rdev2->data_offset,
                                             rdev2->sectors)) {
                                        overlap = 1;
                                        break;
                                }
                        if (overlap) {
                                mddev_put(mddev);
                                break;
                        }
                }
                rcu_read_unlock();
                if (overlap) {
                        /* Someone else could have slipped in a size
                         * change here, but doing so is just silly.
                         * We put oldsectors back because we *know* it is
                         * safe, and trust userspace not to race with
                         * itself.
                         */
                        rdev->sectors = oldsectors;
                        return -EBUSY;
                }
        }
        return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
        unsigned long long recovery_start = rdev->recovery_offset;

        if (test_bit(In_sync, &rdev->flags) ||
            recovery_start == MaxSector)
                return sprintf(page, "none\n");

        return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
        unsigned long long recovery_start;

        if (cmd_match(buf, "none"))
                recovery_start = MaxSector;
        else if (kstrtoull(buf, 10, &recovery_start))
                return -EINVAL;

        if (rdev->mddev->pers &&
            rdev->raid_disk >= 0)
                return -EBUSY;

        rdev->recovery_offset = recovery_start;
        if (recovery_start == MaxSector)
                set_bit(In_sync, &rdev->flags);
        else
                clear_bit(In_sync, &rdev->flags);
        return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
        return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
        int rv = badblocks_store(&rdev->badblocks, page, len, 0);
        /* Maybe that ack was all we needed */
        if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
                wake_up(&rdev->blocked_wait);
        return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);

static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
        return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
        return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
static struct attribute *rdev_default_attrs[] = {
        &rdev_state.attr,
        &rdev_errors.attr,
        &rdev_slot.attr,
        &rdev_offset.attr,
        &rdev_new_offset.attr,
        &rdev_size.attr,
        &rdev_recovery_start.attr,
        &rdev_bad_blocks.attr,
        &rdev_unack_bad_blocks.attr,
        NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
        struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
        struct mddev *mddev = rdev->mddev;
        ssize_t rv;

        if (!entry->show)
                return -EIO;

        rv = mddev ? mddev_lock(mddev) : -EBUSY;
        if (!rv) {
                if (rdev->mddev == NULL)
                        rv = -EBUSY;
                else
                        rv = entry->show(rdev, page);
                mddev_unlock(mddev);
        }
        return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
                const char *page, size_t length)
{
        struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
        struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
        ssize_t rv;
        struct mddev *mddev = rdev->mddev;

        if (!entry->store)
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        rv = mddev ? mddev_lock(mddev) : -EBUSY;
        if (!rv) {
                if (rdev->mddev == NULL)
                        rv = -EBUSY;
                else
                        rv = entry->store(rdev, page, length);
                mddev_unlock(mddev);
        }
        return rv;
}

static void rdev_free(struct kobject *ko)
{
        struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
        kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
        .show           = rdev_attr_show,
        .store          = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
        .release        = rdev_free,
        .sysfs_ops      = &rdev_sysfs_ops,
        .default_attrs  = rdev_default_attrs,
};
int md_rdev_init(struct md_rdev *rdev)
{
        rdev->desc_nr = -1;
        rdev->saved_raid_disk = -1;
        rdev->raid_disk = -1;
        rdev->flags = 0;
        rdev->data_offset = 0;
        rdev->new_data_offset = 0;
        rdev->sb_events = 0;
        rdev->last_read_error.tv_sec = 0;
        rdev->last_read_error.tv_nsec = 0;
        rdev->sb_loaded = 0;
        rdev->bb_page = NULL;
        atomic_set(&rdev->nr_pending, 0);
        atomic_set(&rdev->read_errors, 0);
        atomic_set(&rdev->corrected_errors, 0);

        INIT_LIST_HEAD(&rdev->same_set);
        init_waitqueue_head(&rdev->blocked_wait);

        /* Add space to store bad block list.
         * This reserves the space even on arrays where it cannot
         * be used - I wonder if that matters
         */
        rdev->badblocks.count = 0;
        rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
        rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
        seqlock_init(&rdev->badblocks.lock);
        if (rdev->badblocks.page == NULL)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
        char b[BDEVNAME_SIZE];
        int err;
        struct md_rdev *rdev;
        sector_t size;

        rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
        if (!rdev) {
                printk(KERN_ERR "md: could not alloc mem for new device!\n");
                return ERR_PTR(-ENOMEM);
        }

        err = md_rdev_init(rdev);
        if (err)
                goto abort_free;
        err = alloc_disk_sb(rdev);
        if (err)
                goto abort_free;

        err = lock_rdev(rdev, newdev, super_format == -2);
        if (err)
                goto abort_free;

        kobject_init(&rdev->kobj, &rdev_ktype);

        size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
        if (!size) {
                printk(KERN_WARNING
                       "md: %s has zero or unknown size, marking faulty!\n",
                       bdevname(rdev->bdev,b));
                err = -EINVAL;
                goto abort_free;
        }

        if (super_format >= 0) {
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
                        printk(KERN_WARNING
                               "md: %s does not have a valid v%d.%d "
                               "superblock, not importing!\n",
                               bdevname(rdev->bdev,b),
                               super_format, super_minor);
                        goto abort_free;
                }
                if (err < 0) {
                        printk(KERN_WARNING
                               "md: could not read %s's sb, not importing!\n",
                               bdevname(rdev->bdev,b));
                        goto abort_free;
                }
        }

        return rdev;

abort_free:
        if (rdev->bdev)
                unlock_rdev(rdev);
        md_rdev_clear(rdev);
        kfree(rdev);
        return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */
static void analyze_sbs(struct mddev *mddev)
{
        int i;
        struct md_rdev *rdev, *freshest, *tmp;
        char b[BDEVNAME_SIZE];

        freshest = NULL;
        rdev_for_each_safe(rdev, tmp, mddev)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
                        freshest = rdev;
                        break;
                case 0:
                        break;
                default:
                        printk(KERN_ERR
                               "md: fatal superblock inconsistency in %s"
                               " -- removing from array\n",
                               bdevname(rdev->bdev,b));
                        kick_rdev_from_array(rdev);
                }

        super_types[mddev->major_version].
                validate_super(mddev, freshest);

        i = 0;
        rdev_for_each_safe(rdev, tmp, mddev) {
                if (mddev->max_disks &&
                    (rdev->desc_nr >= mddev->max_disks ||
                     i > mddev->max_disks)) {
                        printk(KERN_WARNING
                               "md: %s: %s: only %d devices permitted\n",
                               mdname(mddev), bdevname(rdev->bdev, b),
                               mddev->max_disks);
                        kick_rdev_from_array(rdev);
                        continue;
                }
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
                                printk(KERN_WARNING "md: kicking non-fresh %s"
                                       " from array!\n",
                                       bdevname(rdev->bdev,b));
                                kick_rdev_from_array(rdev);
                                continue;
                        }
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        set_bit(In_sync, &rdev->flags);
                } else if (rdev->raid_disk >=
                           (mddev->raid_disks - min(0, mddev->delta_disks))) {
                        rdev->raid_disk = -1;
                        clear_bit(In_sync, &rdev->flags);
                }
        }
}
/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale'.
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
        unsigned long result = 0;
        long decimals = -1;

        while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
                if (*cp == '.')
                        decimals = 0;
                else if (decimals < scale) {
                        unsigned int value;
                        value = *cp - '0';
                        result = result * 10 + value;
                        if (decimals >= 0)
                                decimals++;
                }
                cp++;
        }
        if (*cp == '\n')
                cp++;
        if (*cp)
                return -EINVAL;
        if (decimals < 0)
                decimals = 0;
        while (decimals < scale) {
                result *= 10;
                decimals++;
        }
        *res = result;
        return 0;
}
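
/*
 * Worked example: with scale == 3 (milliseconds from seconds),
 * strict_strtoul_scaled("0.20", &res, 3) yields 200, and "12.3456"
 * yields 12345 -- fractional digits beyond the scale are truncated.
 * A missing fraction is padded instead: "5" -> 5000.
 */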
static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(struct mddev *mddev, char *page)
{
        int msec = (mddev->safemode_delay*1000)/HZ;
        return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
{
        unsigned long msec;

        if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
                return -EINVAL;
        if (msec == 0)
                mddev->safemode_delay = 0;
        else {
                unsigned long old_delay = mddev->safemode_delay;
                mddev->safemode_delay = (msec*HZ)/1000;
                if (mddev->safemode_delay == 0)
                        mddev->safemode_delay = 1;
                if (mddev->safemode_delay < old_delay || old_delay == 0)
                        md_safemode_timeout((unsigned long)mddev);
        }
        return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
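
/*
 * Usage sketch (the array name is illustrative): the delay is written in
 * seconds with an optional millisecond fraction, which the scaled parser
 * above converts to milliseconds; e.g. a 200ms safe-mode delay:
 *
 *	echo 0.200 > /sys/block/md0/md/safe_mode_delay
 */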
static ssize_t
level_show(struct mddev *mddev, char *page)
{
        struct md_personality *p = mddev->pers;
        if (p)
                return sprintf(page, "%s\n", p->name);
        else if (mddev->clevel[0])
                return sprintf(page, "%s\n", mddev->clevel);
        else if (mddev->level != LEVEL_NONE)
                return sprintf(page, "%d\n", mddev->level);
        else
                return 0;
}

static ssize_t
level_store(struct mddev *mddev, const char *buf, size_t len)
{
        char clevel[16];
        ssize_t rv = len;
        struct md_personality *pers;
        long level;
        void *priv;
        struct md_rdev *rdev;

        if (mddev->pers == NULL) {
                if (len == 0)
                        return 0;
                if (len >= sizeof(mddev->clevel))
                        return -ENOSPC;
                strncpy(mddev->clevel, buf, len);
                if (mddev->clevel[len-1] == '\n')
                        len--;
                mddev->clevel[len] = 0;
                mddev->level = LEVEL_NONE;
                return rv;
        }
        if (mddev->ro)
                return -EROFS;

        /* request to change the personality.  Need to ensure:
         *  - array is not engaged in resync/recovery/reshape
         *  - old personality can be suspended
         *  - new personality will access other array.
         */
        if (mddev->sync_thread ||
            mddev->reshape_position != MaxSector ||
            mddev->sysfs_active)
                return -EBUSY;

        if (!mddev->pers->quiesce) {
                printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
                       mdname(mddev), mddev->pers->name);
                return -EINVAL;
        }

        /* Now find the new personality */
        if (len == 0 || len >= sizeof(clevel))
                return -EINVAL;
        strncpy(clevel, buf, len);
        if (clevel[len-1] == '\n')
                len--;
        clevel[len] = 0;
        if (kstrtol(clevel, 10, &level))
                level = LEVEL_NONE;

        if (request_module("md-%s", clevel) != 0)
                request_module("md-level-%s", clevel);
        spin_lock(&pers_lock);
        pers = find_pers(level, clevel);
        if (!pers || !try_module_get(pers->owner)) {
                spin_unlock(&pers_lock);
                printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
                return -EINVAL;
        }
        spin_unlock(&pers_lock);

        if (pers == mddev->pers) {
                /* Nothing to do! */
                module_put(pers->owner);
                return rv;
        }
        if (!pers->takeover) {
                module_put(pers->owner);
                printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
                       mdname(mddev), clevel);
                return -EINVAL;
        }

        rdev_for_each(rdev, mddev)
                rdev->new_raid_disk = rdev->raid_disk;

        /* ->takeover must set new_* and/or delta_disks
         * if it succeeds, and may set them when it fails.
         */
        priv = pers->takeover(mddev);
        if (IS_ERR(priv)) {
                mddev->new_level = mddev->level;
                mddev->new_layout = mddev->layout;
                mddev->new_chunk_sectors = mddev->chunk_sectors;
                mddev->raid_disks -= mddev->delta_disks;
                mddev->delta_disks = 0;
                mddev->reshape_backwards = 0;
                module_put(pers->owner);
                printk(KERN_WARNING "md: %s: %s would not accept array\n",
                       mdname(mddev), clevel);
                return PTR_ERR(priv);
        }

        /* Looks like we have a winner */
        mddev_suspend(mddev);
        mddev->pers->stop(mddev);

        if (mddev->pers->sync_request == NULL &&
            pers->sync_request != NULL) {
                /* need to add the md_redundancy_group */
                if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
                        printk(KERN_WARNING
                               "md: cannot register extra attributes for %s\n",
                               mdname(mddev));
                mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
        }
        if (mddev->pers->sync_request != NULL &&
            pers->sync_request == NULL) {
                /* need to remove the md_redundancy_group */
                if (mddev->to_remove == NULL)
                        mddev->to_remove = &md_redundancy_group;
        }

        if (mddev->pers->sync_request == NULL &&
            mddev->external) {
                /* We are converting from a no-redundancy array
                 * to a redundancy array and metadata is managed
                 * externally so we need to be sure that writes
                 * won't block due to a need to transition
                 *      clean->dirty
                 * until external management is started.
                 */
                mddev->in_sync = 0;
                mddev->safemode_delay = 0;
                mddev->safemode = 0;
        }

        rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk < 0)
                        continue;
                if (rdev->new_raid_disk >= mddev->raid_disks)
                        rdev->new_raid_disk = -1;
                if (rdev->new_raid_disk == rdev->raid_disk)
                        continue;
                sysfs_unlink_rdev(mddev, rdev);
        }
        rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk < 0)
                        continue;
                if (rdev->new_raid_disk == rdev->raid_disk)
                        continue;
                rdev->raid_disk = rdev->new_raid_disk;
                if (rdev->raid_disk < 0)
                        clear_bit(In_sync, &rdev->flags);
                else {
                        if (sysfs_link_rdev(mddev, rdev))
                                printk(KERN_WARNING "md: cannot register rd%d"
                                       " for %s after level change\n",
                                       rdev->raid_disk, mdname(mddev));
                }
        }

        module_put(mddev->pers->owner);
        mddev->pers = pers;
        mddev->private = priv;
        strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
        mddev->level = mddev->new_level;
        mddev->layout = mddev->new_layout;
        mddev->chunk_sectors = mddev->new_chunk_sectors;
        mddev->delta_disks = 0;
        mddev->reshape_backwards = 0;
        mddev->degraded = 0;
        if (mddev->pers->sync_request == NULL) {
                /* this is now an array without redundancy, so
                 * it must always be in_sync
                 */
                mddev->in_sync = 1;
                del_timer_sync(&mddev->safemode_timer);
        }
        blk_set_stacking_limits(&mddev->queue->limits);
        pers->run(mddev);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        mddev_resume(mddev);
        if (!mddev->thread)
                md_update_sb(mddev, 1);
        sysfs_notify(&mddev->kobj, NULL, "level");
        md_new_event(mddev);
        return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
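
/*
 * Usage sketch (hedged; assumes the target personality provides a suitable
 * ->takeover for the current layout, e.g. raid5 taking over a two-drive
 * raid1):
 *
 *	echo raid5 > /sys/block/md0/md/level
 *
 * On an unstarted array this merely records the requested level; on a
 * running array it performs the full suspend/takeover/resume sequence
 * implemented above.
 */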
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
        /* just a number, not meaningful for all levels */
        if (mddev->reshape_position != MaxSector &&
            mddev->layout != mddev->new_layout)
                return sprintf(page, "%d (%d)\n",
                               mddev->new_layout, mddev->layout);
        return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);

        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;

        if (mddev->pers) {
                int err;
                if (mddev->pers->check_reshape == NULL)
                        return -EBUSY;
                if (mddev->ro)
                        return -EROFS;
                mddev->new_layout = n;
                err = mddev->pers->check_reshape(mddev);
                if (err) {
                        mddev->new_layout = mddev->layout;
                        return err;
                }
        } else {
                mddev->new_layout = n;
                if (mddev->reshape_position == MaxSector)
                        mddev->layout = n;
        }
        return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
        if (mddev->raid_disks == 0)
                return 0;
        if (mddev->reshape_position != MaxSector &&
            mddev->delta_disks != 0)
                return sprintf(page, "%d (%d)\n", mddev->raid_disks,
                               mddev->raid_disks - mddev->delta_disks);
        return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(struct mddev *mddev, int raid_disks);

static ssize_t
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *e;
        int rv = 0;
        unsigned long n = simple_strtoul(buf, &e, 10);

        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;

        if (mddev->pers)
                rv = update_raid_disks(mddev, n);
        else if (mddev->reshape_position != MaxSector) {
                struct md_rdev *rdev;
                int olddisks = mddev->raid_disks - mddev->delta_disks;

                rdev_for_each(rdev, mddev) {
                        if (olddisks < n &&
                            rdev->data_offset < rdev->new_data_offset)
                                return -EINVAL;
                        if (olddisks > n &&
                            rdev->data_offset > rdev->new_data_offset)
                                return -EINVAL;
                }
                mddev->delta_disks = n - olddisks;
                mddev->raid_disks = n;
                mddev->reshape_backwards = (mddev->delta_disks < 0);
        } else
                mddev->raid_disks = n;
        return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(struct mddev *mddev, char *page)
{
        if (mddev->reshape_position != MaxSector &&
            mddev->chunk_sectors != mddev->new_chunk_sectors)
                return sprintf(page, "%d (%d)\n",
                               mddev->new_chunk_sectors << 9,
                               mddev->chunk_sectors << 9);
        return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);

        if (!*buf || (*e && *e != '\n'))
                return -EINVAL;

        if (mddev->pers) {
                int err;
                if (mddev->pers->check_reshape == NULL)
                        return -EBUSY;
                if (mddev->ro)
                        return -EROFS;
                mddev->new_chunk_sectors = n >> 9;
                err = mddev->pers->check_reshape(mddev);
                if (err) {
                        mddev->new_chunk_sectors = mddev->chunk_sectors;
                        return err;
                }
        } else {
                mddev->new_chunk_sectors = n >> 9;
                if (mddev->reshape_position == MaxSector)
                        mddev->chunk_sectors = n >> 9;
        }
        return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
        if (mddev->recovery_cp == MaxSector)
                return sprintf(page, "none\n");
        return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *e;
        unsigned long long n = simple_strtoull(buf, &e, 10);

        if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                return -EBUSY;
        if (cmd_match(buf, "none"))
                n = MaxSector;
        else if (!*buf || (*e && *e != '\n'))
                return -EINVAL;

        mddev->recovery_cp = n;
        if (mddev->pers)
                set_bit(MD_CHANGE_CLEAN, &mddev->flags);
        return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *     all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block.  The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
                   write_pending, active_idle, bad_word};
static char *array_states[] = {
        "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
        "write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
        int n;
        for (n = 0; list[n]; n++)
                if (cmd_match(word, list[n]))
                        break;
        return n;
}
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
        enum array_state st = inactive;

        if (mddev->pers)
                switch(mddev->ro) {
                case 1:
                        st = readonly;
                        break;
                case 2:
                        st = read_auto;
                        break;
                case 0:
                        if (mddev->in_sync)
                                st = clean;
                        else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                                st = write_pending;
                        else if (mddev->safemode)
                                st = active_idle;
                        else
                                st = active;
                }
        else {
                if (list_empty(&mddev->disks) &&
                    mddev->raid_disks == 0 &&
                    mddev->dev_sectors == 0)
                        st = clear;
                else
                        st = inactive;
        }
        return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);

static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
        int err = -EINVAL;
        enum array_state st = match_word(buf, array_states);
        switch(st) {
        case bad_word:
                break;
        case clear:
                /* stopping an active array */
                err = do_md_stop(mddev, 0, NULL);
                break;
        case inactive:
                /* stopping an active array */
                if (mddev->pers)
                        err = do_md_stop(mddev, 2, NULL);
                else
                        err = 0; /* already inactive */
                break;
        case suspended:
                break; /* not supported yet */
        case readonly:
                if (mddev->pers)
                        err = md_set_readonly(mddev, NULL);
                else {
                        mddev->ro = 1;
                        set_disk_ro(mddev->gendisk, 1);
                        err = do_md_run(mddev);
                }
                break;
        case read_auto:
                if (mddev->pers) {
                        if (mddev->ro == 0)
                                err = md_set_readonly(mddev, NULL);
                        else if (mddev->ro == 1)
                                err = restart_array(mddev);
                        if (err == 0) {
                                mddev->ro = 2;
                                set_disk_ro(mddev->gendisk, 0);
                        }
                } else {
                        mddev->ro = 2;
                        err = do_md_run(mddev);
                }
                break;
        case clean:
                if (mddev->pers) {
                        restart_array(mddev);
                        spin_lock_irq(&mddev->write_lock);
                        if (atomic_read(&mddev->writes_pending) == 0) {
                                if (mddev->in_sync == 0) {
                                        mddev->in_sync = 1;
                                        if (mddev->safemode == 1)
                                                mddev->safemode = 0;
                                        set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                                }
                                err = 0;
                        } else
                                err = -EBUSY;
                        spin_unlock_irq(&mddev->write_lock);
                } else
                        err = -EINVAL;
                break;
        case active:
                if (mddev->pers) {
                        restart_array(mddev);
                        clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        wake_up(&mddev->sb_wait);
                        err = 0;
                } else {
                        mddev->ro = 0;
                        set_disk_ro(mddev->gendisk, 0);
                        err = do_md_run(mddev);
                }
                break;
        case write_pending:
        case active_idle:
                /* these cannot be set */
                break;
        }
        if (err)
                return err;
        else {
                if (mddev->hold_active == UNTIL_IOCTL)
                        mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
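
/*
 * Usage sketch (md0 is illustrative): any word from array_states[] except
 * "write-pending" and "active-idle" may be written, e.g.
 *
 *	echo readonly > /sys/block/md0/md/array_state
 *	echo clean > /sys/block/md0/md/array_state
 */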
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%d\n",
                       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *e;
        unsigned long n = simple_strtoul(buf, &e, 10);

        if (*buf && (*e == 0 || *e == '\n')) {
                atomic_set(&mddev->max_corr_read_errors, n);
                return len;
        }
        return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
       max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
        return -EINVAL;
}
static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
        /* buf must be %d:%d\n? giving major and minor numbers */
        /* The new device is added to the array.
         * If the array has a persistent superblock, we read the
         * superblock to initialise info and check validity.
         * Otherwise, only checking done is that in bind_rdev_to_array,
         * which mainly checks size.
         */
        char *e;
        int major = simple_strtoul(buf, &e, 10);
        int minor;
        dev_t dev;
        struct md_rdev *rdev;
        int err;

        if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
                return -EINVAL;
        minor = simple_strtoul(e+1, &e, 10);
        if (*e && *e != '\n')
                return -EINVAL;
        dev = MKDEV(major, minor);
        if (major != MAJOR(dev) ||
            minor != MINOR(dev))
                return -EOVERFLOW;

        if (mddev->persistent) {
                rdev = md_import_device(dev, mddev->major_version,
                                        mddev->minor_version);
                if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
                        struct md_rdev *rdev0
                                = list_entry(mddev->disks.next,
                                             struct md_rdev, same_set);
                        err = super_types[mddev->major_version]
                                .load_super(rdev, rdev0, mddev->minor_version);
                        if (err < 0)
                                goto out;
                }
        } else if (mddev->external)
                rdev = md_import_device(dev, -2, -1);
        else
                rdev = md_import_device(dev, -1, -1);

        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
        err = bind_rdev_to_array(rdev, mddev);
 out:
        if (err)
                export_rdev(rdev);
        return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
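
/*
 * Usage sketch: devices are named by major:minor numbers, e.g. adding
 * 8:17 (conventionally sdb1) while assembling an array:
 *
 *	echo 8:17 > /sys/block/md0/md/new_dev
 */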
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
        char *end;
        unsigned long chunk, end_chunk;

        if (!mddev->bitmap)
                goto out;
        /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
        while (*buf) {
                chunk = end_chunk = simple_strtoul(buf, &end, 0);
                if (buf == end) break;
                if (*end == '-') { /* range */
                        buf = end + 1;
                        end_chunk = simple_strtoul(buf, &end, 0);
                        if (buf == end) break;
                }
                if (*end && !isspace(*end)) break;
                bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
                buf = skip_spaces(end);
        }
        bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
        return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
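
/*
 * Usage sketch: chunk numbers or ranges mark regions of the bitmap dirty
 * so they get resynced, e.g. chunks 0 through 100 plus chunk 200:
 *
 *	echo "0-100 200" > /sys/block/md0/md/bitmap_set_bits
 */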
static ssize_t
size_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
        /* If array is inactive, we can reduce the component size, but
         * not increase it (except from 0).
         * If array is active, we can try an on-line resize
         */
        sector_t sectors;
        int err = strict_blocks_to_sectors(buf, &sectors);

        if (err < 0)
                return err;
        if (mddev->pers) {
                err = update_size(mddev, sectors);
                md_update_sb(mddev, 1);
        } else {
                if (mddev->dev_sectors == 0 ||
                    mddev->dev_sectors > sectors)
                        mddev->dev_sectors = sectors;
                else
                        err = -ENOSPC;
        }
        return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
        if (mddev->persistent)
                return sprintf(page, "%d.%d\n",
                               mddev->major_version, mddev->minor_version);
        else if (mddev->external)
                return sprintf(page, "external:%s\n", mddev->metadata_type);
        else
                return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
        int major, minor;
        char *e;
        /* Changing the details of 'external' metadata is
         * always permitted.  Otherwise there must be
         * no devices attached to the array.
         */
        if (mddev->external && strncmp(buf, "external:", 9) == 0)
                ;
        else if (!list_empty(&mddev->disks))
                return -EBUSY;

        if (cmd_match(buf, "none")) {
                mddev->persistent = 0;
                mddev->external = 0;
                mddev->major_version = 0;
                mddev->minor_version = 90;
                return len;
        }
        if (strncmp(buf, "external:", 9) == 0) {
                size_t namelen = len-9;
                if (namelen >= sizeof(mddev->metadata_type))
                        namelen = sizeof(mddev->metadata_type)-1;
                strncpy(mddev->metadata_type, buf+9, namelen);
                mddev->metadata_type[namelen] = 0;
                if (namelen && mddev->metadata_type[namelen-1] == '\n')
                        mddev->metadata_type[--namelen] = 0;
                mddev->persistent = 0;
                mddev->external = 1;
                mddev->major_version = 0;
                mddev->minor_version = 90;
                return len;
        }
        major = simple_strtoul(buf, &e, 10);
        if (e == buf || *e != '.')
                return -EINVAL;
        buf = e+1;
        minor = simple_strtoul(buf, &e, 10);
        if (e == buf || (*e && *e != '\n'))
                return -EINVAL;
        if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
                return -ENOENT;
        mddev->major_version = major;
        mddev->minor_version = minor;
        mddev->persistent = 1;
        mddev->external = 0;
        return len;
}

static struct md_sysfs_entry md_metadata =
__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
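
/*
 * Usage sketch (values follow the parser above; "imsm" is only an
 * illustrative external-metadata name): while the array has no devices
 * attached,
 *
 *	echo 1.2 > /sys/block/md0/md/metadata_version
 *	echo external:imsm > /sys/block/md0/md/metadata_version
 */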
static ssize_t
action_show(struct mddev *mddev, char *page)
{
        char *type = "idle";
        if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                type = "frozen";
        else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                        type = "reshape";
                else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                                type = "resync";
                        else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
                                type = "check";
                        else
                                type = "repair";
                } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
                        type = "recover";
        }
        return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
        if (!mddev->pers || !mddev->pers->sync_request)
                return -EINVAL;

        if (cmd_match(page, "frozen"))
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        else
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
                }
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
        else if (cmd_match(page, "resync"))
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        else if (cmd_match(page, "recover")) {
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev->pers->start_reshape(mddev);
                if (err)
                        return err;
                sysfs_notify(&mddev->kobj, NULL, "degraded");
        } else {
                if (cmd_match(page, "check"))
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else if (!cmd_match(page, "repair"))
                        return -EINVAL;
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
        if (mddev->ro == 2) {
                /* A write to sync_action is enough to justify
                 * canceling read-auto mode
                 */
                mddev->ro = 0;
                md_wakeup_thread(mddev->sync_thread);
        }
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        return len;
}

static struct md_sysfs_entry md_scan_mode =
__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
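
/*
 * Usage sketch: kick off a scrub and later read back the mismatch count,
 * e.g.
 *
 *	echo check > /sys/block/md0/md/sync_action
 *	cat /sys/block/md0/md/mismatch_cnt
 *
 * "repair" additionally writes back corrections; "idle" interrupts a
 * running sync thread, per the branches above.
 */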
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)
                       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%d (%s)\n", speed_min(mddev),
                       mddev->sync_speed_min ? "local" : "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
        int min;
        char *e;

        if (strncmp(buf, "system", 6) == 0) {
                mddev->sync_speed_min = 0;
                return len;
        }
        min = simple_strtoul(buf, &e, 10);
        if (buf == e || (*e && *e != '\n') || min <= 0)
                return -EINVAL;
        mddev->sync_speed_min = min;
        return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%d (%s)\n", speed_max(mddev),
                       mddev->sync_speed_max ? "local" : "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
        int max;
        char *e;

        if (strncmp(buf, "system", 6) == 0) {
                mddev->sync_speed_max = 0;
                return len;
        }
        max = simple_strtoul(buf, &e, 10);
        if (buf == e || (*e && *e != '\n') || max <= 0)
                return -EINVAL;
        mddev->sync_speed_max = max;
        return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
        return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
        long n;

        if (kstrtol(buf, 10, &n))
                return -EINVAL;

        if (n != 0 && n != 1)
                return -EINVAL;

        mddev->parallel_resync = n;

        if (mddev->sync_thread)
                wake_up(&resync_wait);

        return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);
static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
        unsigned long resync, dt, db;

        if (mddev->curr_resync == 0)
                return sprintf(page, "none\n");
        resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
        dt = (jiffies - mddev->resync_mark) / HZ;
        if (!dt) dt++;
        db = resync - mddev->resync_mark_cnt;
        return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
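
/*
 * Worked example for sync_speed_show(): db is in 512-byte sectors and dt
 * in seconds, so db/dt/2 is KiB/sec -- e.g. 204800 sectors moved in 10
 * seconds reports 10240 (10 MiB/s).
 */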
static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
        unsigned long long max_sectors, resync;

        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                return sprintf(page, "none\n");

        if (mddev->curr_resync == 1 ||
            mddev->curr_resync == 2)
                return sprintf(page, "delayed\n");

        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->resync_max_sectors;
        else
                max_sectors = mddev->dev_sectors;

        resync = mddev->curr_resync_completed;
        return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
  3773. static ssize_t
  3774. min_sync_show(struct mddev *mddev, char *page)
  3775. {
  3776. return sprintf(page, "%llu\n",
  3777. (unsigned long long)mddev->resync_min);
  3778. }
  3779. static ssize_t
  3780. min_sync_store(struct mddev *mddev, const char *buf, size_t len)
  3781. {
  3782. unsigned long long min;
  3783. if (kstrtoull(buf, 10, &min))
  3784. return -EINVAL;
  3785. if (min > mddev->resync_max)
  3786. return -EINVAL;
  3787. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  3788. return -EBUSY;
  3789. /* Must be a multiple of chunk_size */
  3790. if (mddev->chunk_sectors) {
  3791. sector_t temp = min;
  3792. if (sector_div(temp, mddev->chunk_sectors))
  3793. return -EINVAL;
  3794. }
  3795. mddev->resync_min = min;
  3796. return len;
  3797. }
  3798. static struct md_sysfs_entry md_min_sync =
  3799. __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
  3800. static ssize_t
  3801. max_sync_show(struct mddev *mddev, char *page)
  3802. {
  3803. if (mddev->resync_max == MaxSector)
  3804. return sprintf(page, "max\n");
  3805. else
  3806. return sprintf(page, "%llu\n",
  3807. (unsigned long long)mddev->resync_max);
  3808. }
  3809. static ssize_t
  3810. max_sync_store(struct mddev *mddev, const char *buf, size_t len)
  3811. {
  3812. if (strncmp(buf, "max", 3) == 0)
  3813. mddev->resync_max = MaxSector;
  3814. else {
  3815. unsigned long long max;
  3816. if (kstrtoull(buf, 10, &max))
  3817. return -EINVAL;
  3818. if (max < mddev->resync_min)
  3819. return -EINVAL;
  3820. if (max < mddev->resync_max &&
  3821. mddev->ro == 0 &&
  3822. test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  3823. return -EBUSY;
  3824. /* Must be a multiple of chunk_size */
  3825. if (mddev->chunk_sectors) {
  3826. sector_t temp = max;
  3827. if (sector_div(temp, mddev->chunk_sectors))
  3828. return -EINVAL;
  3829. }
  3830. mddev->resync_max = max;
  3831. }
  3832. wake_up(&mddev->recovery_wait);
  3833. return len;
  3834. }
  3835. static struct md_sysfs_entry md_max_sync =
  3836. __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
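/*
 * sync_min and sync_max together bound the region a resync will cover,
 * and both must be chunk-aligned, as enforced above.  A plausible usage
 * sketch from userspace (paths assume an array named md0):
 *   echo 0   > /sys/block/md0/md/sync_min
 *   echo max > /sys/block/md0/md/sync_max
 */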
  3837. static ssize_t
  3838. suspend_lo_show(struct mddev *mddev, char *page)
  3839. {
  3840. return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
  3841. }
  3842. static ssize_t
  3843. suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
  3844. {
  3845. char *e;
  3846. unsigned long long new = simple_strtoull(buf, &e, 10);
  3847. unsigned long long old = mddev->suspend_lo;
  3848. if (mddev->pers == NULL ||
  3849. mddev->pers->quiesce == NULL)
  3850. return -EINVAL;
  3851. if (buf == e || (*e && *e != '\n'))
  3852. return -EINVAL;
  3853. mddev->suspend_lo = new;
  3854. if (new >= old)
  3855. /* Shrinking suspended region */
  3856. mddev->pers->quiesce(mddev, 2);
  3857. else {
  3858. /* Expanding suspended region - need to wait */
  3859. mddev->pers->quiesce(mddev, 1);
  3860. mddev->pers->quiesce(mddev, 0);
  3861. }
  3862. return len;
  3863. }
  3864. static struct md_sysfs_entry md_suspend_lo =
  3865. __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
  3866. static ssize_t
  3867. suspend_hi_show(struct mddev *mddev, char *page)
  3868. {
  3869. return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
  3870. }
  3871. static ssize_t
  3872. suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
  3873. {
  3874. char *e;
  3875. unsigned long long new = simple_strtoull(buf, &e, 10);
  3876. unsigned long long old = mddev->suspend_hi;
  3877. if (mddev->pers == NULL ||
  3878. mddev->pers->quiesce == NULL)
  3879. return -EINVAL;
  3880. if (buf == e || (*e && *e != '\n'))
  3881. return -EINVAL;
  3882. mddev->suspend_hi = new;
  3883. if (new <= old)
  3884. /* Shrinking suspended region */
  3885. mddev->pers->quiesce(mddev, 2);
  3886. else {
  3887. /* Expanding suspended region - need to wait */
  3888. mddev->pers->quiesce(mddev, 1);
  3889. mddev->pers->quiesce(mddev, 0);
  3890. }
  3891. return len;
  3892. }
  3893. static struct md_sysfs_entry md_suspend_hi =
  3894. __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
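/*
 * suspend_lo and suspend_hi delimit a suspended region of the array.
 * Shrinking the region needs only a quiesce(mddev, 2) notification;
 * growing it uses the quiesce(1)/quiesce(0) pair so that I/O already
 * in flight to the newly covered sectors is drained first.
 */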
  3895. static ssize_t
  3896. reshape_position_show(struct mddev *mddev, char *page)
  3897. {
  3898. if (mddev->reshape_position != MaxSector)
  3899. return sprintf(page, "%llu\n",
  3900. (unsigned long long)mddev->reshape_position);
  3901. strcpy(page, "none\n");
  3902. return 5;
  3903. }
  3904. static ssize_t
  3905. reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
  3906. {
  3907. struct md_rdev *rdev;
  3908. char *e;
  3909. unsigned long long new = simple_strtoull(buf, &e, 10);
  3910. if (mddev->pers)
  3911. return -EBUSY;
  3912. if (buf == e || (*e && *e != '\n'))
  3913. return -EINVAL;
  3914. mddev->reshape_position = new;
  3915. mddev->delta_disks = 0;
  3916. mddev->reshape_backwards = 0;
  3917. mddev->new_level = mddev->level;
  3918. mddev->new_layout = mddev->layout;
  3919. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3920. rdev_for_each(rdev, mddev)
  3921. rdev->new_data_offset = rdev->data_offset;
  3922. return len;
  3923. }
  3924. static struct md_sysfs_entry md_reshape_position =
  3925. __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
  3926. reshape_position_store);
  3927. static ssize_t
  3928. reshape_direction_show(struct mddev *mddev, char *page)
  3929. {
  3930. return sprintf(page, "%s\n",
  3931. mddev->reshape_backwards ? "backwards" : "forwards");
  3932. }
  3933. static ssize_t
  3934. reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
  3935. {
  3936. int backwards = 0;
  3937. if (cmd_match(buf, "forwards"))
  3938. backwards = 0;
  3939. else if (cmd_match(buf, "backwards"))
  3940. backwards = 1;
  3941. else
  3942. return -EINVAL;
  3943. if (mddev->reshape_backwards == backwards)
  3944. return len;
  3945. /* check if we are allowed to change */
  3946. if (mddev->delta_disks)
  3947. return -EBUSY;
  3948. if (mddev->persistent &&
  3949. mddev->major_version == 0)
  3950. return -EINVAL;
  3951. mddev->reshape_backwards = backwards;
  3952. return len;
  3953. }
  3954. static struct md_sysfs_entry md_reshape_direction =
  3955. __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
  3956. reshape_direction_store);
  3957. static ssize_t
  3958. array_size_show(struct mddev *mddev, char *page)
  3959. {
  3960. if (mddev->external_size)
  3961. return sprintf(page, "%llu\n",
  3962. (unsigned long long)mddev->array_sectors/2);
  3963. else
  3964. return sprintf(page, "default\n");
  3965. }
  3966. static ssize_t
  3967. array_size_store(struct mddev *mddev, const char *buf, size_t len)
  3968. {
  3969. sector_t sectors;
  3970. if (strncmp(buf, "default", 7) == 0) {
  3971. if (mddev->pers)
  3972. sectors = mddev->pers->size(mddev, 0, 0);
  3973. else
  3974. sectors = mddev->array_sectors;
  3975. mddev->external_size = 0;
  3976. } else {
  3977. if (strict_blocks_to_sectors(buf, &sectors) < 0)
  3978. return -EINVAL;
  3979. if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
  3980. return -E2BIG;
  3981. mddev->external_size = 1;
  3982. }
  3983. mddev->array_sectors = sectors;
  3984. if (mddev->pers) {
  3985. set_capacity(mddev->gendisk, mddev->array_sectors);
  3986. revalidate_disk(mddev->gendisk);
  3987. }
  3988. return len;
  3989. }
  3990. static struct md_sysfs_entry md_array_size =
  3991. __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
  3992. array_size_store);
  3993. static struct attribute *md_default_attrs[] = {
  3994. &md_level.attr,
  3995. &md_layout.attr,
  3996. &md_raid_disks.attr,
  3997. &md_chunk_size.attr,
  3998. &md_size.attr,
  3999. &md_resync_start.attr,
  4000. &md_metadata.attr,
  4001. &md_new_device.attr,
  4002. &md_safe_delay.attr,
  4003. &md_array_state.attr,
  4004. &md_reshape_position.attr,
  4005. &md_reshape_direction.attr,
  4006. &md_array_size.attr,
  4007. &max_corr_read_errors.attr,
  4008. NULL,
  4009. };
  4010. static struct attribute *md_redundancy_attrs[] = {
  4011. &md_scan_mode.attr,
  4012. &md_last_scan_mode.attr,
  4013. &md_mismatches.attr,
  4014. &md_sync_min.attr,
  4015. &md_sync_max.attr,
  4016. &md_sync_speed.attr,
  4017. &md_sync_force_parallel.attr,
  4018. &md_sync_completed.attr,
  4019. &md_min_sync.attr,
  4020. &md_max_sync.attr,
  4021. &md_suspend_lo.attr,
  4022. &md_suspend_hi.attr,
  4023. &md_bitmap.attr,
  4024. &md_degraded.attr,
  4025. NULL,
  4026. };
  4027. static struct attribute_group md_redundancy_group = {
  4028. .name = NULL,
  4029. .attrs = md_redundancy_attrs,
  4030. };
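/* With .name == NULL, the redundancy attributes are created directly in
 * the array's md/ sysfs directory instead of a named subdirectory.
 */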
  4031. static ssize_t
  4032. md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  4033. {
  4034. struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
  4035. struct mddev *mddev = container_of(kobj, struct mddev, kobj);
  4036. ssize_t rv;
  4037. if (!entry->show)
  4038. return -EIO;
  4039. spin_lock(&all_mddevs_lock);
  4040. if (list_empty(&mddev->all_mddevs)) {
  4041. spin_unlock(&all_mddevs_lock);
  4042. return -EBUSY;
  4043. }
  4044. mddev_get(mddev);
  4045. spin_unlock(&all_mddevs_lock);
  4046. rv = mddev_lock(mddev);
  4047. if (!rv) {
  4048. rv = entry->show(mddev, page);
  4049. mddev_unlock(mddev);
  4050. }
  4051. mddev_put(mddev);
  4052. return rv;
  4053. }
  4054. static ssize_t
  4055. md_attr_store(struct kobject *kobj, struct attribute *attr,
  4056. const char *page, size_t length)
  4057. {
  4058. struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
  4059. struct mddev *mddev = container_of(kobj, struct mddev, kobj);
  4060. ssize_t rv;
  4061. if (!entry->store)
  4062. return -EIO;
  4063. if (!capable(CAP_SYS_ADMIN))
  4064. return -EACCES;
  4065. spin_lock(&all_mddevs_lock);
  4066. if (list_empty(&mddev->all_mddevs)) {
  4067. spin_unlock(&all_mddevs_lock);
  4068. return -EBUSY;
  4069. }
  4070. mddev_get(mddev);
  4071. spin_unlock(&all_mddevs_lock);
  4072. if (entry->store == new_dev_store)
  4073. flush_workqueue(md_misc_wq);
  4074. rv = mddev_lock(mddev);
  4075. if (!rv) {
  4076. rv = entry->store(mddev, page, length);
  4077. mddev_unlock(mddev);
  4078. }
  4079. mddev_put(mddev);
  4080. return rv;
  4081. }
  4082. static void md_free(struct kobject *ko)
  4083. {
  4084. struct mddev *mddev = container_of(ko, struct mddev, kobj);
  4085. if (mddev->sysfs_state)
  4086. sysfs_put(mddev->sysfs_state);
  4087. if (mddev->gendisk) {
  4088. del_gendisk(mddev->gendisk);
  4089. put_disk(mddev->gendisk);
  4090. }
  4091. if (mddev->queue)
  4092. blk_cleanup_queue(mddev->queue);
  4093. kfree(mddev);
  4094. }
  4095. static const struct sysfs_ops md_sysfs_ops = {
  4096. .show = md_attr_show,
  4097. .store = md_attr_store,
  4098. };
  4099. static struct kobj_type md_ktype = {
  4100. .release = md_free,
  4101. .sysfs_ops = &md_sysfs_ops,
  4102. .default_attrs = md_default_attrs,
  4103. };
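/* md_free() is the ktype release handler: it runs only when the final
 * reference to mddev->kobj is dropped, which makes tearing down the
 * gendisk and request queue there safe.
 */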
  4104. int mdp_major = 0;
  4105. static void mddev_delayed_delete(struct work_struct *ws)
  4106. {
  4107. struct mddev *mddev = container_of(ws, struct mddev, del_work);
  4108. sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
  4109. kobject_del(&mddev->kobj);
  4110. kobject_put(&mddev->kobj);
  4111. }
  4112. static int md_alloc(dev_t dev, char *name)
  4113. {
  4114. static DEFINE_MUTEX(disks_mutex);
  4115. struct mddev *mddev = mddev_find(dev);
  4116. struct gendisk *disk;
  4117. int partitioned;
  4118. int shift;
  4119. int unit;
  4120. int error;
  4121. if (!mddev)
  4122. return -ENODEV;
  4123. partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
  4124. shift = partitioned ? MdpMinorShift : 0;
  4125. unit = MINOR(mddev->unit) >> shift;
  4126. /* wait for any previous instance of this device to be
  4127. * completely removed (mddev_delayed_delete).
  4128. */
  4129. flush_workqueue(md_misc_wq);
  4130. mutex_lock(&disks_mutex);
  4131. error = -EEXIST;
  4132. if (mddev->gendisk)
  4133. goto abort;
  4134. if (name) {
  4135. /* Need to ensure that 'name' is not a duplicate.
  4136. */
  4137. struct mddev *mddev2;
  4138. spin_lock(&all_mddevs_lock);
  4139. list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
  4140. if (mddev2->gendisk &&
  4141. strcmp(mddev2->gendisk->disk_name, name) == 0) {
  4142. spin_unlock(&all_mddevs_lock);
  4143. goto abort;
  4144. }
  4145. spin_unlock(&all_mddevs_lock);
  4146. }
  4147. error = -ENOMEM;
  4148. mddev->queue = blk_alloc_queue(GFP_KERNEL);
  4149. if (!mddev->queue)
  4150. goto abort;
  4151. mddev->queue->queuedata = mddev;
  4152. blk_queue_make_request(mddev->queue, md_make_request);
  4153. blk_set_stacking_limits(&mddev->queue->limits);
  4154. disk = alloc_disk(1 << shift);
  4155. if (!disk) {
  4156. blk_cleanup_queue(mddev->queue);
  4157. mddev->queue = NULL;
  4158. goto abort;
  4159. }
  4160. disk->major = MAJOR(mddev->unit);
  4161. disk->first_minor = unit << shift;
  4162. if (name)
  4163. strcpy(disk->disk_name, name);
  4164. else if (partitioned)
  4165. sprintf(disk->disk_name, "md_d%d", unit);
  4166. else
  4167. sprintf(disk->disk_name, "md%d", unit);
  4168. disk->fops = &md_fops;
  4169. disk->private_data = mddev;
  4170. disk->queue = mddev->queue;
  4171. blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
  4172. /* Allow extended partitions. This makes the
  4173. * 'mdp' device redundant, but we can't really
  4174. * remove it now.
  4175. */
  4176. disk->flags |= GENHD_FL_EXT_DEVT;
  4177. mddev->gendisk = disk;
  4178. /* As soon as we call add_disk(), another thread could get
  4179. * through to md_open, so make sure it doesn't get too far
  4180. */
  4181. mutex_lock(&mddev->open_mutex);
  4182. add_disk(disk);
  4183. error = kobject_init_and_add(&mddev->kobj, &md_ktype,
  4184. &disk_to_dev(disk)->kobj, "%s", "md");
  4185. if (error) {
  4186. /* This isn't possible, but as kobject_init_and_add is marked
  4187. * __must_check, we must do something with the result
  4188. */
  4189. printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
  4190. disk->disk_name);
  4191. error = 0;
  4192. }
  4193. if (mddev->kobj.sd &&
  4194. sysfs_create_group(&mddev->kobj, &md_bitmap_group))
  4195. printk(KERN_DEBUG "pointless warning\n");
  4196. mutex_unlock(&mddev->open_mutex);
  4197. abort:
  4198. mutex_unlock(&disks_mutex);
  4199. if (!error && mddev->kobj.sd) {
  4200. kobject_uevent(&mddev->kobj, KOBJ_ADD);
  4201. mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
  4202. }
  4203. mddev_put(mddev);
  4204. return error;
  4205. }
  4206. static struct kobject *md_probe(dev_t dev, int *part, void *data)
  4207. {
  4208. md_alloc(dev, NULL);
  4209. return NULL;
  4210. }
  4211. static int add_named_array(const char *val, struct kernel_param *kp)
  4212. {
  4213. /* val must be "md_*" where * is not all digits.
  4214. * We allocate an array with a large free minor number, and
  4215. * set the name to val. val must not already be an active name.
  4216. */
  4217. int len = strlen(val);
  4218. char buf[DISK_NAME_LEN];
  4219. while (len && val[len-1] == '\n')
  4220. len--;
  4221. if (len >= DISK_NAME_LEN)
  4222. return -E2BIG;
  4223. strlcpy(buf, val, len+1);
  4224. if (strncmp(buf, "md_", 3) != 0)
  4225. return -EINVAL;
  4226. return md_alloc(0, buf);
  4227. }
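/* Hedged usage sketch: this setter backs a writable module parameter
 * (called "new_array" in mainline md; the declaration is outside this
 * excerpt), so something like
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 * creates an array whose gendisk is named "md_home".
 */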
  4228. static void md_safemode_timeout(unsigned long data)
  4229. {
  4230. struct mddev *mddev = (struct mddev *) data;
  4231. if (!atomic_read(&mddev->writes_pending)) {
  4232. mddev->safemode = 1;
  4233. if (mddev->external)
  4234. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4235. }
  4236. md_wakeup_thread(mddev->thread);
  4237. }
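/* The safemode timer fires after writes have been idle for a while;
 * marking safemode here lets the main thread record the array as clean
 * in the superblock.
 */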
  4238. static int start_dirty_degraded;
  4239. int md_run(struct mddev *mddev)
  4240. {
  4241. int err;
  4242. struct md_rdev *rdev;
  4243. struct md_personality *pers;
  4244. if (list_empty(&mddev->disks))
/* cannot run an array with no devices... */
  4246. return -EINVAL;
  4247. if (mddev->pers)
  4248. return -EBUSY;
  4249. /* Cannot run until previous stop completes properly */
  4250. if (mddev->sysfs_active)
  4251. return -EBUSY;
  4252. /*
  4253. * Analyze all RAID superblock(s)
  4254. */
  4255. if (!mddev->raid_disks) {
  4256. if (!mddev->persistent)
  4257. return -EINVAL;
  4258. analyze_sbs(mddev);
  4259. }
  4260. if (mddev->level != LEVEL_NONE)
  4261. request_module("md-level-%d", mddev->level);
  4262. else if (mddev->clevel[0])
  4263. request_module("md-%s", mddev->clevel);
  4264. /*
  4265. * Drop all container device buffers, from now on
  4266. * the only valid external interface is through the md
  4267. * device.
  4268. */
  4269. rdev_for_each(rdev, mddev) {
  4270. if (test_bit(Faulty, &rdev->flags))
  4271. continue;
  4272. sync_blockdev(rdev->bdev);
  4273. invalidate_bdev(rdev->bdev);
/* perform some consistency tests on the device.
 * We don't want the data to overlap the metadata;
 * internal bitmap issues have been handled elsewhere.
 */
  4278. if (rdev->meta_bdev) {
  4279. /* Nothing to check */;
  4280. } else if (rdev->data_offset < rdev->sb_start) {
  4281. if (mddev->dev_sectors &&
  4282. rdev->data_offset + mddev->dev_sectors
  4283. > rdev->sb_start) {
  4284. printk("md: %s: data overlaps metadata\n",
  4285. mdname(mddev));
  4286. return -EINVAL;
  4287. }
  4288. } else {
  4289. if (rdev->sb_start + rdev->sb_size/512
  4290. > rdev->data_offset) {
  4291. printk("md: %s: metadata overlaps data\n",
  4292. mdname(mddev));
  4293. return -EINVAL;
  4294. }
  4295. }
  4296. sysfs_notify_dirent_safe(rdev->sysfs_state);
  4297. }
  4298. if (mddev->bio_set == NULL)
  4299. mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
  4300. spin_lock(&pers_lock);
  4301. pers = find_pers(mddev->level, mddev->clevel);
  4302. if (!pers || !try_module_get(pers->owner)) {
  4303. spin_unlock(&pers_lock);
  4304. if (mddev->level != LEVEL_NONE)
  4305. printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
  4306. mddev->level);
  4307. else
  4308. printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
  4309. mddev->clevel);
  4310. return -EINVAL;
  4311. }
  4312. mddev->pers = pers;
  4313. spin_unlock(&pers_lock);
  4314. if (mddev->level != pers->level) {
  4315. mddev->level = pers->level;
  4316. mddev->new_level = pers->level;
  4317. }
  4318. strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
  4319. if (mddev->reshape_position != MaxSector &&
  4320. pers->start_reshape == NULL) {
  4321. /* This personality cannot handle reshaping... */
  4322. mddev->pers = NULL;
  4323. module_put(pers->owner);
  4324. return -EINVAL;
  4325. }
  4326. if (pers->sync_request) {
  4327. /* Warn if this is a potentially silly
  4328. * configuration.
  4329. */
  4330. char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
  4331. struct md_rdev *rdev2;
  4332. int warned = 0;
  4333. rdev_for_each(rdev, mddev)
  4334. rdev_for_each(rdev2, mddev) {
  4335. if (rdev < rdev2 &&
  4336. rdev->bdev->bd_contains ==
  4337. rdev2->bdev->bd_contains) {
  4338. printk(KERN_WARNING
  4339. "%s: WARNING: %s appears to be"
  4340. " on the same physical disk as"
  4341. " %s.\n",
  4342. mdname(mddev),
  4343. bdevname(rdev->bdev,b),
  4344. bdevname(rdev2->bdev,b2));
  4345. warned = 1;
  4346. }
  4347. }
  4348. if (warned)
  4349. printk(KERN_WARNING
  4350. "True protection against single-disk"
  4351. " failure might be compromised.\n");
  4352. }
  4353. mddev->recovery = 0;
/* may be overridden by personality */
  4355. mddev->resync_max_sectors = mddev->dev_sectors;
  4356. mddev->ok_start_degraded = start_dirty_degraded;
  4357. if (start_readonly && mddev->ro == 0)
  4358. mddev->ro = 2; /* read-only, but switch on first write */
  4359. err = mddev->pers->run(mddev);
  4360. if (err)
  4361. printk(KERN_ERR "md: pers->run() failed ...\n");
  4362. else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
  4363. WARN_ONCE(!mddev->external_size, "%s: default size too small,"
  4364. " but 'external_size' not in effect?\n", __func__);
  4365. printk(KERN_ERR
  4366. "md: invalid array_size %llu > default size %llu\n",
  4367. (unsigned long long)mddev->array_sectors / 2,
  4368. (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
  4369. err = -EINVAL;
  4370. mddev->pers->stop(mddev);
  4371. }
  4372. if (err == 0 && mddev->pers->sync_request &&
  4373. (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
  4374. err = bitmap_create(mddev);
  4375. if (err) {
  4376. printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
  4377. mdname(mddev), err);
  4378. mddev->pers->stop(mddev);
  4379. }
  4380. }
  4381. if (err) {
  4382. module_put(mddev->pers->owner);
  4383. mddev->pers = NULL;
  4384. bitmap_destroy(mddev);
  4385. return err;
  4386. }
  4387. if (mddev->pers->sync_request) {
  4388. if (mddev->kobj.sd &&
  4389. sysfs_create_group(&mddev->kobj, &md_redundancy_group))
  4390. printk(KERN_WARNING
  4391. "md: cannot register extra attributes for %s\n",
  4392. mdname(mddev));
  4393. mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
  4394. } else if (mddev->ro == 2) /* auto-readonly not meaningful */
  4395. mddev->ro = 0;
  4396. atomic_set(&mddev->writes_pending,0);
  4397. atomic_set(&mddev->max_corr_read_errors,
  4398. MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
  4399. mddev->safemode = 0;
  4400. mddev->safemode_timer.function = md_safemode_timeout;
  4401. mddev->safemode_timer.data = (unsigned long) mddev;
  4402. mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
  4403. mddev->in_sync = 1;
  4404. smp_wmb();
  4405. mddev->ready = 1;
  4406. rdev_for_each(rdev, mddev)
  4407. if (rdev->raid_disk >= 0)
  4408. if (sysfs_link_rdev(mddev, rdev))
  4409. /* failure here is OK */;
  4410. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4411. if (mddev->flags & MD_UPDATE_SB_FLAGS)
  4412. md_update_sb(mddev, 0);
  4413. md_new_event(mddev);
  4414. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4415. sysfs_notify_dirent_safe(mddev->sysfs_action);
  4416. sysfs_notify(&mddev->kobj, NULL, "degraded");
  4417. return 0;
  4418. }
  4419. EXPORT_SYMBOL_GPL(md_run);
  4420. static int do_md_run(struct mddev *mddev)
  4421. {
  4422. int err;
  4423. err = md_run(mddev);
  4424. if (err)
  4425. goto out;
  4426. err = bitmap_load(mddev);
  4427. if (err) {
  4428. bitmap_destroy(mddev);
  4429. goto out;
  4430. }
  4431. md_wakeup_thread(mddev->thread);
  4432. md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
  4433. set_capacity(mddev->gendisk, mddev->array_sectors);
  4434. revalidate_disk(mddev->gendisk);
  4435. mddev->changed = 1;
  4436. kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
  4437. out:
  4438. return err;
  4439. }
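/* do_md_run() builds on md_run(): it additionally loads the bitmap,
 * publishes the array capacity to the block layer, and sends a change
 * uevent so userspace notices the now-active device.
 */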
  4440. static int restart_array(struct mddev *mddev)
  4441. {
  4442. struct gendisk *disk = mddev->gendisk;
  4443. /* Complain if it has no devices */
  4444. if (list_empty(&mddev->disks))
  4445. return -ENXIO;
  4446. if (!mddev->pers)
  4447. return -EINVAL;
  4448. if (!mddev->ro)
  4449. return -EBUSY;
  4450. mddev->safemode = 0;
  4451. mddev->ro = 0;
  4452. set_disk_ro(disk, 0);
  4453. printk(KERN_INFO "md: %s switched to read-write mode.\n",
  4454. mdname(mddev));
  4455. /* Kick recovery or resync if necessary */
  4456. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4457. md_wakeup_thread(mddev->thread);
  4458. md_wakeup_thread(mddev->sync_thread);
  4459. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4460. return 0;
  4461. }
  4462. static void md_clean(struct mddev *mddev)
  4463. {
  4464. mddev->array_sectors = 0;
  4465. mddev->external_size = 0;
  4466. mddev->dev_sectors = 0;
  4467. mddev->raid_disks = 0;
  4468. mddev->recovery_cp = 0;
  4469. mddev->resync_min = 0;
  4470. mddev->resync_max = MaxSector;
  4471. mddev->reshape_position = MaxSector;
  4472. mddev->external = 0;
  4473. mddev->persistent = 0;
  4474. mddev->level = LEVEL_NONE;
  4475. mddev->clevel[0] = 0;
  4476. mddev->flags = 0;
  4477. mddev->ro = 0;
  4478. mddev->metadata_type[0] = 0;
  4479. mddev->chunk_sectors = 0;
  4480. mddev->ctime = mddev->utime = 0;
  4481. mddev->layout = 0;
  4482. mddev->max_disks = 0;
  4483. mddev->events = 0;
  4484. mddev->can_decrease_events = 0;
  4485. mddev->delta_disks = 0;
  4486. mddev->reshape_backwards = 0;
  4487. mddev->new_level = LEVEL_NONE;
  4488. mddev->new_layout = 0;
  4489. mddev->new_chunk_sectors = 0;
  4490. mddev->curr_resync = 0;
  4491. atomic64_set(&mddev->resync_mismatches, 0);
  4492. mddev->suspend_lo = mddev->suspend_hi = 0;
  4493. mddev->sync_speed_min = mddev->sync_speed_max = 0;
  4494. mddev->recovery = 0;
  4495. mddev->in_sync = 0;
  4496. mddev->changed = 0;
  4497. mddev->degraded = 0;
  4498. mddev->safemode = 0;
  4499. mddev->merge_check_needed = 0;
  4500. mddev->bitmap_info.offset = 0;
  4501. mddev->bitmap_info.default_offset = 0;
  4502. mddev->bitmap_info.default_space = 0;
  4503. mddev->bitmap_info.chunksize = 0;
  4504. mddev->bitmap_info.daemon_sleep = 0;
  4505. mddev->bitmap_info.max_write_behind = 0;
  4506. }
  4507. static void __md_stop_writes(struct mddev *mddev)
  4508. {
  4509. set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4510. if (mddev->sync_thread) {
  4511. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  4512. md_reap_sync_thread(mddev);
  4513. }
  4514. del_timer_sync(&mddev->safemode_timer);
  4515. bitmap_flush(mddev);
  4516. md_super_wait(mddev);
  4517. if (mddev->ro == 0 &&
  4518. (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
  4519. /* mark array as shutdown cleanly */
  4520. mddev->in_sync = 1;
  4521. md_update_sb(mddev, 1);
  4522. }
  4523. }
  4524. void md_stop_writes(struct mddev *mddev)
  4525. {
  4526. mddev_lock_nointr(mddev);
  4527. __md_stop_writes(mddev);
  4528. mddev_unlock(mddev);
  4529. }
  4530. EXPORT_SYMBOL_GPL(md_stop_writes);
  4531. static void __md_stop(struct mddev *mddev)
  4532. {
  4533. mddev->ready = 0;
  4534. mddev->pers->stop(mddev);
  4535. if (mddev->pers->sync_request && mddev->to_remove == NULL)
  4536. mddev->to_remove = &md_redundancy_group;
  4537. module_put(mddev->pers->owner);
  4538. mddev->pers = NULL;
  4539. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4540. }
  4541. void md_stop(struct mddev *mddev)
  4542. {
/* stop the array and free any attached data structures.
 * This is called from dm-raid
 */
  4546. __md_stop(mddev);
  4547. bitmap_destroy(mddev);
  4548. if (mddev->bio_set)
  4549. bioset_free(mddev->bio_set);
  4550. }
  4551. EXPORT_SYMBOL_GPL(md_stop);
  4552. static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
  4553. {
  4554. int err = 0;
  4555. int did_freeze = 0;
  4556. if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
  4557. did_freeze = 1;
  4558. set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4559. md_wakeup_thread(mddev->thread);
  4560. }
  4561. if (mddev->sync_thread) {
  4562. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  4563. /* Thread might be blocked waiting for metadata update
  4564. * which will now never happen */
  4565. wake_up_process(mddev->sync_thread->tsk);
  4566. }
  4567. mddev_unlock(mddev);
  4568. wait_event(resync_wait, mddev->sync_thread == NULL);
  4569. mddev_lock_nointr(mddev);
  4570. mutex_lock(&mddev->open_mutex);
  4571. if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
  4572. mddev->sync_thread ||
  4573. (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
  4574. printk("md: %s still in use.\n",mdname(mddev));
  4575. if (did_freeze) {
  4576. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4577. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4578. md_wakeup_thread(mddev->thread);
  4579. }
  4580. err = -EBUSY;
  4581. goto out;
  4582. }
  4583. if (mddev->pers) {
  4584. __md_stop_writes(mddev);
  4585. err = -ENXIO;
  4586. if (mddev->ro==1)
  4587. goto out;
  4588. mddev->ro = 1;
  4589. set_disk_ro(mddev->gendisk, 1);
  4590. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4591. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4592. md_wakeup_thread(mddev->thread);
  4593. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4594. err = 0;
  4595. }
  4596. out:
  4597. mutex_unlock(&mddev->open_mutex);
  4598. return err;
  4599. }
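/* Note the unlock/wait/relock dance above: the sync thread may need the
 * mddev lock in order to exit, so it cannot be waited for while that
 * lock is held.
 */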
/* mode:
 *   0 - completely stop and disassemble array
 *   2 - stop but do not disassemble array
 */
  4604. static int do_md_stop(struct mddev *mddev, int mode,
  4605. struct block_device *bdev)
  4606. {
  4607. struct gendisk *disk = mddev->gendisk;
  4608. struct md_rdev *rdev;
  4609. int did_freeze = 0;
  4610. if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
  4611. did_freeze = 1;
  4612. set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4613. md_wakeup_thread(mddev->thread);
  4614. }
  4615. if (mddev->sync_thread) {
  4616. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  4617. /* Thread might be blocked waiting for metadata update
  4618. * which will now never happen */
  4619. wake_up_process(mddev->sync_thread->tsk);
  4620. }
  4621. mddev_unlock(mddev);
  4622. wait_event(resync_wait, mddev->sync_thread == NULL);
  4623. mddev_lock_nointr(mddev);
  4624. mutex_lock(&mddev->open_mutex);
  4625. if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
  4626. mddev->sysfs_active ||
  4627. mddev->sync_thread ||
  4628. (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
  4629. printk("md: %s still in use.\n",mdname(mddev));
  4630. mutex_unlock(&mddev->open_mutex);
  4631. if (did_freeze) {
  4632. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  4633. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  4634. md_wakeup_thread(mddev->thread);
  4635. }
  4636. return -EBUSY;
  4637. }
  4638. if (mddev->pers) {
  4639. if (mddev->ro)
  4640. set_disk_ro(disk, 0);
  4641. __md_stop_writes(mddev);
  4642. __md_stop(mddev);
  4643. mddev->queue->merge_bvec_fn = NULL;
  4644. mddev->queue->backing_dev_info.congested_fn = NULL;
  4645. /* tell userspace to handle 'inactive' */
  4646. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4647. rdev_for_each(rdev, mddev)
  4648. if (rdev->raid_disk >= 0)
  4649. sysfs_unlink_rdev(mddev, rdev);
  4650. set_capacity(disk, 0);
  4651. mutex_unlock(&mddev->open_mutex);
  4652. mddev->changed = 1;
  4653. revalidate_disk(disk);
  4654. if (mddev->ro)
  4655. mddev->ro = 0;
  4656. } else
  4657. mutex_unlock(&mddev->open_mutex);
  4658. /*
  4659. * Free resources if final stop
  4660. */
  4661. if (mode == 0) {
  4662. printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
  4663. bitmap_destroy(mddev);
  4664. if (mddev->bitmap_info.file) {
  4665. fput(mddev->bitmap_info.file);
  4666. mddev->bitmap_info.file = NULL;
  4667. }
  4668. mddev->bitmap_info.offset = 0;
  4669. export_array(mddev);
  4670. md_clean(mddev);
  4671. kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
  4672. if (mddev->hold_active == UNTIL_STOP)
  4673. mddev->hold_active = 0;
  4674. }
  4675. blk_integrity_unregister(disk);
  4676. md_new_event(mddev);
  4677. sysfs_notify_dirent_safe(mddev->sysfs_state);
  4678. return 0;
  4679. }
  4680. #ifndef MODULE
  4681. static void autorun_array(struct mddev *mddev)
  4682. {
  4683. struct md_rdev *rdev;
  4684. int err;
  4685. if (list_empty(&mddev->disks))
  4686. return;
  4687. printk(KERN_INFO "md: running: ");
  4688. rdev_for_each(rdev, mddev) {
  4689. char b[BDEVNAME_SIZE];
  4690. printk("<%s>", bdevname(rdev->bdev,b));
  4691. }
  4692. printk("\n");
  4693. err = do_md_run(mddev);
  4694. if (err) {
  4695. printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
  4696. do_md_stop(mddev, 0, NULL);
  4697. }
  4698. }
/*
 * Let's try to run arrays based on all disks that have arrived
 * until now (those are in pending_raid_disks).
 *
 * The method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list.  Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks.  If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count.
 */
  4711. static void autorun_devices(int part)
  4712. {
  4713. struct md_rdev *rdev0, *rdev, *tmp;
  4714. struct mddev *mddev;
  4715. char b[BDEVNAME_SIZE];
  4716. printk(KERN_INFO "md: autorun ...\n");
  4717. while (!list_empty(&pending_raid_disks)) {
  4718. int unit;
  4719. dev_t dev;
  4720. LIST_HEAD(candidates);
  4721. rdev0 = list_entry(pending_raid_disks.next,
  4722. struct md_rdev, same_set);
  4723. printk(KERN_INFO "md: considering %s ...\n",
  4724. bdevname(rdev0->bdev,b));
  4725. INIT_LIST_HEAD(&candidates);
  4726. rdev_for_each_list(rdev, tmp, &pending_raid_disks)
  4727. if (super_90_load(rdev, rdev0, 0) >= 0) {
  4728. printk(KERN_INFO "md: adding %s ...\n",
  4729. bdevname(rdev->bdev,b));
  4730. list_move(&rdev->same_set, &candidates);
  4731. }
  4732. /*
  4733. * now we have a set of devices, with all of them having
  4734. * mostly sane superblocks. It's time to allocate the
  4735. * mddev.
  4736. */
  4737. if (part) {
  4738. dev = MKDEV(mdp_major,
  4739. rdev0->preferred_minor << MdpMinorShift);
  4740. unit = MINOR(dev) >> MdpMinorShift;
  4741. } else {
  4742. dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
  4743. unit = MINOR(dev);
  4744. }
  4745. if (rdev0->preferred_minor != unit) {
  4746. printk(KERN_INFO "md: unit number in %s is bad: %d\n",
  4747. bdevname(rdev0->bdev, b), rdev0->preferred_minor);
  4748. break;
  4749. }
  4750. md_probe(dev, NULL, NULL);
  4751. mddev = mddev_find(dev);
  4752. if (!mddev || !mddev->gendisk) {
  4753. if (mddev)
  4754. mddev_put(mddev);
  4755. printk(KERN_ERR
  4756. "md: cannot allocate memory for md drive.\n");
  4757. break;
  4758. }
  4759. if (mddev_lock(mddev))
  4760. printk(KERN_WARNING "md: %s locked, cannot run\n",
  4761. mdname(mddev));
  4762. else if (mddev->raid_disks || mddev->major_version
  4763. || !list_empty(&mddev->disks)) {
  4764. printk(KERN_WARNING
  4765. "md: %s already running, cannot run %s\n",
  4766. mdname(mddev), bdevname(rdev0->bdev,b));
  4767. mddev_unlock(mddev);
  4768. } else {
  4769. printk(KERN_INFO "md: created %s\n", mdname(mddev));
  4770. mddev->persistent = 1;
  4771. rdev_for_each_list(rdev, tmp, &candidates) {
  4772. list_del_init(&rdev->same_set);
  4773. if (bind_rdev_to_array(rdev, mddev))
  4774. export_rdev(rdev);
  4775. }
  4776. autorun_array(mddev);
  4777. mddev_unlock(mddev);
  4778. }
/* on success, candidates will be empty; on error
 * it won't be...
 */
  4782. rdev_for_each_list(rdev, tmp, &candidates) {
  4783. list_del_init(&rdev->same_set);
  4784. export_rdev(rdev);
  4785. }
  4786. mddev_put(mddev);
  4787. }
  4788. printk(KERN_INFO "md: ... autorun DONE.\n");
  4789. }
  4790. #endif /* !MODULE */
  4791. static int get_version(void __user *arg)
  4792. {
  4793. mdu_version_t ver;
  4794. ver.major = MD_MAJOR_VERSION;
  4795. ver.minor = MD_MINOR_VERSION;
  4796. ver.patchlevel = MD_PATCHLEVEL_VERSION;
  4797. if (copy_to_user(arg, &ver, sizeof(ver)))
  4798. return -EFAULT;
  4799. return 0;
  4800. }
  4801. static int get_array_info(struct mddev *mddev, void __user *arg)
  4802. {
  4803. mdu_array_info_t info;
int nr, working, insync, failed, spare;
  4805. struct md_rdev *rdev;
  4806. nr = working = insync = failed = spare = 0;
  4807. rcu_read_lock();
  4808. rdev_for_each_rcu(rdev, mddev) {
  4809. nr++;
  4810. if (test_bit(Faulty, &rdev->flags))
  4811. failed++;
  4812. else {
  4813. working++;
  4814. if (test_bit(In_sync, &rdev->flags))
  4815. insync++;
  4816. else
  4817. spare++;
  4818. }
  4819. }
  4820. rcu_read_unlock();
  4821. info.major_version = mddev->major_version;
  4822. info.minor_version = mddev->minor_version;
  4823. info.patch_version = MD_PATCHLEVEL_VERSION;
  4824. info.ctime = mddev->ctime;
  4825. info.level = mddev->level;
  4826. info.size = mddev->dev_sectors / 2;
  4827. if (info.size != mddev->dev_sectors / 2) /* overflow */
  4828. info.size = -1;
  4829. info.nr_disks = nr;
  4830. info.raid_disks = mddev->raid_disks;
  4831. info.md_minor = mddev->md_minor;
info.not_persistent = !mddev->persistent;
  4833. info.utime = mddev->utime;
  4834. info.state = 0;
  4835. if (mddev->in_sync)
  4836. info.state = (1<<MD_SB_CLEAN);
  4837. if (mddev->bitmap && mddev->bitmap_info.offset)
  4838. info.state |= (1<<MD_SB_BITMAP_PRESENT);
  4839. info.active_disks = insync;
  4840. info.working_disks = working;
  4841. info.failed_disks = failed;
  4842. info.spare_disks = spare;
  4843. info.layout = mddev->layout;
  4844. info.chunk_size = mddev->chunk_sectors << 9;
  4845. if (copy_to_user(arg, &info, sizeof(info)))
  4846. return -EFAULT;
  4847. return 0;
  4848. }
  4849. static int get_bitmap_file(struct mddev *mddev, void __user * arg)
  4850. {
  4851. mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
  4852. char *ptr, *buf = NULL;
  4853. int err = -ENOMEM;
  4854. file = kmalloc(sizeof(*file), GFP_NOIO);
  4855. if (!file)
  4856. goto out;
  4857. /* bitmap disabled, zero the first byte and copy out */
  4858. if (!mddev->bitmap || !mddev->bitmap->storage.file) {
  4859. file->pathname[0] = '\0';
  4860. goto copy_out;
  4861. }
  4862. buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
  4863. if (!buf)
  4864. goto out;
  4865. ptr = d_path(&mddev->bitmap->storage.file->f_path,
  4866. buf, sizeof(file->pathname));
  4867. if (IS_ERR(ptr))
  4868. goto out;
  4869. strcpy(file->pathname, ptr);
  4870. copy_out:
  4871. err = 0;
  4872. if (copy_to_user(arg, file, sizeof(*file)))
  4873. err = -EFAULT;
  4874. out:
  4875. kfree(buf);
  4876. kfree(file);
  4877. return err;
  4878. }
  4879. static int get_disk_info(struct mddev *mddev, void __user * arg)
  4880. {
  4881. mdu_disk_info_t info;
  4882. struct md_rdev *rdev;
  4883. if (copy_from_user(&info, arg, sizeof(info)))
  4884. return -EFAULT;
  4885. rcu_read_lock();
  4886. rdev = find_rdev_nr_rcu(mddev, info.number);
  4887. if (rdev) {
  4888. info.major = MAJOR(rdev->bdev->bd_dev);
  4889. info.minor = MINOR(rdev->bdev->bd_dev);
  4890. info.raid_disk = rdev->raid_disk;
  4891. info.state = 0;
  4892. if (test_bit(Faulty, &rdev->flags))
  4893. info.state |= (1<<MD_DISK_FAULTY);
  4894. else if (test_bit(In_sync, &rdev->flags)) {
  4895. info.state |= (1<<MD_DISK_ACTIVE);
  4896. info.state |= (1<<MD_DISK_SYNC);
  4897. }
  4898. if (test_bit(WriteMostly, &rdev->flags))
  4899. info.state |= (1<<MD_DISK_WRITEMOSTLY);
  4900. } else {
  4901. info.major = info.minor = 0;
  4902. info.raid_disk = -1;
  4903. info.state = (1<<MD_DISK_REMOVED);
  4904. }
  4905. rcu_read_unlock();
  4906. if (copy_to_user(arg, &info, sizeof(info)))
  4907. return -EFAULT;
  4908. return 0;
  4909. }
  4910. static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
  4911. {
  4912. char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
  4913. struct md_rdev *rdev;
  4914. dev_t dev = MKDEV(info->major,info->minor);
  4915. if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
  4916. return -EOVERFLOW;
  4917. if (!mddev->raid_disks) {
  4918. int err;
  4919. /* expecting a device which has a superblock */
  4920. rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
  4921. if (IS_ERR(rdev)) {
  4922. printk(KERN_WARNING
  4923. "md: md_import_device returned %ld\n",
  4924. PTR_ERR(rdev));
  4925. return PTR_ERR(rdev);
  4926. }
  4927. if (!list_empty(&mddev->disks)) {
  4928. struct md_rdev *rdev0
  4929. = list_entry(mddev->disks.next,
  4930. struct md_rdev, same_set);
  4931. err = super_types[mddev->major_version]
  4932. .load_super(rdev, rdev0, mddev->minor_version);
  4933. if (err < 0) {
  4934. printk(KERN_WARNING
  4935. "md: %s has different UUID to %s\n",
  4936. bdevname(rdev->bdev,b),
  4937. bdevname(rdev0->bdev,b2));
  4938. export_rdev(rdev);
  4939. return -EINVAL;
  4940. }
  4941. }
  4942. err = bind_rdev_to_array(rdev, mddev);
  4943. if (err)
  4944. export_rdev(rdev);
  4945. return err;
  4946. }
  4947. /*
  4948. * add_new_disk can be used once the array is assembled
  4949. * to add "hot spares". They must already have a superblock
  4950. * written
  4951. */
  4952. if (mddev->pers) {
  4953. int err;
  4954. if (!mddev->pers->hot_add_disk) {
  4955. printk(KERN_WARNING
  4956. "%s: personality does not support diskops!\n",
  4957. mdname(mddev));
  4958. return -EINVAL;
  4959. }
  4960. if (mddev->persistent)
  4961. rdev = md_import_device(dev, mddev->major_version,
  4962. mddev->minor_version);
  4963. else
  4964. rdev = md_import_device(dev, -1, -1);
  4965. if (IS_ERR(rdev)) {
  4966. printk(KERN_WARNING
  4967. "md: md_import_device returned %ld\n",
  4968. PTR_ERR(rdev));
  4969. return PTR_ERR(rdev);
  4970. }
  4971. /* set saved_raid_disk if appropriate */
  4972. if (!mddev->persistent) {
  4973. if (info->state & (1<<MD_DISK_SYNC) &&
  4974. info->raid_disk < mddev->raid_disks) {
  4975. rdev->raid_disk = info->raid_disk;
  4976. set_bit(In_sync, &rdev->flags);
  4977. clear_bit(Bitmap_sync, &rdev->flags);
  4978. } else
  4979. rdev->raid_disk = -1;
  4980. rdev->saved_raid_disk = rdev->raid_disk;
  4981. } else
  4982. super_types[mddev->major_version].
  4983. validate_super(mddev, rdev);
  4984. if ((info->state & (1<<MD_DISK_SYNC)) &&
  4985. rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but the event counts
 * don't match, so reject it.
 */
  4989. export_rdev(rdev);
  4990. return -EINVAL;
  4991. }
  4992. clear_bit(In_sync, &rdev->flags); /* just to be sure */
  4993. if (info->state & (1<<MD_DISK_WRITEMOSTLY))
  4994. set_bit(WriteMostly, &rdev->flags);
  4995. else
  4996. clear_bit(WriteMostly, &rdev->flags);
  4997. rdev->raid_disk = -1;
  4998. err = bind_rdev_to_array(rdev, mddev);
  4999. if (!err && !mddev->pers->hot_remove_disk) {
/* If there is hot_add_disk but no hot_remove_disk,
 * then disks are only added for geometry changes
 * and should be activated immediately.
 */
  5004. super_types[mddev->major_version].
  5005. validate_super(mddev, rdev);
  5006. err = mddev->pers->hot_add_disk(mddev, rdev);
  5007. if (err)
  5008. unbind_rdev_from_array(rdev);
  5009. }
  5010. if (err)
  5011. export_rdev(rdev);
  5012. else
  5013. sysfs_notify_dirent_safe(rdev->sysfs_state);
  5014. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5015. if (mddev->degraded)
  5016. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  5017. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5018. if (!err)
  5019. md_new_event(mddev);
  5020. md_wakeup_thread(mddev->thread);
  5021. return err;
  5022. }
  5023. /* otherwise, add_new_disk is only allowed
  5024. * for major_version==0 superblocks
  5025. */
  5026. if (mddev->major_version != 0) {
  5027. printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
  5028. mdname(mddev));
  5029. return -EINVAL;
  5030. }
  5031. if (!(info->state & (1<<MD_DISK_FAULTY))) {
  5032. int err;
  5033. rdev = md_import_device(dev, -1, 0);
  5034. if (IS_ERR(rdev)) {
  5035. printk(KERN_WARNING
  5036. "md: error, md_import_device() returned %ld\n",
  5037. PTR_ERR(rdev));
  5038. return PTR_ERR(rdev);
  5039. }
  5040. rdev->desc_nr = info->number;
  5041. if (info->raid_disk < mddev->raid_disks)
  5042. rdev->raid_disk = info->raid_disk;
  5043. else
  5044. rdev->raid_disk = -1;
  5045. if (rdev->raid_disk < mddev->raid_disks)
  5046. if (info->state & (1<<MD_DISK_SYNC))
  5047. set_bit(In_sync, &rdev->flags);
  5048. if (info->state & (1<<MD_DISK_WRITEMOSTLY))
  5049. set_bit(WriteMostly, &rdev->flags);
  5050. if (!mddev->persistent) {
  5051. printk(KERN_INFO "md: nonpersistent superblock ...\n");
  5052. rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
  5053. } else
  5054. rdev->sb_start = calc_dev_sboffset(rdev);
  5055. rdev->sectors = rdev->sb_start;
  5056. err = bind_rdev_to_array(rdev, mddev);
  5057. if (err) {
  5058. export_rdev(rdev);
  5059. return err;
  5060. }
  5061. }
  5062. return 0;
  5063. }
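/* Summary: add_new_disk() serves three cases - assembling an array from
 * superblocks (raid_disks not yet set), hot-adding a spare to a running
 * array (mddev->pers set), and building a 0.90 array device-by-device
 * before the first run.
 */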
  5064. static int hot_remove_disk(struct mddev *mddev, dev_t dev)
  5065. {
  5066. char b[BDEVNAME_SIZE];
  5067. struct md_rdev *rdev;
  5068. rdev = find_rdev(mddev, dev);
  5069. if (!rdev)
  5070. return -ENXIO;
  5071. clear_bit(Blocked, &rdev->flags);
  5072. remove_and_add_spares(mddev, rdev);
  5073. if (rdev->raid_disk >= 0)
  5074. goto busy;
  5075. kick_rdev_from_array(rdev);
  5076. md_update_sb(mddev, 1);
  5077. md_new_event(mddev);
  5078. return 0;
  5079. busy:
  5080. printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
  5081. bdevname(rdev->bdev,b), mdname(mddev));
  5082. return -EBUSY;
  5083. }
  5084. static int hot_add_disk(struct mddev *mddev, dev_t dev)
  5085. {
  5086. char b[BDEVNAME_SIZE];
  5087. int err;
  5088. struct md_rdev *rdev;
  5089. if (!mddev->pers)
  5090. return -ENODEV;
  5091. if (mddev->major_version != 0) {
  5092. printk(KERN_WARNING "%s: HOT_ADD may only be used with"
  5093. " version-0 superblocks.\n",
  5094. mdname(mddev));
  5095. return -EINVAL;
  5096. }
  5097. if (!mddev->pers->hot_add_disk) {
  5098. printk(KERN_WARNING
  5099. "%s: personality does not support diskops!\n",
  5100. mdname(mddev));
  5101. return -EINVAL;
  5102. }
  5103. rdev = md_import_device(dev, -1, 0);
  5104. if (IS_ERR(rdev)) {
  5105. printk(KERN_WARNING
  5106. "md: error, md_import_device() returned %ld\n",
  5107. PTR_ERR(rdev));
  5108. return -EINVAL;
  5109. }
  5110. if (mddev->persistent)
  5111. rdev->sb_start = calc_dev_sboffset(rdev);
  5112. else
  5113. rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
  5114. rdev->sectors = rdev->sb_start;
  5115. if (test_bit(Faulty, &rdev->flags)) {
  5116. printk(KERN_WARNING
  5117. "md: can not hot-add faulty %s disk to %s!\n",
  5118. bdevname(rdev->bdev,b), mdname(mddev));
  5119. err = -EINVAL;
  5120. goto abort_export;
  5121. }
  5122. clear_bit(In_sync, &rdev->flags);
  5123. rdev->desc_nr = -1;
  5124. rdev->saved_raid_disk = -1;
  5125. err = bind_rdev_to_array(rdev, mddev);
  5126. if (err)
  5127. goto abort_export;
/*
 * The rest had better be atomic: disk failures can be
 * noticed in interrupt context ...
 */
  5132. rdev->raid_disk = -1;
  5133. md_update_sb(mddev, 1);
  5134. /*
  5135. * Kick recovery, maybe this spare has to be added to the
  5136. * array immediately.
  5137. */
  5138. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5139. md_wakeup_thread(mddev->thread);
  5140. md_new_event(mddev);
  5141. return 0;
  5142. abort_export:
  5143. export_rdev(rdev);
  5144. return err;
  5145. }
  5146. static int set_bitmap_file(struct mddev *mddev, int fd)
  5147. {
  5148. int err = 0;
  5149. if (mddev->pers) {
  5150. if (!mddev->pers->quiesce || !mddev->thread)
  5151. return -EBUSY;
  5152. if (mddev->recovery || mddev->sync_thread)
  5153. return -EBUSY;
  5154. /* we should be able to change the bitmap.. */
  5155. }
  5156. if (fd >= 0) {
  5157. struct inode *inode;
  5158. if (mddev->bitmap)
  5159. return -EEXIST; /* cannot add when bitmap is present */
  5160. mddev->bitmap_info.file = fget(fd);
  5161. if (mddev->bitmap_info.file == NULL) {
  5162. printk(KERN_ERR "%s: error: failed to get bitmap file\n",
  5163. mdname(mddev));
  5164. return -EBADF;
  5165. }
  5166. inode = mddev->bitmap_info.file->f_mapping->host;
  5167. if (!S_ISREG(inode->i_mode)) {
  5168. printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
  5169. mdname(mddev));
  5170. err = -EBADF;
  5171. } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
  5173. mdname(mddev));
  5174. err = -EBADF;
  5175. } else if (atomic_read(&inode->i_writecount) != 1) {
  5176. printk(KERN_ERR "%s: error: bitmap file is already in use\n",
  5177. mdname(mddev));
  5178. err = -EBUSY;
  5179. }
  5180. if (err) {
  5181. fput(mddev->bitmap_info.file);
  5182. mddev->bitmap_info.file = NULL;
  5183. return err;
  5184. }
  5185. mddev->bitmap_info.offset = 0; /* file overrides offset */
  5186. } else if (mddev->bitmap == NULL)
  5187. return -ENOENT; /* cannot remove what isn't there */
  5188. err = 0;
  5189. if (mddev->pers) {
  5190. mddev->pers->quiesce(mddev, 1);
  5191. if (fd >= 0) {
  5192. err = bitmap_create(mddev);
  5193. if (!err)
  5194. err = bitmap_load(mddev);
  5195. }
  5196. if (fd < 0 || err) {
  5197. bitmap_destroy(mddev);
  5198. fd = -1; /* make sure to put the file */
  5199. }
  5200. mddev->pers->quiesce(mddev, 0);
  5201. }
  5202. if (fd < 0) {
  5203. if (mddev->bitmap_info.file)
  5204. fput(mddev->bitmap_info.file);
  5205. mddev->bitmap_info.file = NULL;
  5206. }
  5207. return err;
  5208. }
/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and, together with
 * level, size, not_persistent, layout and chunksize, determines the
 * shape of the array.
 * This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style superblocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
 * superblock handler wishes to interpret them.
 */
  5222. static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
  5223. {
  5224. if (info->raid_disks == 0) {
  5225. /* just setting version number for superblock loading */
  5226. if (info->major_version < 0 ||
  5227. info->major_version >= ARRAY_SIZE(super_types) ||
  5228. super_types[info->major_version].name == NULL) {
  5229. /* maybe try to auto-load a module? */
  5230. printk(KERN_INFO
  5231. "md: superblock version %d not known\n",
  5232. info->major_version);
  5233. return -EINVAL;
  5234. }
  5235. mddev->major_version = info->major_version;
  5236. mddev->minor_version = info->minor_version;
  5237. mddev->patch_version = info->patch_version;
  5238. mddev->persistent = !info->not_persistent;
  5239. /* ensure mddev_put doesn't delete this now that there
  5240. * is some minimal configuration.
  5241. */
  5242. mddev->ctime = get_seconds();
  5243. return 0;
  5244. }
  5245. mddev->major_version = MD_MAJOR_VERSION;
  5246. mddev->minor_version = MD_MINOR_VERSION;
  5247. mddev->patch_version = MD_PATCHLEVEL_VERSION;
  5248. mddev->ctime = get_seconds();
  5249. mddev->level = info->level;
  5250. mddev->clevel[0] = 0;
  5251. mddev->dev_sectors = 2 * (sector_t)info->size;
  5252. mddev->raid_disks = info->raid_disks;
/* don't set md_minor, it is determined by which /dev/md* was
 * opened
 */
  5256. if (info->state & (1<<MD_SB_CLEAN))
  5257. mddev->recovery_cp = MaxSector;
  5258. else
  5259. mddev->recovery_cp = 0;
  5260. mddev->persistent = ! info->not_persistent;
  5261. mddev->external = 0;
  5262. mddev->layout = info->layout;
  5263. mddev->chunk_sectors = info->chunk_size >> 9;
  5264. mddev->max_disks = MD_SB_DISKS;
  5265. if (mddev->persistent)
  5266. mddev->flags = 0;
  5267. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  5268. mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
  5269. mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
  5270. mddev->bitmap_info.offset = 0;
  5271. mddev->reshape_position = MaxSector;
  5272. /*
  5273. * Generate a 128 bit UUID
  5274. */
  5275. get_random_bytes(mddev->uuid, 16);
  5276. mddev->new_level = mddev->level;
  5277. mddev->new_chunk_sectors = mddev->chunk_sectors;
  5278. mddev->new_layout = mddev->layout;
  5279. mddev->delta_disks = 0;
  5280. mddev->reshape_backwards = 0;
  5281. return 0;
  5282. }
  5283. void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
  5284. {
  5285. WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
  5286. if (mddev->external_size)
  5287. return;
  5288. mddev->array_sectors = array_sectors;
  5289. }
  5290. EXPORT_SYMBOL(md_set_array_sectors);
  5291. static int update_size(struct mddev *mddev, sector_t num_sectors)
  5292. {
  5293. struct md_rdev *rdev;
  5294. int rv;
  5295. int fit = (num_sectors == 0);
  5296. if (mddev->pers->resize == NULL)
  5297. return -EINVAL;
  5298. /* The "num_sectors" is the number of sectors of each device that
  5299. * is used. This can only make sense for arrays with redundancy.
  5300. * linear and raid0 always use whatever space is available. We can only
  5301. * consider changing this number if no resync or reconstruction is
  5302. * happening, and if the new size is acceptable. It must fit before the
  5303. * sb_start or, if that is <data_offset, it must fit before the size
  5304. * of each device. If num_sectors is zero, we find the largest size
  5305. * that fits.
  5306. */
  5307. if (mddev->sync_thread)
  5308. return -EBUSY;
  5309. if (mddev->ro)
  5310. return -EROFS;
  5311. rdev_for_each(rdev, mddev) {
  5312. sector_t avail = rdev->sectors;
  5313. if (fit && (num_sectors == 0 || num_sectors > avail))
  5314. num_sectors = avail;
  5315. if (avail < num_sectors)
  5316. return -ENOSPC;
  5317. }
  5318. rv = mddev->pers->resize(mddev, num_sectors);
  5319. if (!rv)
  5320. revalidate_disk(mddev->gendisk);
  5321. return rv;
  5322. }
  5323. static int update_raid_disks(struct mddev *mddev, int raid_disks)
  5324. {
  5325. int rv;
  5326. struct md_rdev *rdev;
  5327. /* change the number of raid disks */
  5328. if (mddev->pers->check_reshape == NULL)
  5329. return -EINVAL;
  5330. if (mddev->ro)
  5331. return -EROFS;
  5332. if (raid_disks <= 0 ||
  5333. (mddev->max_disks && raid_disks >= mddev->max_disks))
  5334. return -EINVAL;
  5335. if (mddev->sync_thread || mddev->reshape_position != MaxSector)
  5336. return -EBUSY;
  5337. rdev_for_each(rdev, mddev) {
  5338. if (mddev->raid_disks < raid_disks &&
  5339. rdev->data_offset < rdev->new_data_offset)
  5340. return -EINVAL;
  5341. if (mddev->raid_disks > raid_disks &&
  5342. rdev->data_offset > rdev->new_data_offset)
  5343. return -EINVAL;
  5344. }
  5345. mddev->delta_disks = raid_disks - mddev->raid_disks;
  5346. if (mddev->delta_disks < 0)
  5347. mddev->reshape_backwards = 1;
  5348. else if (mddev->delta_disks > 0)
  5349. mddev->reshape_backwards = 0;
  5350. rv = mddev->pers->check_reshape(mddev);
  5351. if (rv < 0) {
  5352. mddev->delta_disks = 0;
  5353. mddev->reshape_backwards = 0;
  5354. }
  5355. return rv;
  5356. }
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent,
 * layout and chunk_size fields in the info are checked against
 * the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
  5365. static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
  5366. {
  5367. int rv = 0;
  5368. int cnt = 0;
  5369. int state = 0;
/* calculate expected state, ignoring low bits */
  5371. if (mddev->bitmap && mddev->bitmap_info.offset)
  5372. state |= (1 << MD_SB_BITMAP_PRESENT);
  5373. if (mddev->major_version != info->major_version ||
  5374. mddev->minor_version != info->minor_version ||
  5375. /* mddev->patch_version != info->patch_version || */
  5376. mddev->ctime != info->ctime ||
  5377. mddev->level != info->level ||
  5378. /* mddev->layout != info->layout || */
  5379. !mddev->persistent != info->not_persistent||
  5380. mddev->chunk_sectors != info->chunk_size >> 9 ||
  5381. /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
  5382. ((state^info->state) & 0xfffffe00)
  5383. )
  5384. return -EINVAL;
  5385. /* Check there is only one change */
  5386. if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
  5387. cnt++;
  5388. if (mddev->raid_disks != info->raid_disks)
  5389. cnt++;
  5390. if (mddev->layout != info->layout)
  5391. cnt++;
  5392. if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
  5393. cnt++;
  5394. if (cnt == 0)
  5395. return 0;
  5396. if (cnt > 1)
  5397. return -EINVAL;
  5398. if (mddev->layout != info->layout) {
  5399. /* Change layout
  5400. * we don't need to do anything at the md level, the
  5401. * personality will take care of it all.
  5402. */
  5403. if (mddev->pers->check_reshape == NULL)
  5404. return -EINVAL;
  5405. else {
  5406. mddev->new_layout = info->layout;
  5407. rv = mddev->pers->check_reshape(mddev);
  5408. if (rv)
  5409. mddev->new_layout = mddev->layout;
  5410. return rv;
  5411. }
  5412. }
  5413. if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
  5414. rv = update_size(mddev, (sector_t)info->size * 2);
  5415. if (mddev->raid_disks != info->raid_disks)
  5416. rv = update_raid_disks(mddev, info->raid_disks);
  5417. if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
  5418. if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
  5419. return -EINVAL;
  5420. if (mddev->recovery || mddev->sync_thread)
  5421. return -EBUSY;
  5422. if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
  5423. /* add the bitmap */
  5424. if (mddev->bitmap)
  5425. return -EEXIST;
  5426. if (mddev->bitmap_info.default_offset == 0)
  5427. return -EINVAL;
  5428. mddev->bitmap_info.offset =
  5429. mddev->bitmap_info.default_offset;
  5430. mddev->bitmap_info.space =
  5431. mddev->bitmap_info.default_space;
  5432. mddev->pers->quiesce(mddev, 1);
  5433. rv = bitmap_create(mddev);
  5434. if (!rv)
  5435. rv = bitmap_load(mddev);
  5436. if (rv)
  5437. bitmap_destroy(mddev);
  5438. mddev->pers->quiesce(mddev, 0);
  5439. } else {
  5440. /* remove the bitmap */
  5441. if (!mddev->bitmap)
  5442. return -ENOENT;
  5443. if (mddev->bitmap->storage.file)
  5444. return -EINVAL;
  5445. mddev->pers->quiesce(mddev, 1);
  5446. bitmap_destroy(mddev);
  5447. mddev->pers->quiesce(mddev, 0);
  5448. mddev->bitmap_info.offset = 0;
  5449. }
  5450. }
  5451. md_update_sb(mddev, 1);
  5452. return rv;
  5453. }
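/*
 * Editor's sketch (assumption, not driver code): update_array_info() is
 * reached from userspace through the SET_ARRAY_INFO ioctl on a running
 * array, and accepts only one change per call.  A hypothetical helper
 * that grows an array by one disk would therefore read the current info
 * back and touch nothing but raid_disks:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	static int grow_by_one_disk(int md_fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		if (ioctl(md_fd, GET_ARRAY_INFO, &info) < 0)
 *			return -1;
 *		info.raid_disks += 1;
 *		return ioctl(md_fd, SET_ARRAY_INFO, &info);
 *	}
 */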
  5454. static int set_disk_faulty(struct mddev *mddev, dev_t dev)
  5455. {
  5456. struct md_rdev *rdev;
  5457. int err = 0;
  5458. if (mddev->pers == NULL)
  5459. return -ENODEV;
  5460. rcu_read_lock();
  5461. rdev = find_rdev_rcu(mddev, dev);
  5462. if (!rdev)
  5463. err = -ENODEV;
  5464. else {
  5465. md_error(mddev, rdev);
  5466. if (!test_bit(Faulty, &rdev->flags))
  5467. err = -EBUSY;
  5468. }
  5469. rcu_read_unlock();
  5470. return err;
  5471. }
  5472. /*
5473. /* We have a problem here: there is no easy way to give a CHS
5474. * virtual geometry. We currently pretend that we have 2 heads and
5475. * 4 sectors per track (with a BIG number of cylinders...). This drives
  5476. * dosfs just mad... ;-)
  5477. */
  5478. static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  5479. {
  5480. struct mddev *mddev = bdev->bd_disk->private_data;
  5481. geo->heads = 2;
  5482. geo->sectors = 4;
  5483. geo->cylinders = mddev->array_sectors / 8;
  5484. return 0;
  5485. }
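/*
 * Worked example (editor's note): the fake geometry above round-trips
 * the capacity exactly, since
 *	heads * sectors * cylinders = 2 * 4 * (array_sectors / 8)
 *	                            = array_sectors.
 */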
  5486. static inline bool md_ioctl_valid(unsigned int cmd)
  5487. {
  5488. switch (cmd) {
  5489. case ADD_NEW_DISK:
  5490. case BLKROSET:
  5491. case GET_ARRAY_INFO:
  5492. case GET_BITMAP_FILE:
  5493. case GET_DISK_INFO:
  5494. case HOT_ADD_DISK:
  5495. case HOT_REMOVE_DISK:
  5496. case RAID_AUTORUN:
  5497. case RAID_VERSION:
  5498. case RESTART_ARRAY_RW:
  5499. case RUN_ARRAY:
  5500. case SET_ARRAY_INFO:
  5501. case SET_BITMAP_FILE:
  5502. case SET_DISK_FAULTY:
  5503. case STOP_ARRAY:
  5504. case STOP_ARRAY_RO:
  5505. return true;
  5506. default:
  5507. return false;
  5508. }
  5509. }
  5510. static int md_ioctl(struct block_device *bdev, fmode_t mode,
  5511. unsigned int cmd, unsigned long arg)
  5512. {
  5513. int err = 0;
  5514. void __user *argp = (void __user *)arg;
  5515. struct mddev *mddev = NULL;
  5516. int ro;
  5517. if (!md_ioctl_valid(cmd))
  5518. return -ENOTTY;
  5519. switch (cmd) {
  5520. case RAID_VERSION:
  5521. case GET_ARRAY_INFO:
  5522. case GET_DISK_INFO:
  5523. break;
  5524. default:
  5525. if (!capable(CAP_SYS_ADMIN))
  5526. return -EACCES;
  5527. }
  5528. /*
  5529. * Commands dealing with the RAID driver but not any
  5530. * particular array:
  5531. */
  5532. switch (cmd) {
  5533. case RAID_VERSION:
  5534. err = get_version(argp);
  5535. goto out;
  5536. #ifndef MODULE
  5537. case RAID_AUTORUN:
  5538. err = 0;
  5539. autostart_arrays(arg);
  5540. goto out;
  5541. #endif
  5542. default:;
  5543. }
  5544. /*
  5545. * Commands creating/starting a new array:
  5546. */
  5547. mddev = bdev->bd_disk->private_data;
  5548. if (!mddev) {
  5549. BUG();
  5550. goto out;
  5551. }
5552. /* Some actions do not require the mutex */
  5553. switch (cmd) {
  5554. case GET_ARRAY_INFO:
  5555. if (!mddev->raid_disks && !mddev->external)
  5556. err = -ENODEV;
  5557. else
  5558. err = get_array_info(mddev, argp);
  5559. goto out;
  5560. case GET_DISK_INFO:
  5561. if (!mddev->raid_disks && !mddev->external)
  5562. err = -ENODEV;
  5563. else
  5564. err = get_disk_info(mddev, argp);
  5565. goto out;
  5566. case SET_DISK_FAULTY:
  5567. err = set_disk_faulty(mddev, new_decode_dev(arg));
  5568. goto out;
  5569. }
  5570. if (cmd == ADD_NEW_DISK)
  5571. /* need to ensure md_delayed_delete() has completed */
  5572. flush_workqueue(md_misc_wq);
  5573. if (cmd == HOT_REMOVE_DISK)
  5574. /* need to ensure recovery thread has run */
  5575. wait_event_interruptible_timeout(mddev->sb_wait,
  5576. !test_bit(MD_RECOVERY_NEEDED,
5577. &mddev->recovery),
  5578. msecs_to_jiffies(5000));
  5579. if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
  5580. /* Need to flush page cache, and ensure no-one else opens
  5581. * and writes
  5582. */
  5583. mutex_lock(&mddev->open_mutex);
  5584. if (mddev->pers && atomic_read(&mddev->openers) > 1) {
  5585. mutex_unlock(&mddev->open_mutex);
  5586. err = -EBUSY;
  5587. goto out;
  5588. }
  5589. set_bit(MD_STILL_CLOSED, &mddev->flags);
  5590. mutex_unlock(&mddev->open_mutex);
  5591. sync_blockdev(bdev);
  5592. }
  5593. err = mddev_lock(mddev);
  5594. if (err) {
  5595. printk(KERN_INFO
  5596. "md: ioctl lock interrupted, reason %d, cmd %d\n",
  5597. err, cmd);
  5598. goto out;
  5599. }
  5600. if (cmd == SET_ARRAY_INFO) {
  5601. mdu_array_info_t info;
  5602. if (!arg)
  5603. memset(&info, 0, sizeof(info));
  5604. else if (copy_from_user(&info, argp, sizeof(info))) {
  5605. err = -EFAULT;
  5606. goto unlock;
  5607. }
  5608. if (mddev->pers) {
  5609. err = update_array_info(mddev, &info);
  5610. if (err) {
  5611. printk(KERN_WARNING "md: couldn't update"
  5612. " array info. %d\n", err);
  5613. goto unlock;
  5614. }
  5615. goto unlock;
  5616. }
  5617. if (!list_empty(&mddev->disks)) {
  5618. printk(KERN_WARNING
  5619. "md: array %s already has disks!\n",
  5620. mdname(mddev));
  5621. err = -EBUSY;
  5622. goto unlock;
  5623. }
  5624. if (mddev->raid_disks) {
  5625. printk(KERN_WARNING
  5626. "md: array %s already initialised!\n",
  5627. mdname(mddev));
  5628. err = -EBUSY;
  5629. goto unlock;
  5630. }
  5631. err = set_array_info(mddev, &info);
  5632. if (err) {
  5633. printk(KERN_WARNING "md: couldn't set"
  5634. " array info. %d\n", err);
  5635. goto unlock;
  5636. }
  5637. goto unlock;
  5638. }
  5639. /*
  5640. * Commands querying/configuring an existing array:
  5641. */
  5642. /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
  5643. * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
  5644. if ((!mddev->raid_disks && !mddev->external)
  5645. && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
  5646. && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
  5647. && cmd != GET_BITMAP_FILE) {
  5648. err = -ENODEV;
  5649. goto unlock;
  5650. }
  5651. /*
  5652. * Commands even a read-only array can execute:
  5653. */
  5654. switch (cmd) {
  5655. case GET_BITMAP_FILE:
  5656. err = get_bitmap_file(mddev, argp);
  5657. goto unlock;
  5658. case RESTART_ARRAY_RW:
  5659. err = restart_array(mddev);
  5660. goto unlock;
  5661. case STOP_ARRAY:
  5662. err = do_md_stop(mddev, 0, bdev);
  5663. goto unlock;
  5664. case STOP_ARRAY_RO:
  5665. err = md_set_readonly(mddev, bdev);
  5666. goto unlock;
  5667. case HOT_REMOVE_DISK:
  5668. err = hot_remove_disk(mddev, new_decode_dev(arg));
  5669. goto unlock;
  5670. case ADD_NEW_DISK:
  5671. /* We can support ADD_NEW_DISK on read-only arrays
5672. * only if we are re-adding a preexisting device.
  5673. * So require mddev->pers and MD_DISK_SYNC.
  5674. */
  5675. if (mddev->pers) {
  5676. mdu_disk_info_t info;
  5677. if (copy_from_user(&info, argp, sizeof(info)))
  5678. err = -EFAULT;
  5679. else if (!(info.state & (1<<MD_DISK_SYNC)))
  5680. /* Need to clear read-only for this */
  5681. break;
  5682. else
  5683. err = add_new_disk(mddev, &info);
  5684. goto unlock;
  5685. }
  5686. break;
  5687. case BLKROSET:
  5688. if (get_user(ro, (int __user *)(arg))) {
  5689. err = -EFAULT;
  5690. goto unlock;
  5691. }
  5692. err = -EINVAL;
  5693. /* if the bdev is going readonly the value of mddev->ro
  5694. * does not matter, no writes are coming
  5695. */
  5696. if (ro)
  5697. goto unlock;
5698. /* are we already prepared for writes? */
  5699. if (mddev->ro != 1)
  5700. goto unlock;
  5701. /* transitioning to readauto need only happen for
  5702. * arrays that call md_write_start
  5703. */
  5704. if (mddev->pers) {
  5705. err = restart_array(mddev);
  5706. if (err == 0) {
  5707. mddev->ro = 2;
  5708. set_disk_ro(mddev->gendisk, 0);
  5709. }
  5710. }
  5711. goto unlock;
  5712. }
  5713. /*
  5714. * The remaining ioctls are changing the state of the
  5715. * superblock, so we do not allow them on read-only arrays.
  5716. */
  5717. if (mddev->ro && mddev->pers) {
  5718. if (mddev->ro == 2) {
  5719. mddev->ro = 0;
  5720. sysfs_notify_dirent_safe(mddev->sysfs_state);
  5721. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5722. /* mddev_unlock will wake thread */
  5723. /* If a device failed while we were read-only, we
  5724. * need to make sure the metadata is updated now.
  5725. */
  5726. if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
  5727. mddev_unlock(mddev);
  5728. wait_event(mddev->sb_wait,
  5729. !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
  5730. !test_bit(MD_CHANGE_PENDING, &mddev->flags));
  5731. mddev_lock_nointr(mddev);
  5732. }
  5733. } else {
  5734. err = -EROFS;
  5735. goto unlock;
  5736. }
  5737. }
  5738. switch (cmd) {
  5739. case ADD_NEW_DISK:
  5740. {
  5741. mdu_disk_info_t info;
  5742. if (copy_from_user(&info, argp, sizeof(info)))
  5743. err = -EFAULT;
  5744. else
  5745. err = add_new_disk(mddev, &info);
  5746. goto unlock;
  5747. }
  5748. case HOT_ADD_DISK:
  5749. err = hot_add_disk(mddev, new_decode_dev(arg));
  5750. goto unlock;
  5751. case RUN_ARRAY:
  5752. err = do_md_run(mddev);
  5753. goto unlock;
  5754. case SET_BITMAP_FILE:
  5755. err = set_bitmap_file(mddev, (int)arg);
  5756. goto unlock;
  5757. default:
  5758. err = -EINVAL;
  5759. goto unlock;
  5760. }
  5761. unlock:
  5762. if (mddev->hold_active == UNTIL_IOCTL &&
  5763. err != -EINVAL)
  5764. mddev->hold_active = 0;
  5765. mddev_unlock(mddev);
  5766. out:
  5767. return err;
  5768. }
  5769. #ifdef CONFIG_COMPAT
  5770. static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
  5771. unsigned int cmd, unsigned long arg)
  5772. {
  5773. switch (cmd) {
  5774. case HOT_REMOVE_DISK:
  5775. case HOT_ADD_DISK:
  5776. case SET_DISK_FAULTY:
  5777. case SET_BITMAP_FILE:
  5778. /* These take in integer arg, do not convert */
  5779. break;
  5780. default:
  5781. arg = (unsigned long)compat_ptr(arg);
  5782. break;
  5783. }
  5784. return md_ioctl(bdev, mode, cmd, arg);
  5785. }
  5786. #endif /* CONFIG_COMPAT */
  5787. static int md_open(struct block_device *bdev, fmode_t mode)
  5788. {
  5789. /*
  5790. * Succeed if we can lock the mddev, which confirms that
  5791. * it isn't being stopped right now.
  5792. */
  5793. struct mddev *mddev = mddev_find(bdev->bd_dev);
  5794. int err;
  5795. if (!mddev)
  5796. return -ENODEV;
  5797. if (mddev->gendisk != bdev->bd_disk) {
  5798. /* we are racing with mddev_put which is discarding this
  5799. * bd_disk.
  5800. */
  5801. mddev_put(mddev);
  5802. /* Wait until bdev->bd_disk is definitely gone */
  5803. flush_workqueue(md_misc_wq);
  5804. /* Then retry the open from the top */
  5805. return -ERESTARTSYS;
  5806. }
  5807. BUG_ON(mddev != bdev->bd_disk->private_data);
  5808. if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
  5809. goto out;
  5810. err = 0;
  5811. atomic_inc(&mddev->openers);
  5812. clear_bit(MD_STILL_CLOSED, &mddev->flags);
  5813. mutex_unlock(&mddev->open_mutex);
  5814. check_disk_change(bdev);
  5815. out:
  5816. return err;
  5817. }
  5818. static void md_release(struct gendisk *disk, fmode_t mode)
  5819. {
  5820. struct mddev *mddev = disk->private_data;
  5821. BUG_ON(!mddev);
  5822. atomic_dec(&mddev->openers);
  5823. mddev_put(mddev);
  5824. }
  5825. static int md_media_changed(struct gendisk *disk)
  5826. {
  5827. struct mddev *mddev = disk->private_data;
  5828. return mddev->changed;
  5829. }
  5830. static int md_revalidate(struct gendisk *disk)
  5831. {
  5832. struct mddev *mddev = disk->private_data;
  5833. mddev->changed = 0;
  5834. return 0;
  5835. }
  5836. static const struct block_device_operations md_fops =
  5837. {
  5838. .owner = THIS_MODULE,
  5839. .open = md_open,
  5840. .release = md_release,
  5841. .ioctl = md_ioctl,
  5842. #ifdef CONFIG_COMPAT
  5843. .compat_ioctl = md_compat_ioctl,
  5844. #endif
  5845. .getgeo = md_getgeo,
  5846. .media_changed = md_media_changed,
  5847. .revalidate_disk= md_revalidate,
  5848. };
  5849. static int md_thread(void *arg)
  5850. {
  5851. struct md_thread *thread = arg;
  5852. /*
5853. * md_thread is a 'system-thread'; its priority should be very
  5854. * high. We avoid resource deadlocks individually in each
  5855. * raid personality. (RAID5 does preallocation) We also use RR and
  5856. * the very same RT priority as kswapd, thus we will never get
  5857. * into a priority inversion deadlock.
  5858. *
  5859. * we definitely have to have equal or higher priority than
  5860. * bdflush, otherwise bdflush will deadlock if there are too
  5861. * many dirty RAID5 blocks.
  5862. */
  5863. allow_signal(SIGKILL);
  5864. while (!kthread_should_stop()) {
  5865. /* We need to wait INTERRUPTIBLE so that
  5866. * we don't add to the load-average.
  5867. * That means we need to be sure no signals are
  5868. * pending
  5869. */
  5870. if (signal_pending(current))
  5871. flush_signals(current);
  5872. wait_event_interruptible_timeout
  5873. (thread->wqueue,
  5874. test_bit(THREAD_WAKEUP, &thread->flags)
  5875. || kthread_should_stop(),
  5876. thread->timeout);
  5877. clear_bit(THREAD_WAKEUP, &thread->flags);
  5878. if (!kthread_should_stop())
  5879. thread->run(thread);
  5880. }
  5881. return 0;
  5882. }
  5883. void md_wakeup_thread(struct md_thread *thread)
  5884. {
  5885. if (thread) {
  5886. pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
  5887. set_bit(THREAD_WAKEUP, &thread->flags);
  5888. wake_up(&thread->wqueue);
  5889. }
  5890. }
  5891. EXPORT_SYMBOL(md_wakeup_thread);
  5892. struct md_thread *md_register_thread(void (*run) (struct md_thread *),
  5893. struct mddev *mddev, const char *name)
  5894. {
  5895. struct md_thread *thread;
  5896. thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
  5897. if (!thread)
  5898. return NULL;
  5899. init_waitqueue_head(&thread->wqueue);
  5900. thread->run = run;
  5901. thread->mddev = mddev;
  5902. thread->timeout = MAX_SCHEDULE_TIMEOUT;
  5903. thread->tsk = kthread_run(md_thread, thread,
  5904. "%s_%s",
  5905. mdname(thread->mddev),
  5906. name);
  5907. if (IS_ERR(thread->tsk)) {
  5908. kfree(thread);
  5909. return NULL;
  5910. }
  5911. return thread;
  5912. }
  5913. EXPORT_SYMBOL(md_register_thread);
  5914. void md_unregister_thread(struct md_thread **threadp)
  5915. {
  5916. struct md_thread *thread = *threadp;
  5917. if (!thread)
  5918. return;
  5919. pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
  5920. /* Locking ensures that mddev_unlock does not wake_up a
  5921. * non-existent thread
  5922. */
  5923. spin_lock(&pers_lock);
  5924. *threadp = NULL;
  5925. spin_unlock(&pers_lock);
  5926. kthread_stop(thread->tsk);
  5927. kfree(thread);
  5928. }
  5929. EXPORT_SYMBOL(md_unregister_thread);
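/*
 * Editor's sketch (assumption): taken together, the three helpers above
 * give a personality a simple background-thread lifecycle -- register
 * once, wake from any context (including interrupt), unregister on
 * tear-down.  A hypothetical user:
 */
#if 0	/* illustrative only, not built */
static void demo_run(struct md_thread *t)
{
	/* one pass of background work for t->mddev; md_thread()
	 * loops and sleeps again until the next wakeup */
}

static int demo_start(struct mddev *mddev)
{
	mddev->thread = md_register_thread(demo_run, mddev, "demo");
	if (!mddev->thread)
		return -ENOMEM;
	md_wakeup_thread(mddev->thread);	/* safe from IRQ context */
	return 0;
}

static void demo_stop(struct mddev *mddev)
{
	md_unregister_thread(&mddev->thread);	/* also NULLs the pointer */
}
#endif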
  5930. void md_error(struct mddev *mddev, struct md_rdev *rdev)
  5931. {
  5932. if (!rdev || test_bit(Faulty, &rdev->flags))
  5933. return;
  5934. if (!mddev->pers || !mddev->pers->error_handler)
  5935. return;
  5936. mddev->pers->error_handler(mddev,rdev);
  5937. if (mddev->degraded)
  5938. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  5939. sysfs_notify_dirent_safe(rdev->sysfs_state);
  5940. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  5941. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5942. md_wakeup_thread(mddev->thread);
  5943. if (mddev->event_work.func)
  5944. queue_work(md_misc_wq, &mddev->event_work);
  5945. md_new_event_inintr(mddev);
  5946. }
  5947. EXPORT_SYMBOL(md_error);
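/*
 * Editor's sketch (assumption): personalities typically report member
 * failures by calling md_error() from their bio completion handlers,
 * along the lines of:
 */
#if 0	/* illustrative only, not built */
static void demo_end_write_request(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;

	if (error)
		md_error(rdev->mddev, rdev);	/* may mark rdev Faulty */
	rdev_dec_pending(rdev, rdev->mddev);
}
#endif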
5948. /* seq_file implementation for /proc/mdstat */
  5949. static void status_unused(struct seq_file *seq)
  5950. {
  5951. int i = 0;
  5952. struct md_rdev *rdev;
  5953. seq_printf(seq, "unused devices: ");
  5954. list_for_each_entry(rdev, &pending_raid_disks, same_set) {
  5955. char b[BDEVNAME_SIZE];
  5956. i++;
  5957. seq_printf(seq, "%s ",
  5958. bdevname(rdev->bdev,b));
  5959. }
  5960. if (!i)
  5961. seq_printf(seq, "<none>");
  5962. seq_printf(seq, "\n");
  5963. }
  5964. static void status_resync(struct seq_file *seq, struct mddev *mddev)
  5965. {
  5966. sector_t max_sectors, resync, res;
  5967. unsigned long dt, db;
  5968. sector_t rt;
  5969. int scale;
  5970. unsigned int per_milli;
  5971. if (mddev->curr_resync <= 3)
  5972. resync = 0;
  5973. else
  5974. resync = mddev->curr_resync
  5975. - atomic_read(&mddev->recovery_active);
  5976. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
  5977. test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  5978. max_sectors = mddev->resync_max_sectors;
  5979. else
  5980. max_sectors = mddev->dev_sectors;
  5981. WARN_ON(max_sectors == 0);
  5982. /* Pick 'scale' such that (resync>>scale)*1000 will fit
  5983. * in a sector_t, and (max_sectors>>scale) will fit in a
  5984. * u32, as those are the requirements for sector_div.
  5985. * Thus 'scale' must be at least 10
  5986. */
  5987. scale = 10;
  5988. if (sizeof(sector_t) > sizeof(unsigned long)) {
  5989. while ( max_sectors/2 > (1ULL<<(scale+32)))
  5990. scale++;
  5991. }
  5992. res = (resync>>scale)*1000;
  5993. sector_div(res, (u32)((max_sectors>>scale)+1));
  5994. per_milli = res;
  5995. {
  5996. int i, x = per_milli/50, y = 20-x;
  5997. seq_printf(seq, "[");
  5998. for (i = 0; i < x; i++)
  5999. seq_printf(seq, "=");
  6000. seq_printf(seq, ">");
  6001. for (i = 0; i < y; i++)
  6002. seq_printf(seq, ".");
  6003. seq_printf(seq, "] ");
  6004. }
  6005. seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
  6006. (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
  6007. "reshape" :
  6008. (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
  6009. "check" :
  6010. (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
  6011. "resync" : "recovery"))),
  6012. per_milli/10, per_milli % 10,
  6013. (unsigned long long) resync/2,
  6014. (unsigned long long) max_sectors/2);
  6015. /*
  6016. * dt: time from mark until now
  6017. * db: blocks written from mark until now
  6018. * rt: remaining time
  6019. *
6020. * rt is a sector_t, so it could be 32-bit or 64-bit.
6021. * So we divide before multiplying, in case it is 32-bit and close
6022. * to the limit.
  6023. * We scale the divisor (db) by 32 to avoid losing precision
  6024. * near the end of resync when the number of remaining sectors
  6025. * is close to 'db'.
  6026. * We then divide rt by 32 after multiplying by db to compensate.
  6027. * The '+1' avoids division by zero if db is very small.
  6028. */
  6029. dt = ((jiffies - mddev->resync_mark) / HZ);
  6030. if (!dt) dt++;
  6031. db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
  6032. - mddev->resync_mark_cnt;
  6033. rt = max_sectors - resync; /* number of remaining sectors */
  6034. sector_div(rt, db/32+1);
  6035. rt *= dt;
  6036. rt >>= 5;
  6037. seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
  6038. ((unsigned long)rt % 60)/6);
  6039. seq_printf(seq, " speed=%ldK/sec", db/2/dt);
  6040. }
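/*
 * Worked example (editor's note): resyncing a 1 TiB member
 * (max_sectors = 2^31) that is 600 GiB done (resync = 1258291200
 * sectors).  With scale = 10,
 *	res = (resync>>10)*1000 / ((max_sectors>>10)+1) = 585,
 * so the line prints "=58.5%" and the bar shows 11 '=' characters.
 * If the measured rate db/2/dt comes to 100000K/sec, the 889192448
 * remaining sectors take about 4446 seconds, so the rt arithmetic
 * prints a 'finish' estimate of roughly 74 minutes.
 */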
  6041. static void *md_seq_start(struct seq_file *seq, loff_t *pos)
  6042. {
  6043. struct list_head *tmp;
  6044. loff_t l = *pos;
  6045. struct mddev *mddev;
  6046. if (l >= 0x10000)
  6047. return NULL;
  6048. if (!l--)
  6049. /* header */
  6050. return (void*)1;
  6051. spin_lock(&all_mddevs_lock);
  6052. list_for_each(tmp,&all_mddevs)
  6053. if (!l--) {
  6054. mddev = list_entry(tmp, struct mddev, all_mddevs);
  6055. mddev_get(mddev);
  6056. spin_unlock(&all_mddevs_lock);
  6057. return mddev;
  6058. }
  6059. spin_unlock(&all_mddevs_lock);
  6060. if (!l--)
  6061. return (void*)2;/* tail */
  6062. return NULL;
  6063. }
  6064. static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  6065. {
  6066. struct list_head *tmp;
  6067. struct mddev *next_mddev, *mddev = v;
  6068. ++*pos;
  6069. if (v == (void*)2)
  6070. return NULL;
  6071. spin_lock(&all_mddevs_lock);
  6072. if (v == (void*)1)
  6073. tmp = all_mddevs.next;
  6074. else
  6075. tmp = mddev->all_mddevs.next;
  6076. if (tmp != &all_mddevs)
  6077. next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
  6078. else {
  6079. next_mddev = (void*)2;
  6080. *pos = 0x10000;
  6081. }
  6082. spin_unlock(&all_mddevs_lock);
  6083. if (v != (void*)1)
  6084. mddev_put(mddev);
  6085. return next_mddev;
  6086. }
  6087. static void md_seq_stop(struct seq_file *seq, void *v)
  6088. {
  6089. struct mddev *mddev = v;
  6090. if (mddev && v != (void*)1 && v != (void*)2)
  6091. mddev_put(mddev);
  6092. }
  6093. static int md_seq_show(struct seq_file *seq, void *v)
  6094. {
  6095. struct mddev *mddev = v;
  6096. sector_t sectors;
  6097. struct md_rdev *rdev;
  6098. if (v == (void*)1) {
  6099. struct md_personality *pers;
  6100. seq_printf(seq, "Personalities : ");
  6101. spin_lock(&pers_lock);
  6102. list_for_each_entry(pers, &pers_list, list)
  6103. seq_printf(seq, "[%s] ", pers->name);
  6104. spin_unlock(&pers_lock);
  6105. seq_printf(seq, "\n");
  6106. seq->poll_event = atomic_read(&md_event_count);
  6107. return 0;
  6108. }
  6109. if (v == (void*)2) {
  6110. status_unused(seq);
  6111. return 0;
  6112. }
  6113. if (mddev_lock(mddev) < 0)
  6114. return -EINTR;
  6115. if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
  6116. seq_printf(seq, "%s : %sactive", mdname(mddev),
  6117. mddev->pers ? "" : "in");
  6118. if (mddev->pers) {
  6119. if (mddev->ro==1)
  6120. seq_printf(seq, " (read-only)");
  6121. if (mddev->ro==2)
  6122. seq_printf(seq, " (auto-read-only)");
  6123. seq_printf(seq, " %s", mddev->pers->name);
  6124. }
  6125. sectors = 0;
  6126. rdev_for_each(rdev, mddev) {
  6127. char b[BDEVNAME_SIZE];
  6128. seq_printf(seq, " %s[%d]",
  6129. bdevname(rdev->bdev,b), rdev->desc_nr);
  6130. if (test_bit(WriteMostly, &rdev->flags))
  6131. seq_printf(seq, "(W)");
  6132. if (test_bit(Faulty, &rdev->flags)) {
  6133. seq_printf(seq, "(F)");
  6134. continue;
  6135. }
  6136. if (rdev->raid_disk < 0)
  6137. seq_printf(seq, "(S)"); /* spare */
  6138. if (test_bit(Replacement, &rdev->flags))
  6139. seq_printf(seq, "(R)");
  6140. sectors += rdev->sectors;
  6141. }
  6142. if (!list_empty(&mddev->disks)) {
  6143. if (mddev->pers)
  6144. seq_printf(seq, "\n %llu blocks",
  6145. (unsigned long long)
  6146. mddev->array_sectors / 2);
  6147. else
  6148. seq_printf(seq, "\n %llu blocks",
  6149. (unsigned long long)sectors / 2);
  6150. }
  6151. if (mddev->persistent) {
  6152. if (mddev->major_version != 0 ||
  6153. mddev->minor_version != 90) {
  6154. seq_printf(seq," super %d.%d",
  6155. mddev->major_version,
  6156. mddev->minor_version);
  6157. }
  6158. } else if (mddev->external)
  6159. seq_printf(seq, " super external:%s",
  6160. mddev->metadata_type);
  6161. else
  6162. seq_printf(seq, " super non-persistent");
  6163. if (mddev->pers) {
  6164. mddev->pers->status(seq, mddev);
  6165. seq_printf(seq, "\n ");
  6166. if (mddev->pers->sync_request) {
  6167. if (mddev->curr_resync > 2) {
  6168. status_resync(seq, mddev);
  6169. seq_printf(seq, "\n ");
  6170. } else if (mddev->curr_resync >= 1)
  6171. seq_printf(seq, "\tresync=DELAYED\n ");
  6172. else if (mddev->recovery_cp < MaxSector)
  6173. seq_printf(seq, "\tresync=PENDING\n ");
  6174. }
  6175. } else
  6176. seq_printf(seq, "\n ");
  6177. bitmap_status(seq, mddev->bitmap);
  6178. seq_printf(seq, "\n");
  6179. }
  6180. mddev_unlock(mddev);
  6181. return 0;
  6182. }
  6183. static const struct seq_operations md_seq_ops = {
  6184. .start = md_seq_start,
  6185. .next = md_seq_next,
  6186. .stop = md_seq_stop,
  6187. .show = md_seq_show,
  6188. };
  6189. static int md_seq_open(struct inode *inode, struct file *file)
  6190. {
  6191. struct seq_file *seq;
  6192. int error;
  6193. error = seq_open(file, &md_seq_ops);
  6194. if (error)
  6195. return error;
  6196. seq = file->private_data;
  6197. seq->poll_event = atomic_read(&md_event_count);
  6198. return error;
  6199. }
  6200. static int md_unloading;
  6201. static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
  6202. {
  6203. struct seq_file *seq = filp->private_data;
  6204. int mask;
  6205. if (md_unloading)
6206. return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
  6207. poll_wait(filp, &md_event_waiters, wait);
  6208. /* always allow read */
  6209. mask = POLLIN | POLLRDNORM;
  6210. if (seq->poll_event != atomic_read(&md_event_count))
  6211. mask |= POLLERR | POLLPRI;
  6212. return mask;
  6213. }
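/*
 * Editor's sketch (assumption): a userspace monitor exploits the
 * poll_event counter above by reading /proc/mdstat once and then
 * polling for the POLLPRI that a changed md_event_count produces;
 * re-reading from offset 0 re-arms poll_event (headers and error
 * handling omitted):
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	char buf[4096];
 *
 *	read(fd, buf, sizeof(buf));
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));
 *	}
 */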
  6214. static const struct file_operations md_seq_fops = {
  6215. .owner = THIS_MODULE,
  6216. .open = md_seq_open,
  6217. .read = seq_read,
  6218. .llseek = seq_lseek,
  6219. .release = seq_release_private,
  6220. .poll = mdstat_poll,
  6221. };
  6222. int register_md_personality(struct md_personality *p)
  6223. {
  6224. printk(KERN_INFO "md: %s personality registered for level %d\n",
  6225. p->name, p->level);
  6226. spin_lock(&pers_lock);
  6227. list_add_tail(&p->list, &pers_list);
  6228. spin_unlock(&pers_lock);
  6229. return 0;
  6230. }
  6231. EXPORT_SYMBOL(register_md_personality);
  6232. int unregister_md_personality(struct md_personality *p)
  6233. {
  6234. printk(KERN_INFO "md: %s personality unregistered\n", p->name);
  6235. spin_lock(&pers_lock);
  6236. list_del_init(&p->list);
  6237. spin_unlock(&pers_lock);
  6238. return 0;
  6239. }
  6240. EXPORT_SYMBOL(unregister_md_personality);
  6241. static int is_mddev_idle(struct mddev *mddev, int init)
  6242. {
  6243. struct md_rdev *rdev;
  6244. int idle;
  6245. int curr_events;
  6246. idle = 1;
  6247. rcu_read_lock();
  6248. rdev_for_each_rcu(rdev, mddev) {
  6249. struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
  6250. curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
  6251. (int)part_stat_read(&disk->part0, sectors[1]) -
  6252. atomic_read(&disk->sync_io);
  6253. /* sync IO will cause sync_io to increase before the disk_stats
  6254. * as sync_io is counted when a request starts, and
  6255. * disk_stats is counted when it completes.
  6256. * So resync activity will cause curr_events to be smaller than
  6257. * when there was no such activity.
  6258. * non-sync IO will cause disk_stat to increase without
  6259. * increasing sync_io so curr_events will (eventually)
  6260. * be larger than it was before. Once it becomes
  6261. * substantially larger, the test below will cause
  6262. * the array to appear non-idle, and resync will slow
  6263. * down.
  6264. * If there is a lot of outstanding resync activity when
  6265. * we set last_event to curr_events, then all that activity
  6266. * completing might cause the array to appear non-idle
  6267. * and resync will be slowed down even though there might
  6268. * not have been non-resync activity. This will only
  6269. * happen once though. 'last_events' will soon reflect
  6270. * the state where there is little or no outstanding
  6271. * resync requests, and further resync activity will
  6272. * always make curr_events less than last_events.
  6273. *
  6274. */
  6275. if (init || curr_events - rdev->last_events > 64) {
  6276. rdev->last_events = curr_events;
  6277. idle = 0;
  6278. }
  6279. }
  6280. rcu_read_unlock();
  6281. return idle;
  6282. }
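/*
 * Worked example (editor's note): while 128 sectors of resync I/O are
 * in flight, sync_io has already been bumped by 128 but the part_stat
 * counters have not, so curr_events reads lower than at the last
 * sample; only non-sync I/O can push curr_events more than 64 above
 * rdev->last_events and make the array count as busy.
 */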
  6283. void md_done_sync(struct mddev *mddev, int blocks, int ok)
  6284. {
  6285. /* another "blocks" (512byte) blocks have been synced */
  6286. atomic_sub(blocks, &mddev->recovery_active);
  6287. wake_up(&mddev->recovery_wait);
  6288. if (!ok) {
  6289. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6290. set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
  6291. md_wakeup_thread(mddev->thread);
  6292. // stop recovery, signal do_sync ....
  6293. }
  6294. }
  6295. EXPORT_SYMBOL(md_done_sync);
  6296. /* md_write_start(mddev, bi)
  6297. * If we need to update some array metadata (e.g. 'active' flag
  6298. * in superblock) before writing, schedule a superblock update
  6299. * and wait for it to complete.
  6300. */
  6301. void md_write_start(struct mddev *mddev, struct bio *bi)
  6302. {
  6303. int did_change = 0;
  6304. if (bio_data_dir(bi) != WRITE)
  6305. return;
  6306. BUG_ON(mddev->ro == 1);
  6307. if (mddev->ro == 2) {
  6308. /* need to switch to read/write */
  6309. mddev->ro = 0;
  6310. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6311. md_wakeup_thread(mddev->thread);
  6312. md_wakeup_thread(mddev->sync_thread);
  6313. did_change = 1;
  6314. }
  6315. atomic_inc(&mddev->writes_pending);
  6316. if (mddev->safemode == 1)
  6317. mddev->safemode = 0;
  6318. if (mddev->in_sync) {
  6319. spin_lock_irq(&mddev->write_lock);
  6320. if (mddev->in_sync) {
  6321. mddev->in_sync = 0;
  6322. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6323. set_bit(MD_CHANGE_PENDING, &mddev->flags);
  6324. md_wakeup_thread(mddev->thread);
  6325. did_change = 1;
  6326. }
  6327. spin_unlock_irq(&mddev->write_lock);
  6328. }
  6329. if (did_change)
  6330. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6331. wait_event(mddev->sb_wait,
  6332. !test_bit(MD_CHANGE_PENDING, &mddev->flags));
  6333. }
  6334. EXPORT_SYMBOL(md_write_start);
  6335. void md_write_end(struct mddev *mddev)
  6336. {
  6337. if (atomic_dec_and_test(&mddev->writes_pending)) {
  6338. if (mddev->safemode == 2)
  6339. md_wakeup_thread(mddev->thread);
  6340. else if (mddev->safemode_delay)
  6341. mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
  6342. }
  6343. }
  6344. EXPORT_SYMBOL(md_write_end);
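/*
 * Editor's sketch (assumption): a personality's write path brackets
 * each write with the pair above -- md_write_start() before issuing
 * (it may block while the superblock is marked 'active'), and
 * md_write_end() once the write has drained:
 */
#if 0	/* illustrative only, not built */
static void demo_make_request(struct mddev *mddev, struct bio *bio)
{
	md_write_start(mddev, bio);
	/* ... map the bio and submit it to the member devices ...
	 * the completion handler then calls md_write_end(mddev) */
}
#endif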
  6345. /* md_allow_write(mddev)
  6346. * Calling this ensures that the array is marked 'active' so that writes
  6347. * may proceed without blocking. It is important to call this before
  6348. * attempting a GFP_KERNEL allocation while holding the mddev lock.
  6349. * Must be called with mddev_lock held.
  6350. *
  6351. * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
  6352. * is dropped, so return -EAGAIN after notifying userspace.
  6353. */
  6354. int md_allow_write(struct mddev *mddev)
  6355. {
  6356. if (!mddev->pers)
  6357. return 0;
  6358. if (mddev->ro)
  6359. return 0;
  6360. if (!mddev->pers->sync_request)
  6361. return 0;
  6362. spin_lock_irq(&mddev->write_lock);
  6363. if (mddev->in_sync) {
  6364. mddev->in_sync = 0;
  6365. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6366. set_bit(MD_CHANGE_PENDING, &mddev->flags);
  6367. if (mddev->safemode_delay &&
  6368. mddev->safemode == 0)
  6369. mddev->safemode = 1;
  6370. spin_unlock_irq(&mddev->write_lock);
  6371. md_update_sb(mddev, 0);
  6372. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6373. } else
  6374. spin_unlock_irq(&mddev->write_lock);
  6375. if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
  6376. return -EAGAIN;
  6377. else
  6378. return 0;
  6379. }
  6380. EXPORT_SYMBOL_GPL(md_allow_write);
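/*
 * Editor's sketch (assumption): a hypothetical caller that must
 * allocate with GFP_KERNEL while holding the mddev lock calls
 * md_allow_write() first; -EAGAIN only means the 'clean' flag change
 * is still pending in the ->external case, not that writes are barred:
 */
#if 0	/* illustrative only, not built */
static void *demo_alloc(struct mddev *mddev, size_t size)
{
	int err = md_allow_write(mddev);

	if (err && err != -EAGAIN)
		return NULL;
	return kzalloc(size, GFP_KERNEL);
}
#endif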
  6381. #define SYNC_MARKS 10
  6382. #define SYNC_MARK_STEP (3*HZ)
  6383. #define UPDATE_FREQUENCY (5*60*HZ)
  6384. void md_do_sync(struct md_thread *thread)
  6385. {
  6386. struct mddev *mddev = thread->mddev;
  6387. struct mddev *mddev2;
  6388. unsigned int currspeed = 0,
  6389. window;
  6390. sector_t max_sectors,j, io_sectors, recovery_done;
  6391. unsigned long mark[SYNC_MARKS];
  6392. unsigned long update_time;
  6393. sector_t mark_cnt[SYNC_MARKS];
  6394. int last_mark,m;
  6395. struct list_head *tmp;
  6396. sector_t last_check;
  6397. int skipped = 0;
  6398. struct md_rdev *rdev;
  6399. char *desc, *action = NULL;
  6400. struct blk_plug plug;
6401. /* just in case the thread restarts... */
  6402. if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
  6403. return;
  6404. if (mddev->ro) {/* never try to sync a read-only array */
  6405. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6406. return;
  6407. }
  6408. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6409. if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
  6410. desc = "data-check";
  6411. action = "check";
  6412. } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  6413. desc = "requested-resync";
  6414. action = "repair";
  6415. } else
  6416. desc = "resync";
  6417. } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  6418. desc = "reshape";
  6419. else
  6420. desc = "recovery";
  6421. mddev->last_sync_action = action ?: desc;
  6422. /* we overload curr_resync somewhat here.
  6423. * 0 == not engaged in resync at all
  6424. * 2 == checking that there is no conflict with another sync
  6425. * 1 == like 2, but have yielded to allow conflicting resync to
6426. * commence
  6427. * other == active in resync - this many blocks
  6428. *
  6429. * Before starting a resync we must have set curr_resync to
  6430. * 2, and then checked that every "conflicting" array has curr_resync
  6431. * less than ours. When we find one that is the same or higher
  6432. * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
  6433. * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
  6434. * This will mean we have to start checking from the beginning again.
  6435. *
  6436. */
  6437. do {
  6438. mddev->curr_resync = 2;
  6439. try_again:
  6440. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6441. goto skip;
  6442. for_each_mddev(mddev2, tmp) {
  6443. if (mddev2 == mddev)
  6444. continue;
  6445. if (!mddev->parallel_resync
  6446. && mddev2->curr_resync
  6447. && match_mddev_units(mddev, mddev2)) {
  6448. DEFINE_WAIT(wq);
  6449. if (mddev < mddev2 && mddev->curr_resync == 2) {
  6450. /* arbitrarily yield */
  6451. mddev->curr_resync = 1;
  6452. wake_up(&resync_wait);
  6453. }
  6454. if (mddev > mddev2 && mddev->curr_resync == 1)
  6455. /* no need to wait here, we can wait the next
  6456. * time 'round when curr_resync == 2
  6457. */
  6458. continue;
  6459. /* We need to wait 'interruptible' so as not to
  6460. * contribute to the load average, and not to
  6461. * be caught by 'softlockup'
  6462. */
  6463. prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
  6464. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
  6465. mddev2->curr_resync >= mddev->curr_resync) {
  6466. printk(KERN_INFO "md: delaying %s of %s"
  6467. " until %s has finished (they"
  6468. " share one or more physical units)\n",
  6469. desc, mdname(mddev), mdname(mddev2));
  6470. mddev_put(mddev2);
  6471. if (signal_pending(current))
  6472. flush_signals(current);
  6473. schedule();
  6474. finish_wait(&resync_wait, &wq);
  6475. goto try_again;
  6476. }
  6477. finish_wait(&resync_wait, &wq);
  6478. }
  6479. }
  6480. } while (mddev->curr_resync < 2);
  6481. j = 0;
  6482. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6483. /* resync follows the size requested by the personality,
  6484. * which defaults to physical size, but can be virtual size
  6485. */
  6486. max_sectors = mddev->resync_max_sectors;
  6487. atomic64_set(&mddev->resync_mismatches, 0);
  6488. /* we don't use the checkpoint if there's a bitmap */
  6489. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6490. j = mddev->resync_min;
  6491. else if (!mddev->bitmap)
  6492. j = mddev->recovery_cp;
  6493. } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
  6494. max_sectors = mddev->resync_max_sectors;
  6495. else {
  6496. /* recovery follows the physical size of devices */
  6497. max_sectors = mddev->dev_sectors;
  6498. j = MaxSector;
  6499. rcu_read_lock();
  6500. rdev_for_each_rcu(rdev, mddev)
  6501. if (rdev->raid_disk >= 0 &&
  6502. !test_bit(Faulty, &rdev->flags) &&
  6503. !test_bit(In_sync, &rdev->flags) &&
  6504. rdev->recovery_offset < j)
  6505. j = rdev->recovery_offset;
  6506. rcu_read_unlock();
  6507. /* If there is a bitmap, we need to make sure all
  6508. * writes that started before we added a spare
  6509. * complete before we start doing a recovery.
  6510. * Otherwise the write might complete and (via
  6511. * bitmap_endwrite) set a bit in the bitmap after the
  6512. * recovery has checked that bit and skipped that
  6513. * region.
  6514. */
  6515. if (mddev->bitmap) {
  6516. mddev->pers->quiesce(mddev, 1);
  6517. mddev->pers->quiesce(mddev, 0);
  6518. }
  6519. }
  6520. printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
  6521. printk(KERN_INFO "md: minimum _guaranteed_ speed:"
  6522. " %d KB/sec/disk.\n", speed_min(mddev));
  6523. printk(KERN_INFO "md: using maximum available idle IO bandwidth "
  6524. "(but not more than %d KB/sec) for %s.\n",
  6525. speed_max(mddev), desc);
  6526. is_mddev_idle(mddev, 1); /* this initializes IO event counters */
  6527. io_sectors = 0;
  6528. for (m = 0; m < SYNC_MARKS; m++) {
  6529. mark[m] = jiffies;
  6530. mark_cnt[m] = io_sectors;
  6531. }
  6532. last_mark = 0;
  6533. mddev->resync_mark = mark[last_mark];
  6534. mddev->resync_mark_cnt = mark_cnt[last_mark];
  6535. /*
  6536. * Tune reconstruction:
  6537. */
  6538. window = 32*(PAGE_SIZE/512);
  6539. printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
  6540. window/2, (unsigned long long)max_sectors/2);
  6541. atomic_set(&mddev->recovery_active, 0);
  6542. last_check = 0;
  6543. if (j>2) {
  6544. printk(KERN_INFO
  6545. "md: resuming %s of %s from checkpoint.\n",
  6546. desc, mdname(mddev));
  6547. mddev->curr_resync = j;
  6548. } else
  6549. mddev->curr_resync = 3; /* no longer delayed */
  6550. mddev->curr_resync_completed = j;
  6551. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  6552. md_new_event(mddev);
  6553. update_time = jiffies;
  6554. blk_start_plug(&plug);
  6555. while (j < max_sectors) {
  6556. sector_t sectors;
  6557. skipped = 0;
  6558. if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  6559. ((mddev->curr_resync > mddev->curr_resync_completed &&
  6560. (mddev->curr_resync - mddev->curr_resync_completed)
  6561. > (max_sectors >> 4)) ||
  6562. time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
  6563. (j - mddev->curr_resync_completed)*2
  6564. >= mddev->resync_max - mddev->curr_resync_completed
  6565. )) {
  6566. /* time to update curr_resync_completed */
  6567. wait_event(mddev->recovery_wait,
  6568. atomic_read(&mddev->recovery_active) == 0);
  6569. mddev->curr_resync_completed = j;
  6570. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
  6571. j > mddev->recovery_cp)
  6572. mddev->recovery_cp = j;
  6573. update_time = jiffies;
  6574. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6575. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  6576. }
  6577. while (j >= mddev->resync_max &&
  6578. !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
  6579. /* As this condition is controlled by user-space,
  6580. * we can block indefinitely, so use '_interruptible'
  6581. * to avoid triggering warnings.
  6582. */
  6583. flush_signals(current); /* just in case */
  6584. wait_event_interruptible(mddev->recovery_wait,
  6585. mddev->resync_max > j
  6586. || test_bit(MD_RECOVERY_INTR,
  6587. &mddev->recovery));
  6588. }
  6589. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6590. break;
  6591. sectors = mddev->pers->sync_request(mddev, j, &skipped,
  6592. currspeed < speed_min(mddev));
  6593. if (sectors == 0) {
  6594. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6595. break;
  6596. }
  6597. if (!skipped) { /* actual IO requested */
  6598. io_sectors += sectors;
  6599. atomic_add(sectors, &mddev->recovery_active);
  6600. }
  6601. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6602. break;
  6603. j += sectors;
  6604. if (j > 2)
  6605. mddev->curr_resync = j;
  6606. mddev->curr_mark_cnt = io_sectors;
  6607. if (last_check == 0)
  6608. /* this is the earliest that rebuild will be
  6609. * visible in /proc/mdstat
  6610. */
  6611. md_new_event(mddev);
  6612. if (last_check + window > io_sectors || j == max_sectors)
  6613. continue;
  6614. last_check = io_sectors;
  6615. repeat:
  6616. if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
  6617. /* step marks */
  6618. int next = (last_mark+1) % SYNC_MARKS;
  6619. mddev->resync_mark = mark[next];
  6620. mddev->resync_mark_cnt = mark_cnt[next];
  6621. mark[next] = jiffies;
  6622. mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
  6623. last_mark = next;
  6624. }
  6625. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6626. break;
  6627. /*
6628. * this loop exits only when we are slower than
  6629. * the 'hard' speed limit, or the system was IO-idle for
  6630. * a jiffy.
  6631. * the system might be non-idle CPU-wise, but we only care
  6632. * about not overloading the IO subsystem. (things like an
  6633. * e2fsck being done on the RAID array should execute fast)
  6634. */
  6635. cond_resched();
  6636. recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
  6637. currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
  6638. /((jiffies-mddev->resync_mark)/HZ +1) +1;
  6639. if (currspeed > speed_min(mddev)) {
  6640. if ((currspeed > speed_max(mddev)) ||
  6641. !is_mddev_idle(mddev, 0)) {
  6642. msleep(500);
  6643. goto repeat;
  6644. }
  6645. }
  6646. }
  6647. printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
  6648. test_bit(MD_RECOVERY_INTR, &mddev->recovery)
  6649. ? "interrupted" : "done");
  6650. /*
  6651. * this also signals 'finished resyncing' to md_stop
  6652. */
  6653. blk_finish_plug(&plug);
  6654. wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
  6655. /* tell personality that we are finished */
  6656. mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
  6657. if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
  6658. mddev->curr_resync > 2) {
  6659. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
  6660. if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
  6661. if (mddev->curr_resync >= mddev->recovery_cp) {
  6662. printk(KERN_INFO
  6663. "md: checkpointing %s of %s.\n",
  6664. desc, mdname(mddev));
  6665. if (test_bit(MD_RECOVERY_ERROR,
  6666. &mddev->recovery))
  6667. mddev->recovery_cp =
  6668. mddev->curr_resync_completed;
  6669. else
  6670. mddev->recovery_cp =
  6671. mddev->curr_resync;
  6672. }
  6673. } else
  6674. mddev->recovery_cp = MaxSector;
  6675. } else {
  6676. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
  6677. mddev->curr_resync = MaxSector;
  6678. rcu_read_lock();
  6679. rdev_for_each_rcu(rdev, mddev)
  6680. if (rdev->raid_disk >= 0 &&
  6681. mddev->delta_disks >= 0 &&
  6682. !test_bit(Faulty, &rdev->flags) &&
  6683. !test_bit(In_sync, &rdev->flags) &&
  6684. rdev->recovery_offset < mddev->curr_resync)
  6685. rdev->recovery_offset = mddev->curr_resync;
  6686. rcu_read_unlock();
  6687. }
  6688. }
  6689. skip:
  6690. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6691. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
  6692. /* We completed so min/max setting can be forgotten if used. */
  6693. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6694. mddev->resync_min = 0;
  6695. mddev->resync_max = MaxSector;
  6696. } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  6697. mddev->resync_min = mddev->curr_resync_completed;
  6698. mddev->curr_resync = 0;
  6699. wake_up(&resync_wait);
  6700. set_bit(MD_RECOVERY_DONE, &mddev->recovery);
  6701. md_wakeup_thread(mddev->thread);
  6702. return;
  6703. }
  6704. EXPORT_SYMBOL_GPL(md_do_sync);
  6705. static int remove_and_add_spares(struct mddev *mddev,
  6706. struct md_rdev *this)
  6707. {
  6708. struct md_rdev *rdev;
  6709. int spares = 0;
  6710. int removed = 0;
  6711. rdev_for_each(rdev, mddev)
  6712. if ((this == NULL || rdev == this) &&
  6713. rdev->raid_disk >= 0 &&
  6714. !test_bit(Blocked, &rdev->flags) &&
  6715. (test_bit(Faulty, &rdev->flags) ||
  6716. ! test_bit(In_sync, &rdev->flags)) &&
  6717. atomic_read(&rdev->nr_pending)==0) {
  6718. if (mddev->pers->hot_remove_disk(
  6719. mddev, rdev) == 0) {
  6720. sysfs_unlink_rdev(mddev, rdev);
  6721. rdev->raid_disk = -1;
  6722. removed++;
  6723. }
  6724. }
  6725. if (removed && mddev->kobj.sd)
  6726. sysfs_notify(&mddev->kobj, NULL, "degraded");
  6727. if (this)
  6728. goto no_add;
  6729. rdev_for_each(rdev, mddev) {
  6730. if (rdev->raid_disk >= 0 &&
  6731. !test_bit(In_sync, &rdev->flags) &&
  6732. !test_bit(Faulty, &rdev->flags))
  6733. spares++;
  6734. if (rdev->raid_disk >= 0)
  6735. continue;
  6736. if (test_bit(Faulty, &rdev->flags))
  6737. continue;
  6738. if (mddev->ro &&
  6739. ! (rdev->saved_raid_disk >= 0 &&
  6740. !test_bit(Bitmap_sync, &rdev->flags)))
  6741. continue;
  6742. if (rdev->saved_raid_disk < 0)
  6743. rdev->recovery_offset = 0;
  6744. if (mddev->pers->
  6745. hot_add_disk(mddev, rdev) == 0) {
  6746. if (sysfs_link_rdev(mddev, rdev))
  6747. /* failure here is OK */;
  6748. spares++;
  6749. md_new_event(mddev);
  6750. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6751. }
  6752. }
  6753. no_add:
  6754. if (removed)
  6755. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6756. return spares;
  6757. }
  6758. static void md_start_sync(struct work_struct *ws)
  6759. {
  6760. struct mddev *mddev = container_of(ws, struct mddev, del_work);
  6761. mddev->sync_thread = md_register_thread(md_do_sync,
  6762. mddev,
  6763. "resync");
  6764. if (!mddev->sync_thread) {
  6765. printk(KERN_ERR "%s: could not start resync"
  6766. " thread...\n",
  6767. mdname(mddev));
  6768. /* leave the spares where they are, it shouldn't hurt */
  6769. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6770. clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6771. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6772. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6773. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6774. if (test_and_clear_bit(MD_RECOVERY_RECOVER,
  6775. &mddev->recovery))
  6776. if (mddev->sysfs_action)
  6777. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6778. } else
  6779. md_wakeup_thread(mddev->sync_thread);
  6780. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6781. md_new_event(mddev);
  6782. }
  6783. /*
  6784. * This routine is regularly called by all per-raid-array threads to
  6785. * deal with generic issues like resync and super-block update.
  6786. * Raid personalities that don't have a thread (linear/raid0) do not
  6787. * need this as they never do any recovery or update the superblock.
  6788. *
  6789. * It does not do any resync itself, but rather "forks" off other threads
  6790. * to do that as needed.
  6791. * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
  6792. * "->recovery" and create a thread at ->sync_thread.
  6793. * When the thread finishes it sets MD_RECOVERY_DONE
6794. * and wakes up this thread, which will reap the sync thread and finish up.
  6795. * This thread also removes any faulty devices (with nr_pending == 0).
  6796. *
  6797. * The overall approach is:
  6798. * 1/ if the superblock needs updating, update it.
  6799. * 2/ If a recovery thread is running, don't do anything else.
  6800. * 3/ If recovery has finished, clean up, possibly marking spares active.
  6801. * 4/ If there are any faulty devices, remove them.
6802. * 5/ If array is degraded, try to add spare devices
  6803. * 6/ If array has spares or is not in-sync, start a resync thread.
  6804. */
  6805. void md_check_recovery(struct mddev *mddev)
  6806. {
  6807. if (mddev->suspended)
  6808. return;
  6809. if (mddev->bitmap)
  6810. bitmap_daemon_work(mddev);
  6811. if (signal_pending(current)) {
  6812. if (mddev->pers->sync_request && !mddev->external) {
  6813. printk(KERN_INFO "md: %s in immediate safe mode\n",
  6814. mdname(mddev));
  6815. mddev->safemode = 2;
  6816. }
  6817. flush_signals(current);
  6818. }
  6819. if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
  6820. return;
  6821. if ( ! (
  6822. (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
  6823. test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
  6824. test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
  6825. (mddev->external == 0 && mddev->safemode == 1) ||
  6826. (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
  6827. && !mddev->in_sync && mddev->recovery_cp == MaxSector)
  6828. ))
  6829. return;
  6830. if (mddev_trylock(mddev)) {
  6831. int spares = 0;
  6832. if (mddev->ro) {
  6833. /* On a read-only array we can:
  6834. * - remove failed devices
  6835. * - add already-in_sync devices if the array itself
  6836. * is in-sync.
  6837. * As we only add devices that are already in-sync,
  6838. * we can activate the spares immediately.
  6839. */
  6840. remove_and_add_spares(mddev, NULL);
  6841. /* There is no thread, but we need to call
  6842. * ->spare_active and clear saved_raid_disk
  6843. */
  6844. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6845. md_reap_sync_thread(mddev);
  6846. clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6847. goto unlock;
  6848. }
  6849. if (!mddev->external) {
  6850. int did_change = 0;
  6851. spin_lock_irq(&mddev->write_lock);
  6852. if (mddev->safemode &&
  6853. !atomic_read(&mddev->writes_pending) &&
  6854. !mddev->in_sync &&
  6855. mddev->recovery_cp == MaxSector) {
  6856. mddev->in_sync = 1;
  6857. did_change = 1;
  6858. set_bit(MD_CHANGE_CLEAN, &mddev->flags);
  6859. }
  6860. if (mddev->safemode == 1)
  6861. mddev->safemode = 0;
  6862. spin_unlock_irq(&mddev->write_lock);
  6863. if (did_change)
  6864. sysfs_notify_dirent_safe(mddev->sysfs_state);
  6865. }
  6866. if (mddev->flags & MD_UPDATE_SB_FLAGS)
  6867. md_update_sb(mddev, 0);
  6868. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
  6869. !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
  6870. /* resync/recovery still happening */
  6871. clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6872. goto unlock;
  6873. }
  6874. if (mddev->sync_thread) {
  6875. md_reap_sync_thread(mddev);
  6876. goto unlock;
  6877. }
  6878. /* Set RUNNING before clearing NEEDED to avoid
  6879. * any transients in the value of "sync_action".
  6880. */
  6881. mddev->curr_resync_completed = 0;
  6882. set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6883. /* Clear some bits that don't mean anything, but
  6884. * might be left set
  6885. */
  6886. clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
  6887. clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
  6888. if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
  6889. test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
  6890. goto not_running;
  6891. /* no recovery is running.
  6892. * remove any failed drives, then
  6893. * add spares if possible.
  6894. * Spares are also removed and re-added, to allow
  6895. * the personality to fail the re-add.
  6896. */
  6897. if (mddev->reshape_position != MaxSector) {
  6898. if (mddev->pers->check_reshape == NULL ||
  6899. mddev->pers->check_reshape(mddev) != 0)
  6900. /* Cannot proceed */
  6901. goto not_running;
  6902. set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6903. clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6904. } else if ((spares = remove_and_add_spares(mddev, NULL))) {
  6905. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6906. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6907. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6908. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6909. } else if (mddev->recovery_cp < MaxSector) {
  6910. set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6911. clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  6912. } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
  6913. /* nothing to be done ... */
  6914. goto not_running;
  6915. if (mddev->pers->sync_request) {
  6916. if (spares) {
  6917. /* We are adding a device or devices to an array
  6918. * which has the bitmap stored on all devices.
  6919. * So make sure all bitmap pages get written
  6920. */
  6921. bitmap_write_all(mddev->bitmap);
  6922. }
  6923. INIT_WORK(&mddev->del_work, md_start_sync);
  6924. queue_work(md_misc_wq, &mddev->del_work);
  6925. goto unlock;
  6926. }
  6927. not_running:
  6928. if (!mddev->sync_thread) {
  6929. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6930. if (test_and_clear_bit(MD_RECOVERY_RECOVER,
  6931. &mddev->recovery))
  6932. if (mddev->sysfs_action)
  6933. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6934. }
  6935. unlock:
  6936. wake_up(&mddev->sb_wait);
  6937. mddev_unlock(mddev);
  6938. }
  6939. }
  6940. EXPORT_SYMBOL(md_check_recovery);
  6941. void md_reap_sync_thread(struct mddev *mddev)
  6942. {
  6943. struct md_rdev *rdev;
  6944. /* resync has finished, collect result */
  6945. md_unregister_thread(&mddev->sync_thread);
  6946. wake_up(&resync_wait);
  6947. if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
  6948. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  6949. /* success...*/
  6950. /* activate any spares */
  6951. if (mddev->pers->spare_active(mddev)) {
  6952. sysfs_notify(&mddev->kobj, NULL,
  6953. "degraded");
  6954. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  6955. }
  6956. }
  6957. if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  6958. mddev->pers->finish_reshape)
  6959. mddev->pers->finish_reshape(mddev);
6960. /* If array is no longer degraded, then any saved_raid_disk
  6961. * information must be scrapped.
  6962. */
  6963. if (!mddev->degraded)
  6964. rdev_for_each(rdev, mddev)
  6965. rdev->saved_raid_disk = -1;
  6966. md_update_sb(mddev, 1);
  6967. clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
  6968. clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
  6969. clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
  6970. clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
  6971. clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
  6972. /* flag recovery needed just to double check */
  6973. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  6974. sysfs_notify_dirent_safe(mddev->sysfs_action);
  6975. md_new_event(mddev);
  6976. if (mddev->event_work.func)
  6977. queue_work(md_misc_wq, &mddev->event_work);
  6978. }
  6979. EXPORT_SYMBOL(md_reap_sync_thread);
  6980. void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
  6981. {
  6982. sysfs_notify_dirent_safe(rdev->sysfs_state);
  6983. wait_event_timeout(rdev->blocked_wait,
  6984. !test_bit(Blocked, &rdev->flags) &&
  6985. !test_bit(BlockedBadBlocks, &rdev->flags),
  6986. msecs_to_jiffies(5000));
  6987. rdev_dec_pending(rdev, mddev);
  6988. }
  6989. EXPORT_SYMBOL(md_wait_for_blocked_rdev);
  6990. void md_finish_reshape(struct mddev *mddev)
  6991. {
6992. /* called by the personality module when a reshape completes. */
  6993. struct md_rdev *rdev;
  6994. rdev_for_each(rdev, mddev) {
  6995. if (rdev->data_offset > rdev->new_data_offset)
  6996. rdev->sectors += rdev->data_offset - rdev->new_data_offset;
  6997. else
  6998. rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
  6999. rdev->data_offset = rdev->new_data_offset;
  7000. }
  7001. }
  7002. EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management.
 * We can record which blocks on each device are 'bad' and so just
 * fail those blocks, or that stripe, rather than the whole device.
 * Entries in the bad-block table are 64bits wide.  This comprises:
 * Length of bad-range, in sectors: 0-511 for lengths 1-512
 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
 * A 'shift' can be set so that larger blocks are tracked and
 * consequently larger devices can be covered.
 * 'Acknowledged' flag - 1 bit. - the most significant bit.
 *
 * Locking of the bad-block table uses a seqlock so md_is_badblock
 * might need to retry if it is very unlucky.
 * We will sometimes want to check for bad blocks in a bi_end_io function,
 * so we use the write_seqlock_irq variant.
 *
 * When looking for a bad block we specify a range and want to
 * know if any block in the range is bad.  So we binary-search
 * to the last range that starts at-or-before the given endpoint,
 * (or "before the sector after the target range")
 * then see if it ends after the given start.
 * We return
 *  0 if there are no known bad blocks in the range
 *  1 if there are known bad blocks which are all acknowledged
 * -1 if there are bad blocks which have not yet been acknowledged
 *    in metadata.
 * plus the start/length of the first bad section we overlap.
 */
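
/* Illustrative sketch (ours, not part of the driver): how one 64-bit
 * table entry packs up under the layout described above, using the
 * BB_MAKE/BB_OFFSET/BB_LEN/BB_ACK accessors that the functions below
 * rely on.  The helper name and the pr_debug output are hypothetical,
 * for illustration only.
 */
static void __maybe_unused bb_entry_demo(void)
{
	/* a 16-sector bad range starting at sector 4096, acknowledged */
	u64 entry = BB_MAKE(4096, 16, 1);

	/* decodes back to exactly what was packed in */
	pr_debug("bad range: start %llu, len %d, ack %d\n",
		 (unsigned long long)BB_OFFSET(entry),
		 (int)BB_LEN(entry), BB_ACK(entry));
}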

int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
		   sector_t *first_bad, int *bad_sectors)
{
	int hi;
	int lo;
	u64 *p = bb->page;
	int rv;
	sector_t target = s + sectors;
	unsigned seq;

	if (bb->shift > 0) {
		/* round the start down, and the end up */
		s >>= bb->shift;
		target += (1<<bb->shift) - 1;
		target >>= bb->shift;
		sectors = target - s;
	}
	/* 'target' is now the first block after the bad range */

retry:
	seq = read_seqbegin(&bb->lock);
	lo = 0;
	rv = 0;
	hi = bb->count;

	/* Binary search between lo and hi for 'target'
	 * i.e. for the last range that starts before 'target'
	 */
	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
	 * are known not to be the last range before target.
	 * VARIANT: hi-lo is the number of possible
	 * ranges, and decreases until it reaches 1
	 */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			/* This could still be the one, earlier ranges
			 * could not. */
			lo = mid;
		else
			/* This and later ranges are definitely out. */
			hi = mid;
	}
	/* 'lo' might be the last that started before target, but 'hi' isn't */
	if (hi > lo) {
		/* need to check all ranges that end after 's' to see
		 * if any are unacknowledged.
		 */
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			if (BB_OFFSET(p[lo]) < target) {
				/* starts before the end, and finishes after
				 * the start, so they must overlap
				 */
				if (rv != -1 && BB_ACK(p[lo]))
					rv = 1;
				else
					rv = -1;
				*first_bad = BB_OFFSET(p[lo]);
				*bad_sectors = BB_LEN(p[lo]);
			}
			lo--;
		}
	}

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return rv;
}
EXPORT_SYMBOL_GPL(md_is_badblock);
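
/* Illustrative caller sketch (ours, not in the driver): a personality
 * deciding whether a read of 'nr' sectors at 'sector' on 'rdev' can be
 * issued directly.  The function name is hypothetical; interpreting the
 * 0/1/-1 result follows the comment above, and the data_offset addition
 * matches how rdev_set_badblocks() below maps sectors into the table.
 */
static bool __maybe_unused range_is_clean(struct md_rdev *rdev,
					  sector_t sector, int nr)
{
	sector_t first_bad;
	int bad_sectors;

	return md_is_badblock(&rdev->badblocks, sector + rdev->data_offset,
			      nr, &first_bad, &bad_sectors) == 0;
}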

/*
 * Add a range of bad blocks to the table.
 * This might extend the table, or might contract it
 * if two adjacent ranges can be merged.
 * We binary-search to find the 'insertion' point, then
 * decide how best to handle it.
 */
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged)
{
	u64 *p;
	int lo, hi;
	int rv = 1;
	unsigned long flags;

	if (bb->shift < 0)
		/* badblocks are disabled */
		return 0;

	if (bb->shift) {
		/* round the start down, and the end up */
		sector_t next = s + sectors;

		s >>= bb->shift;
		next += (1<<bb->shift) - 1;
		next >>= bb->shift;
		sectors = next - s;
	}

	write_seqlock_irqsave(&bb->lock, flags);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts at-or-before 's' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a <= s)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo && BB_OFFSET(p[lo]) > s)
		hi = lo;

	if (hi > lo) {
		/* we found a range that might merge with the start
		 * of our new range
		 */
		sector_t a = BB_OFFSET(p[lo]);
		sector_t e = a + BB_LEN(p[lo]);
		int ack = BB_ACK(p[lo]);

		if (e >= s) {
			/* Yes, we can merge with a previous range */
			if (s == a && s + sectors >= e)
				/* new range covers old */
				ack = acknowledged;
			else
				ack = ack && acknowledged;

			if (e < s + sectors)
				e = s + sectors;
			if (e - a <= BB_MAX_LEN) {
				p[lo] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				/* does not all fit in one range,
				 * make p[lo] maximal
				 */
				if (BB_LEN(p[lo]) != BB_MAX_LEN)
					p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
		}
	}
	if (sectors && hi < bb->count) {
		/* 'hi' points to the first range that starts after 's'.
		 * Maybe we can merge with the start of that range */
		sector_t a = BB_OFFSET(p[hi]);
		sector_t e = a + BB_LEN(p[hi]);
		int ack = BB_ACK(p[hi]);

		if (a <= s + sectors) {
			/* merging is possible */
			if (e <= s + sectors) {
				/* full overlap */
				e = s + sectors;
				ack = acknowledged;
			} else
				ack = ack && acknowledged;

			a = s;
			if (e - a <= BB_MAX_LEN) {
				p[hi] = BB_MAKE(a, e-a, ack);
				s = e;
			} else {
				p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
				s = a + BB_MAX_LEN;
			}
			sectors = e - s;
			lo = hi;
			hi++;
		}
	}
	if (sectors == 0 && hi < bb->count) {
		/* we might be able to combine lo and hi */
		/* Note: 's' is at the end of 'lo' */
		sector_t a = BB_OFFSET(p[hi]);
		int lolen = BB_LEN(p[lo]);
		int hilen = BB_LEN(p[hi]);
		int newlen = lolen + hilen - (s - a);

		if (s >= a && newlen < BB_MAX_LEN) {
			/* yes, we can combine them */
			int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);

			p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
			memmove(p + hi, p + hi + 1,
				(bb->count - hi - 1) * 8);
			bb->count--;
		}
	}
	while (sectors) {
		/* didn't merge (it all).
		 * Need to add a range just before 'hi' */
		if (bb->count >= MD_MAX_BADBLOCKS) {
			/* No room for more */
			rv = 0;
			break;
		} else {
			int this_sectors = sectors;

			memmove(p + hi + 1, p + hi,
				(bb->count - hi) * 8);
			bb->count++;

			if (this_sectors > BB_MAX_LEN)
				this_sectors = BB_MAX_LEN;
			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
			sectors -= this_sectors;
			s += this_sectors;
		}
	}

	bb->changed = 1;
	if (!acknowledged)
		bb->unacked_exist = 1;
	write_sequnlock_irqrestore(&bb->lock, flags);

	return rv;
}

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	int rv;

	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = md_set_badblocks(&rdev->badblocks,
			      s, sectors, 0);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
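
/* Illustrative error-path sketch (ours, not in the driver): how a
 * personality might record a failed write as a bad block, falling back
 * to failing the whole device only when the table is full.  The helper
 * name is hypothetical; md_error() is the existing device-failure
 * entry point.
 */
static void __maybe_unused narrow_write_error_demo(struct md_rdev *rdev,
						   sector_t sector, int nr)
{
	if (!rdev_set_badblocks(rdev, sector, nr, 0))
		/* no room left in the table: must fail the device */
		md_error(rdev->mddev, rdev);
}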

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);

		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = -ENOSPC;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);

				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so.. */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	return md_clear_badblocks(&rdev->badblocks,
				  s, sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0 && bb->unacked_exist) {
		u64 *p = bb->page;
		int i;

		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);

				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad-block range.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */
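
/* Illustrative interaction (ours): each line shown by badblocks_show()
 * below is "<start-sector> <length>", and a write is parsed by
 * badblocks_store().  For example, assuming an rdev sysfs directory
 * (path hypothetical):
 *
 *   # echo "2048 8" > /sys/block/md0/md/dev-sda1/bad-blocks
 *   # cat /sys/block/md0/md/dev-sda1/bad-blocks
 *   2048 8
 *
 * A write missing the length, or giving a non-positive one, is
 * rejected with -EINVAL; a full table yields -ENOSPC.
 */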
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);

		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;
	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the module while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);