igb_main.c

/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 2
#define BUILD 15
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
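/* For reference: with the MAJ/MIN/BUILD values above, __stringify()
 * expands DRV_VERSION to the string literal "5.2.15-k".
 */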
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
	"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
	"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			   igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
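/* Usage note (illustrative): max_vfs is read once at module load, e.g.
 *   modprobe igb max_vfs=7
 * VFs can also be enabled after load through the standard sysfs
 * sriov_numvfs interface, which lands in igb_pci_sriov_configure() below.
 */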
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
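/* Note (assumption, based on the standard netif_msg_init() convention this
 * parameter description follows): debug = -1 means "use the driver
 * default", which probe-time code turns into DEFAULT_MSG_ENABLE via
 * netif_msg_init(debug, DEFAULT_MSG_ENABLE); a value of 0..16 enables the
 * first 'debug' NETIF_MSG_* bits instead.
 */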
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63      46 45   40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1, buffer_info->skb->data,
					       dma_unmap_len(buffer_info, len),
					       true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc] [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       page_address(buffer_info->page) +
							       buffer_info->page_offset,
						       IGB_RX_BUFSZ, true);
				}
			}
		}
	}

exit:
	return;
}
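/* Usage note (illustrative): igb_dump() prints nothing unless the 'hw'
 * message bit is set in adapter->msg_enable, e.g. via
 *   ethtool -s <iface> msglvl 0x2000        (NETIF_MSG_HW)
 * The tx_done, rx_status, and pktdata bits gate the deeper ring and
 * packet-payload sections respectively.
 */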
/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: pointer to hardware structure
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to hardware structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to hardware structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to hardware structure
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
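/* A minimal sketch (kept under #if 0, not part of this file's control
 * flow) of how the bit-banging callbacks above are typically bound to an
 * i2c_adapter through the i2c-algo-bit layer; igb does the equivalent in
 * igb_init_i2c(). The function name is illustrative, and it assumes the
 * i2c_adap/i2c_algo members that struct igb_adapter declares in igb.h.
 */
#if 0
static int example_bind_i2c(struct igb_adapter *adapter)
{
	adapter->i2c_algo = igb_i2c_algo;	/* copy the callback template */
	adapter->i2c_algo.data = adapter;	/* handed back as 'data' above */
	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
	strlcpy(adapter->i2c_adap.name, "igb BB",
		sizeof(adapter->i2c_adap.name));

	/* register the adapter; i2c-algo-bit sanity-checks SDA/SCL first */
	return i2c_bit_add_bus(&adapter->i2c_adap);
}
#endif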
  593. /**
  594. * igb_get_hw_dev - return device
  595. * @hw: pointer to hardware structure
  596. *
  597. * used by hardware layer to print debugging information
  598. **/
  599. struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
  600. {
  601. struct igb_adapter *adapter = hw->back;
  602. return adapter->netdev;
  603. }
  604. /**
  605. * igb_init_module - Driver Registration Routine
  606. *
  607. * igb_init_module is the first routine called when the driver is
  608. * loaded. All it does is register with the PCI subsystem.
  609. **/
  610. static int __init igb_init_module(void)
  611. {
  612. int ret;
  613. pr_info("%s - version %s\n",
  614. igb_driver_string, igb_driver_version);
  615. pr_info("%s\n", igb_copyright);
  616. #ifdef CONFIG_IGB_DCA
  617. dca_register_notify(&dca_notifier);
  618. #endif
  619. ret = pci_register_driver(&igb_driver);
  620. return ret;
  621. }
  622. module_init(igb_init_module);
  623. /**
  624. * igb_exit_module - Driver Exit Cleanup Routine
  625. *
  626. * igb_exit_module is called just before the driver is removed
  627. * from memory.
  628. **/
  629. static void __exit igb_exit_module(void)
  630. {
  631. #ifdef CONFIG_IGB_DCA
  632. dca_unregister_notify(&dca_notifier);
  633. #endif
  634. pci_unregister_driver(&igb_driver);
  635. }
  636. module_exit(igb_exit_module);
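
/* Q_IDX_82576() interleaves pool-relative queue indices into the 82576
 * layout described in igb_cache_ring_register() below: even i count up
 * from 0 and odd i count up from 8, so i = 0, 1, 2, 3 maps to 0, 8, 1, 9,
 * keeping PF queues clear of the VF queue pairs once rbase_offset is added.
 */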
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
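	/* a surprise-removed PCIe device reads as all 1's on every register,
	 * so when a read looks like that, re-check against offset 0 (CTRL),
	 * which should never be all F's on live hardware, before detaching
	 */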
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
	}

	return value;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
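/* rx_queue/tx_queue below start out as IGB_N0_QUEUE (-1), meaning no ring
 * of that type is attached to the vector
 */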

static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
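		/* e.g. Rx queue 10 lands in IVAR register 2 at bit offset 16,
		 * and its paired Tx cause at bit offset 24 (the extra +8)
		 */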
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major. So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
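		/* e.g. Rx queue 3 lands in IVAR register 1 at bit offset 16,
		 * with the matching Tx cause at bit offset 24
		 */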
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated, so q_vector is NULL and we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;

		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
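
	/* the q_vector and its rings come from a single allocation: the
	 * ring structures trail the q_vector itself, which is why ring++
	 * further down is enough to step from the Tx ring to the Rx ring
	 */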
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
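		/* spread the remaining rings over the remaining vectors,
		 * e.g. 3 Rx + 3 Tx rings over 3 vectors pairs one of each
		 * per vector; igb sizes num_q_vectors so rqpv and tqpv
		 * never exceed the 1 Tx + 1 Rx limit enforced above
		 */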
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);
	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];

		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);

	igb_setup_link(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_check_swap_media - Detect and switch function for Media Auto Sense
 * @adapter: address of the board private structure
 **/
static void igb_check_swap_media(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext, connsw;
	bool swap_now = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	connsw = rd32(E1000_CONNSW);

	/* need to live swap if current media is copper and we have fiber/serdes
	 * to go to.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
		swap_now = true;
	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
		/* copper signal takes time to appear */
		if (adapter->copper_tries < 4) {
			adapter->copper_tries++;
			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
			return;
		} else {
			adapter->copper_tries = 0;
			if ((connsw & E1000_CONNSW_PHYSD) &&
			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
				swap_now = true;
				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
				wr32(E1000_CONNSW, connsw);
			}
		}
	}

	if (!swap_now)
		return;

	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		netdev_info(adapter->netdev,
			    "MAS: changing media to fiber/serdes\n");
		ctrl_ext |=
			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		adapter->copper_tries = 0;
		break;
	case e1000_media_type_internal_serdes:
	case e1000_media_type_fiber:
		netdev_info(adapter->netdev,
			    "MAS: changing media to copper\n");
		ctrl_ext &=
			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		break;
	default:
		/* shouldn't get here during regular operation */
		netdev_err(adapter->netdev,
  1487. "AMS: Invalid media type found, returning\n");
		break;
	}
	wr32(E1000_CTRL_EXT, ctrl_ext);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	if ((adapter->flags & IGB_FLAG_EEE) &&
	    (!hw->dev_spec._82575.eee_disable))
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);

	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 11000);

	igb_irq_disable(adapter);

	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);

#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

/**
 * igb_enable_mas - Media Autosense re-enable after swap
 * @adapter: adapter struct
 **/
static s32 igb_enable_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 connsw;
	s32 ret_val = 0;

	connsw = rd32(E1000_CONNSW);
	if (!(hw->phy.media_type == e1000_media_type_copper))
		return ret_val;

	/* configure for SerDes media detect; if the link is already on
	 * SerDes media there is nothing to enable
	 */
	if (!(connsw & E1000_CONNSW_SERDESD)) {
		connsw |= E1000_CONNSW_ENRGSRC;
		connsw |= E1000_CONNSW_AUTOSENSE_EN;
		wr32(E1000_CONNSW, connsw);
		wrfl();
	}

	return ret_val;
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_i354:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = rd32(E1000_PBA);

		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;

		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;

		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;

		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus 2 * the size of a full frame
	 */
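	/* e.g. a 34KB Rx PBA and 1522 byte frames (illustrative numbers
	 * only): 90% of 34816 is 31334, 34816 minus two frames is 31772,
	 * so hwm is 31334, masked below to 16-byte granularity
	 */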
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;

		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
		/* need to resetup here after media swap */
		adapter->ei.get_invariants(hw);
		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
	}
	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
		if (igb_enable_mas(adapter))
			dev_err(&pdev->dev,
				"Error enabling Media Auto Sense\n");
	}
	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/* Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
#ifdef CONFIG_IGB_HWMON
	/* Re-initialize the thermal sensor on i350 devices. */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (mac->type == e1000_i350 && hw->bus.func == 0) {
			/* If present, re-initialize the external thermal sensor
			 * interface.
			 */
			if (adapter->ets)
				mac->ops.init_thermal_sensor_thresh(hw);
		}
	}
#endif
	/* Re-establish EEE setting */
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (mac->type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			igb_set_eee_i350(hw, true, true);
			break;
		case e1000_i354:
			igb_set_eee_i354(hw, true, true);
			break;
		default:
			break;
		}
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	/* Re-enable PTP, where applicable. */
	igb_ptp_reset(adapter);

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int igb_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		igb_vlan_mode(netdev, features);

	if (!(changed & NETIF_F_RXALL))
		return 0;

	netdev->features = features;

	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	else
		igb_reset(adapter);

	return 0;
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
};

/**
 * igb_set_fw_version - Configure version string for ethtool
 * @adapter: adapter struct
 **/
void igb_set_fw_version(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fw_version fw;

	igb_get_fw_version(hw, &fw);

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (!(igb_get_flash_presence_i210(hw))) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%2d.%2d-%d",
				 fw.invm_major, fw.invm_minor,
				 fw.invm_img_type);
			break;
		}
		/* fall through */
	default:
		/* if option rom is valid, display its version too */
		if (fw.or_valid) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x, %d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.etrack_id,
				 fw.or_major, fw.or_build, fw.or_patch);
		/* no option rom */
		} else if (fw.etrack_id != 0x0000) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor, fw.etrack_id);
		} else {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.eep_build);
		}
		break;
	}
}

/**
 * igb_init_mas - init Media Autosense feature if enabled in the NVM
 * @adapter: adapter struct
 **/
static void igb_init_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 eeprom_data;

	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
	switch (hw->bus.func) {
	case E1000_FUNC_0:
		if (eeprom_data & IGB_MAS_ENABLE_0) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_1:
		if (eeprom_data & IGB_MAS_ENABLE_1) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_2:
		if (eeprom_data & IGB_MAS_ENABLE_2) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_3:
		if (eeprom_data & IGB_MAS_ENABLE_3) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	default:
		/* Shouldn't get here */
		netdev_err(adapter->netdev,
			   "MAS: Invalid port configuration, returning\n");
		break;
	}
}

/**
 * igb_init_i2c - Init I2C interface
 * @adapter: pointer to adapter structure
 **/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
	s32 status = 0;

	/* I2C interface supported on i350 devices */
	if (adapter->hw.mac.type != e1000_i350)
		return 0;

	/* Initialize the i2c bus which is controlled by the registers.
	 * This bus will use the i2c_algo_bit structure that implements
	 * the protocol through toggling of the 4 bits in the register.
	 */
	adapter->i2c_adap.owner = THIS_MODULE;
	adapter->i2c_algo = igb_i2c_algo;
	adapter->i2c_algo.data = adapter;
	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
	strlcpy(adapter->i2c_adap.name, "igb BB",
		sizeof(adapter->i2c_adap.name));
	status = i2c_bit_add_bus(&adapter->i2c_adap);
	return status;
}

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = -EIO;
	hw->hw_addr = pci_iomap(pdev, 0, 0);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/* features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_IPV6_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;
	netdev->hw_features |= NETIF_F_RXALL;

	/* set this bit last since it cannot be part of hw_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_SG;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (hw->mac.type >= e1000_82576) {
		netdev->hw_features |= NETIF_F_SCTP_CSUM;
		netdev->features |= NETIF_F_SCTP_CSUM;
	}

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good; i210/i211 parts can have special NVM
	 * that doesn't contain a checksum
	 */
  2081. switch (hw->mac.type) {
  2082. case e1000_i210:
  2083. case e1000_i211:
  2084. if (igb_get_flash_presence_i210(hw)) {
  2085. if (hw->nvm.ops.validate(hw) < 0) {
  2086. dev_err(&pdev->dev,
  2087. "The NVM Checksum Is Not Valid\n");
  2088. err = -EIO;
  2089. goto err_eeprom;
  2090. }
  2091. }
  2092. break;
  2093. default:
  2094. if (hw->nvm.ops.validate(hw) < 0) {
  2095. dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
  2096. err = -EIO;
  2097. goto err_eeprom;
  2098. }
  2099. break;
  2100. }
	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* get firmware version for ethtool -i */
	igb_set_fw_version(adapter);

	/* configure RXPBSIZE and TXPBSIZE */
	if (hw->mac.type == e1000_i210) {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
	}

	setup_timer(&adapter->watchdog_timer, igb_watchdog,
		    (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* By default, support wake on port A */
	if (hw->bus.func == 0)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* Check the NVM for wake support on non-port A ports */
	if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & IGB_EEPROM_APME)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	default:
		/* If the device can't wake, don't set software support */
		if (!device_can_wakeup(&adapter->pdev->dev))
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
	}

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
		adapter->wol |= E1000_WUFC_MAG;

	/* Some vendors want WoL disabled by default, but still supported */
	if ((hw->mac.type == e1000_i350) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}
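
	/* Register the final WoL capability with the PM core so that the
	 * device's sysfs power/wakeup state reflects what we report here.
	 */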
	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* Init the I2C interface */
	err = igb_init_i2c(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to init i2c interface\n");
		goto err_eeprom;
	}

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
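
	/* "eth%d" is only a template; register_netdev() replaces the %d
	 * with the first free interface number
	 */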
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}
#endif
#ifdef CONFIG_IGB_HWMON
	/* Initialize the thermal sensor on i350 devices. */
	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
		u16 ets_word;

		/* Read the NVM to determine if this i350 device supports an
		 * external thermal sensor.
		 */
		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
		if (ets_word != 0x0000 && ets_word != 0xFFFF)
			adapter->ets = true;
		else
			adapter->ets = false;
		if (igb_sysfs_init(adapter))
			dev_err(&pdev->dev,
				"failed to allocate sysfs resources\n");
	} else {
		adapter->ets = false;
	}
#endif
	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;
	if (hw->dev_spec._82575.mas_capable)
		igb_init_mas(adapter);

	/* do hw tstamp init after resetting */
	igb_ptp_init(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info, not applicable to i354 */
	if (hw->mac.type != e1000_i354) {
		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
			 netdev->name,
			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
			  "unknown"),
			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
			  "Width x4" :
			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
			  "Width x2" :
			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
			  "Width x1" : "unknown"), netdev->dev_addr);
	}

	if ((hw->mac.type >= e1000_i210 ||
	     igb_get_flash_presence_i210(hw))) {
		ret_val = igb_read_part_string(hw, part_str,
					       E1000_PBANUM_LENGTH);
	} else {
		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	}

	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
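
	/* Advertise Energy Efficient Ethernet at 100 and 1000 Mb/s on parts
	 * with an EEE-capable copper PHY, unless EEE has been disabled.
	 */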
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			/* Enable EEE for internal copper PHY devices */
			err = igb_set_eee_i350(hw, true, true);
			if ((!err) &&
			    (!hw->dev_spec._82575.eee_disable)) {
				adapter->eee_advert =
					MDIO_EEE_100TX | MDIO_EEE_1000T;
				adapter->flags |= IGB_FLAG_EEE;
			}
			break;
		case e1000_i354:
			if ((rd32(E1000_CTRL_EXT) &
			     E1000_CTRL_EXT_LINK_MODE_SGMII)) {
				err = igb_set_eee_i354(hw, true, true);
				if ((!err) &&
				    (!hw->dev_spec._82575.eee_disable)) {
					adapter->eee_advert =
						MDIO_EEE_100TX | MDIO_EEE_1000T;
					adapter->flags |= IGB_FLAG_EEE;
				}
			}
			break;
		default:
			break;
		}
	}
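
	/* Drop the usage count taken before probe; from here on the device
	 * may be runtime-suspended when it goes idle.
	 */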
	pm_runtime_put_noidle(&pdev->dev);
	return 0;

err_register:
	igb_release_hw_control(adapter);
	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

#ifdef CONFIG_PCI_IOV
static int igb_disable_sriov(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
			return -EPERM;
		} else {
			pci_disable_sriov(pdev);
			msleep(500);
		}

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		adapter->vfs_allocated_count = 0;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");

		/* Re-enable DMA Coalescing flag since IOV is turned off */
		adapter->flags |= IGB_FLAG_DMAC;
	}

	return 0;
}

static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	int old_vfs = pci_num_vf(pdev);
	int err = 0;
	int i;
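
	/* The device shares its eight queue pools between the PF and its
	 * VFs, and the PF keeps one pool for itself, which caps SR-IOV at
	 * seven VFs; SR-IOV also relies on MSI-X being available.
	 */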
	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
		err = -EPERM;
		goto out;
	}
	if (!num_vfs)
		goto out;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
			 old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	} else
		adapter->vfs_allocated_count = num_vfs;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);

	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		dev_err(&pdev->dev,
			"Unable to allocate memory for VF Data Storage\n");
		err = -ENOMEM;
		goto out;
	}

	/* only call pci_enable_sriov() if no VFs are allocated already */
	if (!old_vfs) {
		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
		if (err)
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;

err_out:
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return err;
}

#endif

/**
 *  igb_remove_i2c - Cleanup I2C interface
 *  @adapter: pointer to adapter structure
 **/
static void igb_remove_i2c(struct igb_adapter *adapter)
{
	/* free the adapter bus structure */
	i2c_del_adapter(&adapter->i2c_adap);
}

/**
 *  igb_remove - Device Removal Routine
 *  @pdev: PCI device information struct
 *
 *  igb_remove is called by the PCI subsystem to alert the driver
 *  that it should release a PCI device.  This could be caused by a
 *  Hot-Plug event, or because the driver is going to be removed from
 *  memory.
 **/
static void igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_HWMON
	igb_sysfs_exit(adapter);
#endif
	igb_remove_i2c(adapter);
	igb_ptp_stop(adapter);
	/* The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);
	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif

	pci_iounmap(pdev, hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 *  @adapter: board private structure to initialize
 *
 *  This function initializes the vf specific data storage and then attempts to
 *  allocate the VFs.  The reason for ordering it this way is because it is much
 *  more expensive time-wise to disable SR-IOV than it is to allocate and free
 *  the memory for the VFs.
 **/
static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Virtualization features not supported on i210 family. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
		return;
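
	/* Cap the number of VFs advertised through sysfs at the seven
	 * the PF can actually support.
	 */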
	pci_sriov_set_totalvfs(pdev, 7);
	igb_pci_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}

static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 max_rss_queues;

	/* Determine the maximum number of RSS queues supported. */
	switch (hw->mac.type) {
	case e1000_i211:
		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
		break;
	case e1000_82575:
	case e1000_i210:
		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
		break;
	case e1000_i350:
		/* I350 cannot do RSS and SR-IOV at the same time */
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 1;
			break;
		}
		/* fall through */
	case e1000_82576:
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 2;
			break;
		}
		/* fall through */
	case e1000_82580:
	case e1000_i354:
	default:
		max_rss_queues = IGB_MAX_RX_QUEUES;
		break;
	}

	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	/* Determine if we need to pair queues. */
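	/* Pairing puts one Rx and one Tx ring on a single q_vector, and
	 * thus a single MSI-X vector, halving the number of interrupt
	 * vectors the device needs.
	 */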
	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i211:
		/* Device supports enough interrupts without queue pairing. */
		break;
	case e1000_82576:
		/* If VFs are going to be allocated with RSS queues then we
		 * should pair the queues in order to conserve interrupts due
		 * to limited supply.
		 */
		if ((adapter->rss_queues > 1) &&
		    (adapter->vfs_allocated_count > 6))
			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
		/* fall through */
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	default:
		/* If rss_queues > half of max_rss_queues, pair the queues in
		 * order to conserve interrupts due to limited supply.
		 */
		if (adapter->rss_queues > (max_rss_queues / 2))
			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
		break;
	}
}

/**
 *  igb_sw_init - Initialize general software structures (struct igb_adapter)
 *  @adapter: board private structure to initialize
 *
 *  igb_sw_init initializes the Adapter private data structure.
 *  Fields are initialized based on PCI device information and
 *  OS network device settings (MTU size).
 **/
static int igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
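
	/* the largest legal frame is the MTU plus L2 overhead: Ethernet
	 * header, frame check sequence, and one VLAN tag
	 */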
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			max_vfs = adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		if (adapter->vfs_allocated_count)
			dev_warn(&pdev->dev,
				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */

	igb_init_queue_configuration(adapter);

	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
				       GFP_ATOMIC);

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_probe_vfs(adapter);

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type >= e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}

/**
 *  igb_open - Called when a network interface is made active
 *  @netdev: network interface device structure
 *
 *  Returns 0 on success, negative value on failure
 *
 *  The open entry point is called when a network interface is made
 *  active by the system (IFF_UP).  At this point all resources needed
 *  for transmit and receive operations are allocated, the interrupt
 *  handler is registered with the OS, the watchdog timer is started,
 *  and the stack is notified that the interface is ready.
 **/
static int __igb_open(struct net_device *netdev, bool resuming)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to set up our
	 * clean_rx handler before we do so.
	 */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(adapter->netdev,
					   adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_set_queues:
	igb_free_irq(adapter);
err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

static int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}

/**
 *  igb_close - Disables a network interface
 *  @netdev: network interface device structure
 *
 *  Returns 0, this is not allowed to fail
 *
 *  The close entry point is called when an interface is de-activated
 *  by the OS.  The hardware is still under the driver's control, but
 *  needs to be disabled.  A global MAC reset is issued to stop the
 *  hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

static int igb_close(struct net_device *netdev)
{
	return __igb_close(netdev, false);
}

/**
 *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
 *  @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 *  Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
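
	/* tx_buffer_info is software-only bookkeeping, so vmalloc memory is
	 * fine; the descriptor ring below must be DMA-coherent instead.
	 */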
	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				 (Descriptors) for all queues
 *  @adapter: board private structure
 *
 *  Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 *  igb_setup_tctl - configure the transmit control registers
 *  @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 *  igb_configure_tx_ring - Configure transmit ring after Reset
 *  @adapter: board private structure
 *  @ring: tx ring to configure
 *
 *  Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
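	/* flush the disable and give the hardware time to stop fetching
	 * descriptors before the ring registers are rewritten
	 */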
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 *  igb_configure_tx - Configure transmit Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
 *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 *  Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				 (Descriptors) for all queues
 *  @adapter: board private structure
 *
 *  Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 *  igb_setup_mrqc - configure the multiple receive queue control registers
 *  @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues;
	static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
					0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
					0xA32DCB77, 0x0CF23080, 0x3BB7426A,
					0xFA01ACBE };

	/* Fill out hash function seeds */
	for (j = 0; j < 10; j++)
		wr32(E1000_RSSRK(j), rsskey[j]);

	num_rx_queues = adapter->rss_queues;

	switch (hw->mac.type) {
	case e1000_82576:
		/* 82576 supports 2 RSS queues for SR-IOV */
		if (adapter->vfs_allocated_count)
			num_rx_queues = 2;
		break;
	default:
		break;
	}
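
	/* Spread the redirection table entries evenly across the active
	 * queues: entry j maps to queue (j * num_rx_queues) / IGB_RETA_SIZE.
	 */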
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGB_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGB_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igb_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6 |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue
	 */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);

			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
		else
			mrqc |= E1000_MRQC_ENABLE_VMDQ;
	} else {
		if (hw->mac.type != e1000_i211)
			mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
	}
	igb_vmm_control(adapter);

	wr32(E1000_MRQC, mrqc);
}

/**
 *  igb_setup_rctl - configure the receive control registers
 *  @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}
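
/**
 *  igb_set_vf_rlpml - set the per-pool maximum receive packet size
 *  @adapter: board private structure
 *  @size: largest frame, in bytes, the pool should accept
 *  @vfn: pool/VF number, where the PF pool follows the VF pools
 **/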
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* if it isn't the PF check to see if VFs are enabled and
	 * increase the size to support vlan tags
	 */
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)
		size += VLAN_TAG_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;

	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

/**
 *  igb_rlpml_set - set maximum receive packet size
 *  @adapter: board private structure
 *
 *  Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		/* If we're in VMDQ or SR-IOV mode, then set global RLPML
		 * to our max jumbo frame size, in case we need to enable
		 * jumbo frames on one of the rings later.
		 * This will not pass over-length frames into the default
		 * queue because it's gated by the VMOLR.RLPML.
		 */
		max_frame_size = MAX_JUMBO_FRAME_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
	if (hw->mac.type == e1000_i350) {
		u32 dvmolr;

		dvmolr = rd32(E1000_DVMOLR(vfn));
		dvmolr |= E1000_DVMOLR_STRVLAN;
		wr32(E1000_DVMOLR(vfn), dvmolr);
	}
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 *  igb_configure_rx_ring - Configure a receive ring after Reset
 *  @adapter: board private structure
 *  @ring: receive ring to be configured
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

/**
 *  igb_configure_rx - Configure receive Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter);

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
			 adapter->vfs_allocated_count);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 *  igb_free_tx_resources - Free Tx Resources per Queue
 *  @tx_ring: Tx descriptor ring for a specific queue
 *
 *  Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 *  igb_free_all_tx_resources - Free Tx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_free_tx_resources(adapter->tx_ring[i]);
}
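
/**
 *  igb_unmap_and_free_tx_resource - release a Tx buffer's skb and DMA mapping
 *  @ring: ring the buffer belongs to
 *  @tx_buffer: buffer to clean
 *
 *  The head of a frame carries the skb and is single-mapped; any remaining
 *  fragments are page-mapped, hence the two unmap paths below.
 **/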
void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
				    struct igb_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* buffer_info must be completely set up in the transmit path */
}

/**
 *  igb_clean_tx_ring - Free Tx Buffers
 *  @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;
	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
 *  @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 *  igb_free_rx_resources - Free Rx Resources
 *  @rx_ring: ring to clean the resources from
 *
 *  Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 *  igb_free_all_rx_resources - Free Rx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 *  igb_clean_rx_ring - Free Rx Buffers per Queue
 *  @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		if (!buffer_info->page)
			continue;

		dma_unmap_page(rx_ring->dev,
			       buffer_info->dma,
			       PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__free_page(buffer_info->page);

		buffer_info->page = NULL;
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
 *  @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 *  igb_set_mac - Change the Ethernet Address of the NIC
 *  @netdev: network interface device structure
 *  @p: pointer to an address structure
 *
 *  Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 *  igb_write_mc_addr_list - write multicast addresses to MTA
 *  @netdev: network interface device structure
 *
 *  Writes multicast address list to the MTA hash table.
 *  Returns: -ENOMEM on failure
 *           0 on no addresses written
 *           X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;
	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

/**
 *  igb_write_uc_addr_list - write unicast addresses to RAR table
 *  @netdev: network interface device structure
 *
 *  Writes unicast address list to the RAR table.
 *  Returns: -ENOMEM on failure/insufficient address space
 *           0 on no addresses written
 *           X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0 ; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

/**
 *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 *  @netdev: network interface device structure
 *
 *  The set_rx_mode entry point is called whenever the unicast or multicast
 *  address lists or the network interface flags are updated.  This routine is
 *  responsible for configuring the hardware for proper unicast, multicast,
 *  promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		/* retain VLAN HW filtering if in VT mode */
		if (adapter->vfs_allocated_count)
			rctl |= E1000_RCTL_VFE;
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/* Write addresses to available RAR registers, if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/* In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);

	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}
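
/* WVBR flags a spoof event per pool in two bit fields, one for each queue of
 * the pair; the second field starts 8 bits up, hence the staggered offset.
 */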
#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igb_update_phy_info(unsigned long data)
{
	struct igb_adapter *adapter = (struct igb_adapter *) data;

	igb_get_phy_info(&adapter->hw);
}

/**
 *  igb_has_link - check shared code for link and determine up/down
 *  @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  It stays set until
	 * e1000_check_for_link establishes link, for copper adapters
	 * ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (!hw->mac.get_link_status)
			return true;
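		/* fall through */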
  3545. case e1000_media_type_internal_serdes:
  3546. hw->mac.ops.check_for_link(hw);
  3547. link_active = !hw->mac.get_link_status;
  3548. break;
  3549. default:
  3550. case e1000_media_type_unknown:
  3551. break;
  3552. }
  3553. if (((hw->mac.type == e1000_i210) ||
  3554. (hw->mac.type == e1000_i211)) &&
  3555. (hw->phy.id == I210_I_PHY_ID)) {
  3556. if (!netif_carrier_ok(adapter->netdev)) {
  3557. adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
  3558. } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
  3559. adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
  3560. adapter->link_check_timeout = jiffies;
  3561. }
  3562. }
  3563. return link_active;
  3564. }
  3565. static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
  3566. {
  3567. bool ret = false;
  3568. u32 ctrl_ext, thstat;
  3569. /* check for thermal sensor event on i350 copper only */
  3570. if (hw->mac.type == e1000_i350) {
  3571. thstat = rd32(E1000_THSTAT);
  3572. ctrl_ext = rd32(E1000_CTRL_EXT);
  3573. if ((hw->phy.media_type == e1000_media_type_copper) &&
  3574. !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
  3575. ret = !!(thstat & event);
  3576. }
  3577. return ret;
  3578. }
  3579. /**
  3580. * igb_check_lvmmc - check for malformed packets received
  3581. * and indicated in LVMMC register
  3582. * @adapter: pointer to adapter
  3583. **/
  3584. static void igb_check_lvmmc(struct igb_adapter *adapter)
  3585. {
  3586. struct e1000_hw *hw = &adapter->hw;
  3587. u32 lvmmc;
  3588. lvmmc = rd32(E1000_LVMMC);
  3589. if (lvmmc) {
  3590. if (unlikely(net_ratelimit())) {
  3591. netdev_warn(adapter->netdev,
  3592. "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
  3593. lvmmc);
  3594. }
  3595. }
  3596. }
  3597. /**
  3598. * igb_watchdog - Timer Call-back
  3599. * @data: pointer to adapter cast into an unsigned long
  3600. **/
  3601. static void igb_watchdog(unsigned long data)
  3602. {
  3603. struct igb_adapter *adapter = (struct igb_adapter *)data;
  3604. /* Do the rest outside of interrupt context */
  3605. schedule_work(&adapter->watchdog_task);
  3606. }

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_info *phy = &hw->phy;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;
	u32 connsw;

	link = igb_has_link(adapter);

	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	/* Force link down if we have fiber to swap to */
	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
		if (hw->phy.media_type == e1000_media_type_copper) {
			connsw = rd32(E1000_CONNSW);
			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
				link = 0;
		}
	}

	if (link) {
		/* Perform a reset if the media type changed. */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = false;
			adapter->flags |= IGB_FLAG_MEDIA_RESET;
			igb_reset(adapter);
		}
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			netdev_info(netdev,
				    "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    netdev->name,
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & E1000_CTRL_TFCE) &&
				    (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & E1000_CTRL_RFCE) ? "RX" :
				    (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGB_FLAG_EEE) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				dev_info(&adapter->pdev->dev,
					 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
				adapter->hw.dev_spec._82575.eee_disable = true;
				adapter->flags &= ~IGB_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igb_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_LINK_THROTTLE))
				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
						     E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
			}

			/* Links status message must follow this format */
			netdev_info(netdev, "igb: %s NIC Link is Down\n",
				    netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
				igb_check_swap_media(adapter);
				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
			igb_check_swap_media(adapter);
			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];
		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);
	igb_ptp_rx_hang(adapter);

	/* Check LVMMC register on i350/i354 only */
	if ((adapter->hw.mac.type == e1000_i350) ||
	    (adapter->hw.mac.type == e1000_i354))
		igb_check_lvmmc(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
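
/* Worked example for igb_update_ring_itr() above (added commentary,
 * illustration only): with ~500-byte frames on average, avg_wire_size
 * becomes 500 + 24 = 524, which falls in the mid-size band (300..1200),
 * so new_val = 524 / 3 = 174. Small frames take the /2 divisor instead,
 * e.g. 100 + 24 = 124 gives new_val = 62, a shorter throttle interval
 * for latency-sensitive small-packet traffic.
 */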

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}
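
/* Worked examples for the igb_update_itr() state machine above (added
 * commentary, illustration only): starting from low_latency, 40 packets
 * totalling 20000 bytes give bytes > 10000 with bytes/packets = 500, so
 * neither bulk test fires, and packets > 35 steps the container to
 * lowest_latency; ten 9000-byte chunks (90000 bytes) instead trip the
 * bytes/packets > 8000 TSO test and move it to bulk_latency.
 */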

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}
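
/* Arithmetic illustration of the biasing step in igb_set_itr() above
 * (added commentary): with a current itr_val of 196 (20K ints/s) and a
 * target of 980 (4K ints/s), the candidate is (980 * 196) / (980 + 49)
 * = 192080 / 1029 = 186, and max(186, 980) selects 980, so the full
 * step toward the slower rate is taken in this example.
 */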

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	if (first->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
	} else {
		u8 l4_hdr = 0;

		switch (first->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IGB_TX_FLAGS_CSUM;
	}

	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
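
/* Worked example for IGB_SET_FLAG() (added commentary, with made-up
 * bit values): mapping an input flag of 0x0002 onto a result bit of
 * 0x40000000 takes the first branch since 0x2 <= 0x40000000, yielding
 * (_input & 0x2) * (0x40000000 / 0x2). Both masks are compile-time
 * constants at every call site below, so the branch and the division
 * fold away and only a mask-and-scale remains.
 */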

static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
		       E1000_ADVTXD_DCMD_DEXT |
		       E1000_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
				 (E1000_ADVTXD_DCMD_VLE));

	/* set segmentation bits for TSO */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
				 (E1000_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
				 (E1000_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);

	return cmd_type;
}
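
/* Note on the final XOR in igb_tx_cmd_type() above (added commentary):
 * cmd_type already has E1000_ADVTXD_DCMD_IFCS set, so XOR-ing in the
 * IGB_SET_FLAG() result clears IFCS exactly when skb->no_fcs is 1,
 * i.e. frame checksum insertion is suppressed only for no-FCS skbs.
 */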

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_CSUM,
				      (E1000_TXD_POPTS_TXSM << 8));

	/* insert IPv4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_IPV4,
				      (E1000_TXD_POPTS_IXSM << 8));

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
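
/* A note on the stop/recheck pattern above (added commentary): the
 * smp_mb() in __igb_maybe_stop_tx() orders the queue-stop write against
 * the re-read of the free descriptor count. A concurrent completion
 * path that frees descriptors then either sees the stopped queue and
 * wakes it, or the recheck here sees the new room and wakes the queue
 * itself; without the barrier both sides could miss each other and the
 * queue could stall.
 */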

static void igb_tx_map(struct igb_ring *tx_ring,
		       struct igb_tx_buffer *first,
		       const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGB_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}
	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
		unsigned short f;

		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
	} else {
		count += skb_shinfo(skb)->nr_frags;
	}
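
	/* Worked example of the count computed above (added commentary;
	 * illustration only, assuming TXD_USE_COUNT() rounds a byte count
	 * up to whole descriptors of IGB_MAX_DATA_PER_TXD bytes each): a
	 * linear skb with a 9000-byte headlen and no frags needs
	 * TXD_USE_COUNT(9000) data descriptors, and igb_maybe_stop_tx()
	 * below is then asked for count + 3 slots: one context descriptor
	 * plus the two-slot gap that keeps tail from touching head.
	 */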

	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGB_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			if (adapter->hw.mac.type == e1000_82576)
				schedule_work(&adapter->ptp_tx_work);
		}
	}

	skb_tx_timestamp(skb);

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	igb_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	igb_unmap_and_free_tx_resource(tx_ring, first);

	return NETDEV_TX_OK;
}

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		if (skb_pad(skb, 17 - skb->len))
			return NETDEV_TX_OK;
		skb->len = 17;
		skb_set_tail_pointer(skb, 17);
	}

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
						 struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);

	return stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

#define MAX_STD_JUMBO_FRAME_SIZE 9238
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}
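
	/* Example of the arithmetic above (added commentary): a requested
	 * MTU of 9000 gives max_frame = 9000 + ETH_HLEN (14) +
	 * ETH_FCS_LEN (4) + VLAN_HLEN (4) = 9022, which passes both
	 * checks; an MTU of 9217 gives 9239 > MAX_STD_JUMBO_FRAME_SIZE
	 * (9238) and is rejected with "MTU > 9216 not supported".
	 */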

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter,
		      struct rtnl_link_stats64 *net_stats)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(E1000_RQDPC(i));
		if (hw->mac.type >= e1000_i210)
			wr32(E1000_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);

		/* this stat has invalid values on i210/i211 */
		if ((hw->mac.type != e1000_i210) &&
		    (hw->mac.type != e1000_i211))
			adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event.
		 */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS) {
		u32 tsicr = rd32(E1000_TSICR);

		if (tsicr & E1000_TSICR_TXTS) {
			/* acknowledge the interrupt */
			wr32(E1000_TSICR, E1000_TSICR_TXTS);
			/* retrieve hardware timestamp */
			schedule_work(&adapter->ptp_tx_work);
		}
	}

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_tx_dca(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);

	if (hw->mac.type != e1000_82575)
		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
		  E1000_DCA_TXCTRL_DATA_RRO_EN |
		  E1000_DCA_TXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}

static void igb_update_rx_dca(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);

	if (hw->mac.type != e1000_82575)
		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
		  E1000_DCA_RXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}

static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring)
		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);

	if (q_vector->rx.ring)
		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model
			 */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	/* By default spoof check is enabled for all VFs */
	adapter->vf_data[vf].spoofchk_enabled = true;

	return 0;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/* if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;

			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}
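
/* Mailbox layout consumed by igb_set_vf_multicasts() above (added
 * commentary): the VF encodes the hash count in the MSGINFO field of
 * msgbuf[0] and packs the hash values as 16-bit words starting at
 * msgbuf[1]; e.g. a request carrying five hashes has
 * (5 << E1000_VT_MSGINFO_SHIFT) in word 0 and the u16 hashes spread
 * across words 1..3.
 */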

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));

		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta;
		 * extract the VLAN ID before zeroing the register,
		 * otherwise the VFTA clear always targets VLAN 0
		 */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			vid = reg & E1000_VLVF_VLANID_MASK;
			igb_vfta_set(hw, vid, false);
			reg = 0;
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled. Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;

				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;

				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
		}
	}
	return 0;
}
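
/* Note on the RLPML adjustment in igb_vlvf_set() above (added
 * commentary): the per-VF receive packet-length limit held in VMOLR is
 * grown by 4 bytes when a VF enables its first VLAN, making room for
 * one 802.1Q tag, and shrunk by 4 again once the VF's last VLAN is
 * removed.
 */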

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos)
{
	int err = 0;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;
	if (vlan || qos) {
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		if (err)
			goto out;
		igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
		igb_set_vmolr(adapter, vf, !vlan);
		adapter->vf_data[vf].pf_vlan = vlan;
		adapter->vf_data[vf].pf_qos = qos;
		dev_info(&adapter->pdev->dev,
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF VLAN has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
			     false, vf);
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);
		adapter->vf_data[vf].pf_vlan = 0;
		adapter->vf_data[vf].pf_qos = 0;
	}
out:
	return err;
}

static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;
	u32 reg;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (i >= E1000_VLVF_ARRAY_SIZE)
		i = -1;

	return i;
}

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int err = 0;

	/* If in promiscuous mode we need to make sure the PF also has
	 * the VLAN filter set.
	 */
	if (add && (adapter->netdev->flags & IFF_PROMISC))
		err = igb_vlvf_set(adapter, vid, add,
				   adapter->vfs_allocated_count);
	if (err)
		goto out;

	err = igb_vlvf_set(adapter, vid, add, vf);

	if (err)
		goto out;

	/* Go through all the checks to see if the VLAN filter should
	 * be wiped completely.
	 */
	if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
		u32 vlvf, bits;
		int regndx = igb_find_vlvf_entry(adapter, vid);

		if (regndx < 0)
			goto out;
		/* See if any other pools are set for this VLAN filter
		 * entry other than the PF.
		 */
		vlvf = bits = rd32(E1000_VLVF(regndx));
		bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
			      adapter->vfs_allocated_count);
		/* If the filter was removed then ensure PF pool bit
		 * is cleared if the PF only added itself to the pool
		 * because the PF is in promiscuous mode.
		 */
		if ((vlvf & VLAN_VID_MASK) == vid &&
		    !test_bit(vid, adapter->active_vlans) &&
		    !bits)
			igb_vlvf_set(adapter, vid, add,
				     adapter->vfs_allocated_count);
	}

out:
	return err;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear flags - except flag that indicates PF has set the MAC */
	adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf, true);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
				    adapter->vf_data[vf].pf_vlan,
				    adapter->vf_data[vf].pf_qos);
	else
		igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, ETH_ALEN);
	igb_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

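/**
 * igb_rcv_msg_from_vf - dispatch a mailbox message from a VF
 * @adapter: board private structure
 * @vf: VF that sent the message
 *
 * Reads the VF mailbox and routes the request (reset, MAC, promiscuous,
 * multicast, LPE, or VLAN configuration) to the matching handler, then
 * acks or nacks the message depending on the handler's result.
 **/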
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = -EINVAL;
		if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
			retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		else
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

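/**
 * igb_msg_task - poll the mailbox for each allocated VF
 * @adapter: board private structure
 *
 * Walks all allocated VFs and services any pending reset requests,
 * messages, and acks found in their mailboxes.
 **/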
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS) {
		u32 tsicr = rd32(E1000_TSICR);

		if (tsicr & E1000_TSICR_TXTS) {
			/* acknowledge the interrupt */
			wr32(E1000_TSICR, E1000_TSICR_TXTS);
			/* retrieve hardware timestamp */
			schedule_work(&adapter->ptp_tx_work);
		}
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS) {
		u32 tsicr = rd32(E1000_TSICR);

		if (tsicr & E1000_TSICR_TXTS) {
			/* acknowledge the interrupt */
			wr32(E1000_TSICR, E1000_TSICR_TXTS);
			/* retrieve hardware timestamp */
			schedule_work(&adapter->ptp_tx_work);
		}
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

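/**
 * igb_ring_irq_enable - update ITR and re-enable the ring's interrupt
 * @q_vector: q_vector whose interrupt is being re-armed
 *
 * Updates the interrupt throttle rate for the vector if dynamic ITR is
 * configured, then re-enables the interrupt (per-vector EIMS bit for
 * MSI-X, global enable otherwise) unless the adapter is going down.
 **/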
static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
	    netif_carrier_ok(tx_ring->netdev) &&
	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

/**
 * igb_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *old_buff)
{
	struct igb_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 IGB_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

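/**
 * igb_can_reuse_rx_page - check if a page can be recycled into the ring
 * @rx_buffer: buffer containing the page
 * @page: page to check
 * @truesize: size of the region consumed by the received frame
 *
 * Returns true if the page is local to this NUMA node, not a pfmemalloc
 * emergency page, and still has a free half (or remaining room) for
 * another receive, taking an extra page reference for the recycled use.
 **/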
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
				  struct page *page,
				  unsigned int truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

	if (unlikely(page->pfmemalloc))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IGB_RX_BUFSZ;

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}

/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
			    struct igb_rx_buffer *rx_buffer,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IGB_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif

	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
		unsigned char *va = page_address(page) + rx_buffer->page_offset;

		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
			va += IGB_TS_HDR_LEN;
			size -= IGB_TS_HDR_LEN;
		}

		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely((page_to_nid(page) == numa_node_id()) &&
			   !page->pfmemalloc))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}

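/**
 * igb_fetch_rx_buffer - retrieve the current Rx buffer and build an skb
 * @rx_ring: rx descriptor ring the buffer sits on
 * @rx_desc: descriptor describing the buffer
 * @skb: skb currently being assembled, or NULL to allocate a new one
 *
 * Allocates an skb for the frame if needed, syncs the page for CPU use,
 * and either recycles the page back to the ring or unmaps it depending
 * on whether it can be reused.  Returns the skb, or NULL on allocation
 * failure.
 **/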
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
					   union e1000_adv_rx_desc *rx_desc,
					   struct sk_buff *skb)
{
	struct igb_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IGB_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IGB_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		igb_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

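/**
 * igb_rx_checksum - set skb checksum state based on Rx descriptor status
 * @ring: ring the packet arrived on
 * @rx_desc: Rx descriptor carrying the checksum status bits
 * @skb: skb being populated
 *
 * Leaves the checksum for the stack to verify when offload is disabled,
 * the IXSM bit is set, or an error is flagged (counting the error unless
 * it matches the 60-byte SCTP errata case); otherwise marks validated
 * TCP/UDP checksums as CHECKSUM_UNNECESSARY.
 **/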
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igb_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
			   union e1000_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGB_RX_DESC(rx_ring, ntc));

	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igb_pull_tail - igb specific version of skb_pull_tail
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being adjusted
 *
 * This function is an igb specific version of __pskb_pull_tail.  The
 * main difference between this version and the original function is that
 * this function can make several assumptions about the state of things
 * that allow for significant optimizations versus the standard function.
 * As a result we can do things like drop a frag and maintain an accurate
 * truesize for the skb.
 **/
static void igb_pull_tail(struct igb_ring *rx_ring,
			  union e1000_adv_rx_desc *rx_desc,
			  struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the low memory pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		/* retrieve timestamp from buffer */
		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);

		/* update pointers to remove timestamp header */
		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
		frag->page_offset += IGB_TS_HDR_LEN;
		skb->data_len -= IGB_TS_HDR_LEN;
		skb->len -= IGB_TS_HDR_LEN;

		/* move va to start of packet data */
		va += IGB_TS_HDR_LEN;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	skb_frag_size_sub(frag, pull_len);
	frag->page_offset += pull_len;
	skb->data_len -= pull_len;
	skb->tail += pull_len;
}

/**
 * igb_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
				union e1000_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igb_test_staterr(rx_desc,
				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* place header in linear portion of buffer */
	if (skb_is_nonlinear(skb))
		igb_pull_tail(rx_ring, rx_desc, skb);

	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}

/**
 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	igb_rx_hash(rx_ring, rx_desc, skb);

	igb_rx_checksum(rx_ring, rx_desc, skb);

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;

		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

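/**
 * igb_clean_rx_irq - clean completed descriptors from the Rx ring
 * @q_vector: q_vector owning the Rx ring
 * @budget: NAPI budget of packets to process
 *
 * Processes completed Rx descriptors up to @budget packets, assembling
 * skbs, populating their fields, and handing them to the GRO layer while
 * returning used buffers to hardware in batches.  Returns true if the
 * budget was not exhausted.
 **/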
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union e1000_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igb_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igb_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets < budget;
}

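/**
 * igb_alloc_mapped_page - allocate and DMA-map a page for an Rx buffer
 * @rx_ring: ring the buffer belongs to
 * @bi: buffer info structure to fill in
 *
 * Allocates a fresh page when the buffer has none (recycled buffers keep
 * theirs), maps it for device DMA, and records the mapping in @bi.
 * Returns false and bumps the alloc_failed counter on failure.
 **/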
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring to allocate new receive buffers for
 * @cleaned_count: number of buffers to allocate
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - MII ioctl handler
 * @netdev: network interface device structure
 * @ifr: interface request structure carrying the MII data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG, or SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_ioctl - entry point for device ioctls
 * @netdev: network interface device structure
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCGHWTSTAMP:
		return igb_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igb_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_read_word(adapter->pdev, reg, value))
		return -E1000_ERR_CONFIG;

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_write_word(adapter->pdev, reg, *value))
		return -E1000_ERR_CONFIG;

	return 0;
}

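/**
 * igb_vlan_mode - enable or disable hardware VLAN tag stripping
 * @netdev: network interface device structure
 * @features: netdev feature set to apply
 *
 * Sets or clears the VME bit in CTRL (also disabling the CFI check in
 * RCTL when enabling) to match NETIF_F_HW_VLAN_CTAG_RX, then updates
 * the maximum packet length for the new VLAN configuration.
 **/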
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
			       __be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

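/**
 * igb_set_spd_dplx - force a specific speed/duplex setting
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10, SPEED_100, or SPEED_1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 *
 * Forces the MAC to the requested speed/duplex pair (1000 Mbps full
 * duplex is configured via autoneg advertisement), rejecting
 * combinations the link type cannot support.  Returns 0 on success or
 * -EINVAL for an unsupported setting.
 **/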
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps Full duplex, plus 100 Mbps
	 * Full duplex for 100BaseFX SFP modules.
	 */
	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		switch (spd + dplx) {
		case SPEED_10 + DUPLEX_HALF:
		case SPEED_10 + DUPLEX_FULL:
		case SPEED_100 + DUPLEX_HALF:
			goto err_inval;
		default:
			break;
		}
	}

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

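/**
 * __igb_shutdown - bring the interface down and arm wake-up
 * @pdev: PCI device being shut down or suspended
 * @enable_wake: set to true if wake-up should remain armed
 * @runtime: true for runtime suspend (wake on link change only)
 *
 * Detaches and closes the interface, programs the requested wake-up
 * filters (or link-change only for runtime suspend), and powers the
 * link up or down accordingly before disabling the PCI device.
 **/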
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, 0);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static int igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netdev->flags & IFF_UP) {
		rtnl_lock();
		err = __igb_open(netdev, true);
		rtnl_unlock();
		if (err)
			return err;
	}

	netif_device_attach(netdev);
	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, 1);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}
#endif /* CONFIG_PM_RUNTIME */
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PCI_IOV
static int igb_sriov_reinit(struct pci_dev *dev)
{
	struct net_device *netdev = pci_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	rtnl_lock();

	if (netif_running(netdev))
		igb_close(netdev);
	else
		igb_reset(adapter);

	igb_clear_interrupt_scheme(adapter);

	igb_init_queue_configuration(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		/* drop rtnl before bailing out of the error path */
		rtnl_unlock();
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		igb_open(netdev);

	rtnl_unlock();

	return 0;
}

static int igb_pci_disable_sriov(struct pci_dev *dev)
{
	int err = igb_disable_sriov(dev);

	if (!err)
		err = igb_sriov_reinit(dev);

	return err;
}

static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	int err = igb_enable_sriov(dev, num_vfs);

	if (err)
		goto out;

	err = igb_sriov_reinit(dev);
	if (!err)
		return num_vfs;

out:
	return err;
}

#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	if (num_vfs == 0)
		return igb_pci_disable_sriov(dev);
	else
		return igb_pci_enable_sriov(dev, num_vfs);
#endif
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 **/
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}

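/**
 * igb_rar_set_qsel - program a receive address register with pool select
 * @adapter: board private structure
 * @addr: MAC address to program
 * @index: receive address register to write
 * @qsel: pool/queue to associate with the address
 *
 * Writes the address in little-endian form, marks it valid, and encodes
 * the pool selection bits appropriately for the MAC type.
 **/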
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
		return -EINVAL;
	adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
	dev_info(&adapter->pdev->dev,
		 "Reload the VF driver to make this change effective.\n");
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF MAC address has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

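/**
 * igb_set_vf_rate_limit - program the Tx rate limiter for a VF queue
 * @hw: pointer to hardware structure
 * @vf: VF (and therefore Tx queue) to limit
 * @tx_rate: allowed transmit rate in Mbps, 0 to disable limiting
 * @link_speed: current link speed in Mbps
 *
 * Computes the integer and decimal rate factors from the link speed and
 * writes them to the RTTBCNRC register for the VF's queue, along with
 * the global MMW compensation value.
 **/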
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
			 tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	if (min_tx_rate)
		return -EINVAL;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (max_tx_rate < 0) ||
	    (max_tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);

	return 0;
}


static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_val, reg_offset;

	if (!adapter->vfs_allocated_count)
		return -EOPNOTSUPP;

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
	reg_val = rd32(reg_offset);
	if (setting)
		reg_val |= ((1 << vf) |
			    (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
	else
		reg_val &= ~((1 << vf) |
			     (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
	wr32(reg_offset, reg_val);

	adapter->vf_data[vf].spoofchk_enabled = setting;
	return 0;
}
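
/* Layout note (a sketch, assuming E1000_DTXSWC_VLAN_SPOOF_SHIFT is 8): the
 * anti-spoof register carries one MAC anti-spoof bit per VF in its low byte
 * and the matching VLAN anti-spoof bit shifted up by the VLAN shift, so for
 * vf = 2 the code above toggles bits 2 and 10 together; MAC and VLAN spoof
 * checking are always enabled or disabled as a pair.
 */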

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	case e1000_i354:
	default:
		/* replication is not supported for these devices */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* Fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* Fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, with a floor of PBA - 6KB.
			 */
			hwm = 64 * pba - adapter->max_frame_size / 16;
			if (hwm < 64 * (pba - 6))
				hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, with a floor of PBA - 10KB.
			 */
			dmac_thr = pba - adapter->max_frame_size / 512;
			if (dmac_thr < pba - 10)
				dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = ~1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			if (hw->mac.type != e1000_i354)
				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;

			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing (smart FIFO) - UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/* make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
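
/* Worked example of the watermark math above (a sketch; pba is the Rx packet
 * buffer size in KB, and pba = 34 is assumed for illustration): with
 * max_frame_size = 1522, the high water mark in 16-byte units is
 * hwm = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081, comfortably above the
 * 64 * (34 - 6) = 1792 floor, and the Rx threshold in KB is
 * dmac_thr = 34 - 1522 / 512 = 34 - 2 = 32, above its 34 - 10 = 24 floor.
 * Only with jumbo frames do the floors actually clamp the values.
 */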

/**
 * igb_read_i2c_byte - Reads one byte over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs a byte read operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

	if (!this_client)
		return E1000_ERR_I2C;

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* i2c_smbus_read_byte_data() returns a negative errno on failure,
	 * otherwise the byte value that was read.
	 */
	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
}

/**
 * igb_write_i2c_byte - Writes one byte over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs a byte write operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status ? E1000_ERR_I2C : 0;
}
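
/* Both helpers bracket the SMBus transfer with the SW/FW semaphore because
 * the I2C pins are shared with firmware; holding E1000_SWFW_PHY0_SM keeps
 * the two from driving the bus at once. In this driver the i2c_client is
 * used for on-board peripherals such as the external thermal sensor, which
 * is likely why @dev_addr is accepted but unused - the client already
 * encodes the target address (an observation, not a documented contract).
 */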

int igb_reinit_queues(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igb_open(netdev);

	return err;
}
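
/* igb_reinit_queues() tears down and rebuilds the interrupt and queue layout
 * while the interface stays administratively configured; it is the path
 * taken when the queue count changes at runtime, e.g. via ethtool's channel
 * interface (ethtool -L), assuming the usual igb ethtool wiring.
 */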

/* igb_main.c */