cmdq_core.c 236 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181
  1. #include "cmdq_core.h"
  2. #include "cmdq_virtual.h"
  3. #include "cmdq_reg.h"
  4. #include "cmdq_struct.h"
  5. #include "cmdq_device.h"
  6. #include "cmdq_record.h"
  7. #include "cmdq_sec.h"
  8. #ifdef CMDQ_PROFILE_MMP
  9. #include "cmdq_mmp.h"
  10. #endif
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/sched.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/errno.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/mutex.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/atomic.h>
  21. #include <linux/slab.h>
  22. #include <linux/workqueue.h>
  23. #include <linux/memory.h>
  24. #include <linux/ftrace.h>
  25. #ifdef CMDQ_MET_READY
  26. #include <linux/met_drv.h>
  27. #endif
  28. #include <linux/seq_file.h>
  29. #include <linux/kthread.h>
  30. #ifndef CMDQ_OF_SUPPORT
  31. #include <mach/mt_irq.h>
  32. #include "ddp_reg.h"
  33. #endif
  34. #ifdef CMDQ_OF_SUPPORT
  35. #define MMSYS_CONFIG_BASE cmdq_dev_get_module_base_VA_MMSYS_CONFIG()
  36. #else
  37. #include <mach/mt_reg_base.h>
  38. #endif
  39. /* #define CMDQ_PROFILE_COMMAND_TRIGGER_LOOP */
  40. /* #define CMDQ_APPEND_WITHOUT_SUSPEND */
  41. /* #define CMDQ_ENABLE_BUS_ULTRA */
  42. #define CMDQ_GET_COOKIE_CNT(thread) (CMDQ_REG_GET32(CMDQ_THR_EXEC_CNT(thread)) & CMDQ_MAX_COOKIE_VALUE)
  43. #define CMDQ_SYNC_TOKEN_APPEND_THR(id) (CMDQ_SYNC_TOKEN_APPEND_THR0 + id)
  44. /* use mutex because we don't access task list in IRQ */
  45. /* and we may allocate memory when create list items */
  46. static DEFINE_MUTEX(gCmdqTaskMutex);
  47. static DEFINE_MUTEX(gCmdqSaveBufferMutex);
  48. /* static DEFINE_MUTEX(gCmdqWriteAddrMutex); */
  49. static DEFINE_SPINLOCK(gCmdqWriteAddrLock);
  50. #if defined(CMDQ_SECURE_PATH_SUPPORT) && !defined(CMDQ_SECURE_PATH_NORMAL_IRQ)
  51. /* ensure atomic start/stop notify loop*/
  52. static DEFINE_MUTEX(gCmdqNotifyLoopMutex);
  53. #endif
  54. /* t-base(secure OS) doesn't allow entry secure world in ISR context, */
  55. /* but M4U has to restore lab0 register in secure world when enable lab 0 first time. */
  56. /* HACK: use m4u_larb0_enable() to lab0 on/off to ensure larb0 restore and clock on/off sequence */
  57. /* HACK: use gCmdqClockMutex to ensure acquire/release thread and enable/disable clock sequence */
  58. static DEFINE_MUTEX(gCmdqClockMutex);
  59. /* These may access in IRQ so use spin lock. */
  60. static DEFINE_SPINLOCK(gCmdqThreadLock);
  61. static atomic_t gCmdqThreadUsage;
  62. static atomic_t gSMIThreadUsage;
  63. static bool gCmdqSuspended;
  64. static DEFINE_SPINLOCK(gCmdqExecLock);
  65. static DEFINE_SPINLOCK(gCmdqRecordLock);
  66. static DEFINE_MUTEX(gCmdqResourceMutex);
  67. /* Emergency buffer when fail to allocate memory. */
  68. static DEFINE_SPINLOCK(gCmdqAllocLock);
  69. static EmergencyBufferStruct gCmdqEmergencyBuffer[CMDQ_EMERGENCY_BLOCK_COUNT];
  70. /* The main context structure */
  71. static wait_queue_head_t gCmdWaitQueue[CMDQ_MAX_THREAD_COUNT]; /* task done notification */
  72. static wait_queue_head_t gCmdqThreadDispatchQueue; /* thread acquire notification */
  73. static ContextStruct gCmdqContext;
  74. static CmdqCBkStruct gCmdqGroupCallback[CMDQ_MAX_GROUP_COUNT];
  75. static CmdqDebugCBkStruct gCmdqDebugCallback;
  76. static cmdq_dts_setting g_dts_setting;
  77. #ifdef CMDQ_DUMP_FIRSTERROR
  78. DumpFirstErrorStruct gCmdqFirstError;
  79. #endif
  80. static DumpCommandBufferStruct gCmdqBufferDump;
  81. /* Debug or test usage */
  82. /* enable this option only when test emergency buffer*/
  83. /* #define CMDQ_TEST_EMERGENCY_BUFFER */
  84. #ifdef CMDQ_TEST_EMERGENCY_BUFFER
  85. static atomic_t gCmdqDebugForceUseEmergencyBuffer = ATOMIC_INIT(0);
  86. #endif
  87. #ifdef CMDQ_SECURE_PATH_CONSUME_AGAIN
  88. static bool g_cmdq_consume_again;
  89. #endif
  90. /* use to generate [CMDQ_ENGINE_ENUM_id and name] mapping for status print */
  91. #define CMDQ_FOREACH_MODULE_PRINT(ACTION)\
  92. { \
  93. ACTION(CMDQ_ENG_ISP_IMGI, ISP_IMGI) \
  94. ACTION(CMDQ_ENG_MDP_RDMA0, MDP_RDMA0) \
  95. ACTION(CMDQ_ENG_MDP_RDMA1, MDP_RDMA1) \
  96. ACTION(CMDQ_ENG_MDP_RSZ0, MDP_RSZ0) \
  97. ACTION(CMDQ_ENG_MDP_RSZ1, MDP_RSZ1) \
  98. ACTION(CMDQ_ENG_MDP_RSZ2, MDP_RSZ2) \
  99. ACTION(CMDQ_ENG_MDP_TDSHP0, MDP_TDSHP0) \
  100. ACTION(CMDQ_ENG_MDP_TDSHP1, MDP_TDSHP1) \
  101. ACTION(CMDQ_ENG_MDP_COLOR0, MDP_COLOR0) \
  102. ACTION(CMDQ_ENG_MDP_WROT0, MDP_WROT0) \
  103. ACTION(CMDQ_ENG_MDP_WROT1, MDP_WROT1) \
  104. ACTION(CMDQ_ENG_MDP_WDMA, MDP_WDMA) \
  105. }
  106. static const uint64_t gCmdqEngineGroupBits[CMDQ_MAX_GROUP_COUNT] = {
  107. CMDQ_ENG_ISP_GROUP_BITS,
  108. CMDQ_ENG_MDP_GROUP_BITS,
  109. CMDQ_ENG_DISP_GROUP_BITS,
  110. CMDQ_ENG_JPEG_GROUP_BITS,
  111. CMDQ_ENG_VENC_GROUP_BITS,
  112. CMDQ_ENG_DPE_GROUP_BITS
  113. };
  114. static cmdqDTSDataStruct gCmdqDtsData;
  115. uint32_t cmdq_core_max_task_in_thread(int32_t thread)
  116. {
  117. int32_t maxTaskNUM = CMDQ_MAX_TASK_IN_THREAD;
  118. #ifdef CMDQ_SECURE_PATH_SUPPORT
  119. if (true == cmdq_get_func()->isSecureThread(thread))
  120. maxTaskNUM = CMDQ_MAX_TASK_IN_SECURE_THREAD;
  121. #endif
  122. return maxTaskNUM;
  123. }
  124. /* Use CMDQ as Resource Manager */
/*
 * Delayed-work handler that releases a shared resource once its delay
 * period expires.
 *
 * workItem: the work_struct embedded in a delayed_work, which is itself
 * embedded in a ResourceUnitStruct (both recovered via container_of).
 *
 * Resource state is serialized by gCmdqResourceMutex; the mutex is
 * dropped around the availableCB callback so the callback may take it
 * (or other locks) itself.
 */
void cmdq_core_unlock_resource(struct work_struct *workItem)
{
	struct ResourceUnitStruct *pResource = NULL;
	struct delayed_work *delayedWorkItem = NULL;
	int32_t status = 0;

	/* recover the owning ResourceUnitStruct from the work item */
	delayedWorkItem = container_of(workItem, struct delayed_work, work);
	pResource = container_of(delayedWorkItem, struct ResourceUnitStruct, delayCheckWork);
	mutex_lock(&gCmdqResourceMutex);
	CMDQ_MSG("[Res] unlock resource with engine: 0x%016llx\n", pResource->engine);
	/* only release if still marked in-use AND this delay was not cancelled */
	if (pResource->used && pResource->delaying) {
		pResource->unlock = sched_clock();
		pResource->used = false;
		pResource->delaying = false;
		/* delay time is reached and unlock resource */
		if (NULL == pResource->availableCB) {
			/* print error message */
			CMDQ_LOG("[Res]: available CB func is NULL, event:%d\n", pResource->lockEvent);
		} else {
			/* before call callback, release lock at first */
			mutex_unlock(&gCmdqResourceMutex);
			status = pResource->availableCB(pResource->lockEvent);
			mutex_lock(&gCmdqResourceMutex);
			if (status < 0) {
				/* Error status print */
				CMDQ_ERR("[Res]: available CB (%d) return fail:%d\n",
					pResource->lockEvent, status);
			}
		}
	}
	mutex_unlock(&gCmdqResourceMutex);
}
  157. void cmdq_core_init_resource(uint32_t engineFlag, CMDQ_EVENT_ENUM resourceEvent)
  158. {
  159. struct ResourceUnitStruct *pResource;
  160. pResource = kzalloc(sizeof(ResourceUnitStruct), GFP_KERNEL);
  161. if (pResource) {
  162. pResource->engine = (1LL << engineFlag);
  163. pResource->lockEvent = resourceEvent;
  164. INIT_DELAYED_WORK(&pResource->delayCheckWork, cmdq_core_unlock_resource);
  165. INIT_LIST_HEAD(&(pResource->listEntry));
  166. list_add_tail(&(pResource->listEntry), &gCmdqContext.resourceList);
  167. }
  168. }
  169. /* engineFlag: task original engineFlag */
  170. /* enginesNotUsed: flag which indicate Not Used engine after release task */
/*
 * For every registered resource whose engine just became unused, schedule
 * (or reschedule) the delayed unlock worker cmdq_core_unlock_resource().
 *
 * engineFlag: task's original engine flags (currently unused here).
 * enginesNotUsed: engines no longer referenced after the task released.
 */
void cmdq_core_delay_check_unlock(uint64_t engineFlag, const uint64_t enginesNotUsed)
{
	/* Check engine in enginesNotUsed */
	struct ResourceUnitStruct *pResource = NULL;
	struct list_head *p = NULL;

	/* SRAM sharing feature disabled: nothing to schedule */
	if (cmdq_core_is_feature_off(CMDQ_FEATURE_SRAM_SHARE))
		return;
	list_for_each(p, &gCmdqContext.resourceList) {
		pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
		if (enginesNotUsed & pResource->engine) {
			/* serialize state against the delayed unlock worker */
			mutex_lock(&gCmdqResourceMutex);
			/* find matched engine become not used */
			if (!pResource->used) {
				/* resource is not used but we got engine is released! */
				/* log as error and still continue */
				CMDQ_ERR("[Res]: resource will delay but not used, engine: 0x%016llx\n",
					pResource->engine);
			}
			/* Cancel previous delay task if existed */
			if (pResource->delaying) {
				pResource->delaying = false;
				cancel_delayed_work(&pResource->delayCheckWork);
			}
			/* Start a new delay task */
			queue_delayed_work(gCmdqContext.resourceCheckWQ,
				&pResource->delayCheckWork, CMDQ_DELAY_RELEASE_RESOURCE_MS);
			pResource->delay = sched_clock();
			pResource->delaying = true;
			mutex_unlock(&gCmdqResourceMutex);
		}
	}
}
/* Accessor for the module-wide DTS data (event table etc.). */
cmdqDTSDataStruct *cmdq_core_get_whole_DTS_Data(void)
{
	return &gCmdqDtsData;
}
/*
 * Initialize the default event-value table: HW events map downward from
 * CMDQ_SYNC_TOKEN_INVALID - 1, SW events map to their own index.
 */
void cmdq_core_init_DTS_data(void)
{
	uint32_t i;

	memset(&(gCmdqDtsData), 0x0, sizeof(gCmdqDtsData));
	for (i = 0; i < CMDQ_SYNC_TOKEN_MAX; i++) {
		if (i <= CMDQ_MAX_HW_EVENT_COUNT) {
			/* GCE HW event */
			/* NOTE(review): boundary is inclusive (<=), so the HW range
			 * spans CMDQ_MAX_HW_EVENT_COUNT + 1 entries — confirm intended */
			gCmdqDtsData.eventTable[i] = CMDQ_SYNC_TOKEN_INVALID - 1 - i;
		} else {
			/* GCE SW event: identity mapping */
			gCmdqDtsData.eventTable[i] = i;
		}
	}
}
  221. void cmdq_core_set_event_table(CMDQ_EVENT_ENUM event, const int32_t value)
  222. {
  223. if (event >= 0 && event < CMDQ_SYNC_TOKEN_MAX)
  224. gCmdqDtsData.eventTable[event] = value;
  225. }
  226. int32_t cmdq_core_get_event_value(CMDQ_EVENT_ENUM event)
  227. {
  228. if (event < 0 || event >= CMDQ_SYNC_TOKEN_MAX)
  229. return -EINVAL;
  230. return gCmdqDtsData.eventTable[event];
  231. }
  232. int32_t cmdq_core_reverse_event_ENUM(const uint32_t value)
  233. {
  234. uint32_t eventENUM = CMDQ_SYNC_TOKEN_INVALID;
  235. uint32_t i;
  236. for (i = 0; i < CMDQ_SYNC_TOKEN_MAX; i++) {
  237. if (value == gCmdqDtsData.eventTable[i]) {
  238. eventENUM = i;
  239. break;
  240. }
  241. }
  242. return eventENUM;
  243. }
  244. static bool cmdq_core_is_valid_in_active_list(TaskStruct *pTask)
  245. {
  246. bool isValid = true;
  247. do {
  248. if (NULL == pTask) {
  249. isValid = false;
  250. break;
  251. }
  252. if (TASK_STATE_IDLE == pTask->taskState || CMDQ_INVALID_THREAD == pTask->thread
  253. || NULL == pTask->pCMDEnd || NULL == pTask->pVABase) {
  254. /* check CMDQ task's contain */
  255. isValid = false;
  256. }
  257. } while (0);
  258. return isValid;
  259. }
  260. void *cmdq_core_alloc_hw_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
  261. const gfp_t flag)
  262. {
  263. void *pVA;
  264. dma_addr_t PA;
  265. do {
  266. PA = 0;
  267. pVA = NULL;
  268. #ifdef CMDQ_TEST_EMERGENCY_BUFFER
  269. const uint32_t minForceEmergencyBufferSize = 64 * 1024;
  270. if ((1 == atomic_read(&gCmdqDebugForceUseEmergencyBuffer)) &&
  271. (minForceEmergencyBufferSize < size)) {
  272. CMDQ_LOG("[INFO]%s failed because...force use emergency buffer\n",
  273. __func__);
  274. break;
  275. }
  276. #endif
  277. CMDQ_PROF_START(current->pid, __func__);
  278. pVA = dma_alloc_coherent(dev, size, &PA, flag);
  279. CMDQ_PROF_END(current->pid, __func__);
  280. } while (0);
  281. *dma_handle = PA;
  282. CMDQ_VERBOSE("%s, pVA:0x%p, PA:0x%pa, PAout:0x%pa\n", __func__, pVA, &PA, &(*dma_handle));
  283. return pVA;
  284. }
/* Release a buffer obtained from cmdq_core_alloc_hw_buffer(). */
void cmdq_core_free_hw_buffer(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
  290. static bool cmdq_core_init_emergency_buffer(void)
  291. {
  292. int i;
  293. memset(&gCmdqEmergencyBuffer[0], 0, sizeof(gCmdqEmergencyBuffer));
  294. for (i = 0; i < CMDQ_EMERGENCY_BLOCK_COUNT; ++i) {
  295. EmergencyBufferStruct *buf = &gCmdqEmergencyBuffer[i];
  296. buf->va = cmdq_core_alloc_hw_buffer(cmdq_dev_get(), CMDQ_EMERGENCY_BLOCK_SIZE,
  297. &buf->pa, GFP_KERNEL);
  298. buf->size = CMDQ_EMERGENCY_BLOCK_SIZE;
  299. buf->used = false;
  300. }
  301. return true;
  302. }
  303. static bool cmdq_core_uninit_emergency_buffer(void)
  304. {
  305. int i;
  306. for (i = 0; i < CMDQ_EMERGENCY_BLOCK_COUNT; ++i) {
  307. EmergencyBufferStruct *buf = &gCmdqEmergencyBuffer[i];
  308. cmdq_core_free_hw_buffer(cmdq_dev_get(), CMDQ_EMERGENCY_BLOCK_SIZE,
  309. buf->va, buf->pa);
  310. if (buf->used) {
  311. CMDQ_ERR("Emergency buffer %d, 0x%p, 0x%pa still using\n",
  312. i, buf->va, &buf->pa);
  313. }
  314. }
  315. memset(&gCmdqEmergencyBuffer[0], 0, sizeof(gCmdqEmergencyBuffer));
  316. return true;
  317. }
  318. static bool cmdq_core_alloc_emergency_buffer(void **va, dma_addr_t *pa)
  319. {
  320. int i;
  321. bool ret = false;
  322. spin_lock(&gCmdqAllocLock);
  323. for (i = 0; i < CMDQ_EMERGENCY_BLOCK_COUNT; ++i) {
  324. /* find a free emergency buffer */
  325. CMDQ_VERBOSE("EmergencyBuffer[%d], use:%d, 0x%p, 0x%pa\n",
  326. i,
  327. gCmdqEmergencyBuffer[i].used,
  328. gCmdqEmergencyBuffer[i].va, &(gCmdqEmergencyBuffer[i].pa));
  329. if (!gCmdqEmergencyBuffer[i].used && gCmdqEmergencyBuffer[i].va) {
  330. gCmdqEmergencyBuffer[i].used = true;
  331. *va = gCmdqEmergencyBuffer[i].va;
  332. *pa = gCmdqEmergencyBuffer[i].pa;
  333. ret = true;
  334. break;
  335. }
  336. }
  337. spin_unlock(&gCmdqAllocLock);
  338. return ret;
  339. }
  340. static void cmdq_core_free_emergency_buffer(void *va, dma_addr_t pa)
  341. {
  342. int i;
  343. spin_lock(&gCmdqAllocLock);
  344. for (i = 0; i < CMDQ_EMERGENCY_BLOCK_COUNT; ++i) {
  345. if (gCmdqEmergencyBuffer[i].used && va == gCmdqEmergencyBuffer[i].va) {
  346. gCmdqEmergencyBuffer[i].used = false;
  347. break;
  348. }
  349. }
  350. spin_unlock(&gCmdqAllocLock);
  351. }
  352. bool cmdq_core_is_emergency_buffer(void *va)
  353. {
  354. int i;
  355. bool ret = false;
  356. spin_lock(&gCmdqAllocLock);
  357. for (i = 0; i < CMDQ_EMERGENCY_BLOCK_COUNT; ++i) {
  358. if (gCmdqEmergencyBuffer[i].used && va == gCmdqEmergencyBuffer[i].va) {
  359. ret = true;
  360. break;
  361. }
  362. }
  363. spin_unlock(&gCmdqAllocLock);
  364. return ret;
  365. }
/*
 * Clear the "raised IRQ" word in the normal/secure shared memory.
 * Returns 0 on success, -EFAULT when shared memory is absent or secure
 * path support is not compiled in.
 */
int32_t cmdq_core_set_secure_IRQ_status(uint32_t value)
{
#ifdef CMDQ_SECURE_PATH_SUPPORT
	const uint32_t offset = CMDQ_SEC_SHARED_IRQ_RAISED_OFFSET;
	uint32_t *pVA;

	/* NOTE(review): the incoming 'value' is unconditionally overwritten
	 * with 0 here, so this function always clears the word regardless of
	 * its argument — confirm whether the parameter was meant to be honored */
	value = 0x0;
	if (NULL == gCmdqContext.hSecSharedMem) {
		CMDQ_ERR("%s, shared memory is not created\n", __func__);
		return -EFAULT;
	}
	pVA = (uint32_t *) (gCmdqContext.hSecSharedMem->pVABase + offset);
	(*pVA) = value;
	CMDQ_VERBOSE("[shared_IRQ]set raisedIRQ:0x%08x\n", value);
	return 0;
#else
	CMDQ_ERR("func:%s failed since CMDQ secure path not support in this proj\n", __func__);
	return -EFAULT;
#endif
}
  385. int32_t cmdq_core_get_secure_IRQ_status(void)
  386. {
  387. #ifdef CMDQ_SECURE_PATH_SUPPORT
  388. const uint32_t offset = CMDQ_SEC_SHARED_IRQ_RAISED_OFFSET;
  389. uint32_t *pVA;
  390. int32_t value;
  391. value = 0x0;
  392. if (NULL == gCmdqContext.hSecSharedMem) {
  393. CMDQ_ERR("%s, shared memory is not created\n", __func__);
  394. return -EFAULT;
  395. }
  396. pVA = (uint32_t *) (gCmdqContext.hSecSharedMem->pVABase + offset);
  397. value = *pVA;
  398. CMDQ_VERBOSE("[shared_IRQ]IRQ raised:0x%08x\n", value);
  399. return value;
  400. #else
  401. CMDQ_ERR("func:%s failed since CMDQ secure path not support in this proj\n", __func__);
  402. return -EFAULT;
  403. #endif
  404. }
  405. int32_t cmdq_core_set_secure_thread_exec_counter(const int32_t thread, const uint32_t cookie)
  406. {
  407. #ifdef CMDQ_SECURE_PATH_SUPPORT
  408. const uint32_t offset = CMDQ_SEC_SHARED_THR_CNT_OFFSET + thread * sizeof(uint32_t);
  409. uint32_t *pVA = NULL;
  410. if (0 > cmdq_get_func()->isSecureThread(thread)) {
  411. CMDQ_ERR("%s, invalid param, thread: %d\n", __func__, thread);
  412. return -EFAULT;
  413. }
  414. if (NULL == gCmdqContext.hSecSharedMem) {
  415. CMDQ_ERR("%s, shared memory is not created\n", __func__);
  416. return -EFAULT;
  417. }
  418. CMDQ_MSG("[shared_cookie] set thread %d CNT(%p) to %d\n", thread, pVA, cookie);
  419. pVA = (uint32_t *) (gCmdqContext.hSecSharedMem->pVABase + offset);
  420. (*pVA) = cookie;
  421. return 0;
  422. #else
  423. CMDQ_ERR("func:%s failed since CMDQ secure path not support in this proj\n", __func__);
  424. return -EFAULT;
  425. #endif
  426. }
  427. int32_t cmdq_core_get_secure_thread_exec_counter(const int32_t thread)
  428. {
  429. #ifdef CMDQ_SECURE_PATH_SUPPORT
  430. const uint32_t offset = CMDQ_SEC_SHARED_THR_CNT_OFFSET + thread * sizeof(uint32_t);
  431. uint32_t *pVA;
  432. uint32_t value;
  433. if (0 > cmdq_get_func()->isSecureThread(thread)) {
  434. CMDQ_ERR("%s, invalid param, thread: %d\n", __func__, thread);
  435. return -EFAULT;
  436. }
  437. if (NULL == gCmdqContext.hSecSharedMem) {
  438. CMDQ_ERR("%s, shared memory is not created\n", __func__);
  439. return -EFAULT;
  440. }
  441. pVA = (uint32_t *) (gCmdqContext.hSecSharedMem->pVABase + offset);
  442. value = *pVA;
  443. #if defined(CMDQ_SECURE_PATH_NORMAL_IRQ) || defined(CMDQ_SECURE_PATH_HW_LOCK)
  444. value = value + 1;
  445. #endif
  446. CMDQ_VERBOSE("[shared_cookie] get thread %d CNT(%p) value is %d\n", thread, pVA, value);
  447. return value;
  448. #else
  449. CMDQ_ERR("func:%s failed since CMDQ secure path not support in this proj\n", __func__);
  450. return -EFAULT;
  451. #endif
  452. }
  453. int32_t cmdq_core_thread_exec_counter(const int32_t thread)
  454. {
  455. return (false == cmdq_get_func()->isSecureThread(thread)) ?
  456. (CMDQ_GET_COOKIE_CNT(thread)) : (cmdq_core_get_secure_thread_exec_counter(thread));
  457. }
/* Accessor for the normal/secure shared-memory handle (may be NULL). */
cmdqSecSharedMemoryHandle cmdq_core_get_secure_shared_memory(void)
{
	return gCmdqContext.hSecSharedMem;
}
/*
 * Stop and destroy the secure-path notify loop, if one is running.
 * Returns the stop status (0 on success or when no loop exists).
 *
 * gCmdqNotifyLoopMutex makes start/stop atomic with respect to each
 * other; gCmdqExecLock guards the hNotifyLoop pointer against readers
 * in execution/IRQ paths.
 */
int32_t cmdq_core_stop_secure_path_notify_thread(void)
{
#if defined(CMDQ_SECURE_PATH_SUPPORT) && !defined(CMDQ_SECURE_PATH_NORMAL_IRQ)
	int status = 0;
	unsigned long flags;

	mutex_lock(&gCmdqNotifyLoopMutex);
	do {
		if (NULL == gCmdqContext.hNotifyLoop) {
			/* no notify thread */
			CMDQ_MSG("[WARNING]NULL notify loop\n");
			break;
		}
		status = cmdqRecStopLoop(gCmdqContext.hNotifyLoop);
		if (0 > status) {
			/* leave the handle intact so a later stop may retry */
			CMDQ_ERR("stop notify loop failed, status:%d\n", status);
			break;
		}
		/* destroy handle */
		spin_lock_irqsave(&gCmdqExecLock, flags);
		cmdqRecDestroy(gCmdqContext.hNotifyLoop);
		gCmdqContext.hNotifyLoop = NULL;
		spin_unlock_irqrestore(&gCmdqExecLock, flags);
		/* CPU clear event */
		CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_UPD, CMDQ_SYNC_SECURE_THR_EOF);
	} while (0);
	mutex_unlock(&gCmdqNotifyLoopMutex);
	return status;
#else
	return 0;
#endif
}
  493. int32_t cmdq_core_start_secure_path_notify_thread(void)
  494. {
  495. #if defined(CMDQ_SECURE_PATH_SUPPORT) && !defined(CMDQ_SECURE_PATH_NORMAL_IRQ)
  496. int status = 0;
  497. cmdqRecHandle handle;
  498. unsigned long flags;
  499. mutex_lock(&gCmdqNotifyLoopMutex);
  500. do {
  501. if (NULL != gCmdqContext.hNotifyLoop) {
  502. /* already created it */
  503. break;
  504. }
  505. /* CPU clear event */
  506. CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_UPD, CMDQ_SYNC_SECURE_THR_EOF);
  507. /* record command */
  508. cmdqRecCreate(CMDQ_SCENARIO_SECURE_NOTIFY_LOOP, &handle);
  509. cmdqRecReset(handle);
  510. cmdqRecWait(handle, CMDQ_SYNC_SECURE_THR_EOF);
  511. #ifdef CMDQ_SECURE_PATH_HW_LOCK
  512. cmdqRecWait(handle, CMDQ_SYNC_SECURE_WSM_LOCK);
  513. #endif
  514. status = cmdqRecStartLoop(handle);
  515. if (0 > status) {
  516. CMDQ_ERR("start notify loop failed, status:%d\n", status);
  517. break;
  518. }
  519. /* update notify handle */
  520. spin_lock_irqsave(&gCmdqExecLock, flags);
  521. gCmdqContext.hNotifyLoop = (CmdqRecLoopHandle *) handle;
  522. spin_unlock_irqrestore(&gCmdqExecLock, flags);
  523. } while (0);
  524. mutex_unlock(&gCmdqNotifyLoopMutex);
  525. return status;
  526. #else
  527. return 0;
  528. #endif
  529. }
/*
 * Map an event enum value to its symbolic name.
 * X-macro trick: DECLARE_CMDQ_EVENT is redefined so that re-including
 * cmdq_event_common.h expands to one compare-and-capture per event; the
 * surrounding do/while(0) gives the generated 'break' a target.
 * Returns "CMDQ_EVENT_UNKNOWN" when no entry matches.
 */
const char *cmdq_core_get_event_name_ENUM(CMDQ_EVENT_ENUM event)
{
	const char *eventName = "CMDQ_EVENT_UNKNOWN";

#undef DECLARE_CMDQ_EVENT
#define DECLARE_CMDQ_EVENT(name, val, dts_name) { if (val == event) { eventName = #name; break; } }
	do {
#include "cmdq_event_common.h"
	} while (0);
#undef DECLARE_CMDQ_EVENT
	return eventName;
}
  541. const char *cmdq_core_get_event_name(CMDQ_EVENT_ENUM event)
  542. {
  543. const int32_t eventENUM = cmdq_core_reverse_event_ENUM(event);
  544. return cmdq_core_get_event_name_ENUM(eventENUM);
  545. }
/*
 * Clear a GCE sync token by writing its HW value to the token-update
 * register (without bit 16, which cmdqCoreSetEvent uses — presumably the
 * set/clear selector; confirm against the GCE register spec).
 */
void cmdqCoreClearEvent(CMDQ_EVENT_ENUM event)
{
	int32_t eventValue = cmdq_core_get_event_value(event);

	CMDQ_MSG("clear event %d\n", eventValue);
	CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_UPD, eventValue);
}
/*
 * Set a GCE sync token: write its HW value OR'd with bit 16 into the
 * token-update register (bit 16 presumably selects "set" as opposed to
 * "clear" — confirm against the GCE register spec).
 */
void cmdqCoreSetEvent(CMDQ_EVENT_ENUM event)
{
	int32_t eventValue = cmdq_core_get_event_value(event);

	CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_UPD, (1L << 16) | eventValue);
}
  557. uint32_t cmdqCoreGetEvent(CMDQ_EVENT_ENUM event)
  558. {
  559. uint32_t regValue = 0;
  560. int32_t eventValue = cmdq_core_get_event_value(event);
  561. CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_ID, (0x3FF & eventValue));
  562. regValue = CMDQ_REG_GET32(CMDQ_SYNC_TOKEN_VAL);
  563. return regValue;
  564. }
/* Compile-time capability: legacy platforms report false, others true. */
bool cmdq_core_support_sync_non_suspendable(void)
{
#ifdef CMDQ_USE_LEGACY
	return false;
#else
	return true;
#endif
}
  573. ssize_t cmdqCorePrintLogLevel(struct device *dev, struct device_attribute *attr, char *buf)
  574. {
  575. int len = 0;
  576. if (buf)
  577. len = sprintf(buf, "%d\n", gCmdqContext.logLevel);
  578. return len;
  579. }
  580. ssize_t cmdqCoreWriteLogLevel(struct device *dev,
  581. struct device_attribute *attr, const char *buf, size_t size)
  582. {
  583. int len = 0;
  584. int value = 0;
  585. int status = 0;
  586. char textBuf[10] = { 0 };
  587. do {
  588. if (size >= 10) {
  589. status = -EFAULT;
  590. break;
  591. }
  592. len = size;
  593. memcpy(textBuf, buf, len);
  594. textBuf[len] = '\0';
  595. if (0 > kstrtoint(textBuf, 10, &value)) {
  596. status = -EFAULT;
  597. break;
  598. }
  599. status = len;
  600. if (value < 0 || value > 3)
  601. value = 0;
  602. cmdq_core_set_log_level(value);
  603. } while (0);
  604. return status;
  605. }
  606. ssize_t cmdqCorePrintProfileEnable(struct device *dev, struct device_attribute *attr, char *buf)
  607. {
  608. int len = 0;
  609. if (buf)
  610. len = sprintf(buf, "%d\n", gCmdqContext.enableProfile);
  611. return len;
  612. }
  613. ssize_t cmdqCoreWriteProfileEnable(struct device *dev,
  614. struct device_attribute *attr, const char *buf, size_t size)
  615. {
  616. int len = 0;
  617. int value = 0;
  618. int status = 0;
  619. char textBuf[10] = { 0 };
  620. do {
  621. if (size >= 10) {
  622. status = -EFAULT;
  623. break;
  624. }
  625. len = size;
  626. memcpy(textBuf, buf, len);
  627. textBuf[len] = '\0';
  628. if (0 > kstrtoint(textBuf, 10, &value)) {
  629. status = -EFAULT;
  630. break;
  631. }
  632. status = len;
  633. if (value < 0 || value > 3)
  634. value = 0;
  635. gCmdqContext.enableProfile = value;
  636. if (0 < value)
  637. cmdqSecEnableProfile(true);
  638. else
  639. cmdqSecEnableProfile(false);
  640. } while (0);
  641. return status;
  642. }
/*
 * Read the HW program counter of 'thread' and translate it back into a
 * kernel VA inside pTask's command buffer.
 *
 * On success: returns the VA of the instruction at the PC and fills
 * insts[2]/insts[3] with the 8-byte instruction there (insts[0..1] are
 * always zeroed). Returns NULL for a NULL/secure task, missing buffer,
 * invalid thread, or a PC outside the task's buffer.
 */
static uint32_t *cmdq_core_get_pc(const TaskStruct *pTask, uint32_t thread, uint32_t insts[4])
{
	long currPC = 0L;
	uint8_t *pInst = NULL;

	insts[0] = 0;
	insts[1] = 0;
	insts[2] = 0;
	insts[3] = 0;
	if (NULL == pTask) {
		CMDQ_ERR("get pc failed since pTask is NULL");
		return NULL;
	}
	if ((NULL == pTask->pVABase) || (CMDQ_INVALID_THREAD == thread)) {
		CMDQ_ERR
		    ("get pc failed since invalid param, pTask %p, pTask->pVABase:%p, thread:%d\n",
		     pTask, pTask->pVABase, thread);
		return NULL;
	}
	if (true == pTask->secData.isSecure) {
		/* be careful: normal world may not access secure threads' registers */
		return NULL;
	}
	/* PC register holds a HW address; rebase it onto the task's VA buffer */
	currPC = CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)));
	pInst = (uint8_t *) pTask->pVABase + (currPC - pTask->MVABase);
	if (((uint8_t *) pTask->pVABase <= pInst) && (pInst <= (uint8_t *) pTask->pCMDEnd)) {
		if (pInst != (uint8_t *) pTask->pCMDEnd) {
			/* If PC points to start of pCMD, */
			/* - 8 causes access violation */
			/* insts[0] = CMDQ_REG_GET32(pInst - 8); */
			/* insts[1] = CMDQ_REG_GET32(pInst - 4); */
			insts[2] = CMDQ_REG_GET32(pInst + 0);
			insts[3] = CMDQ_REG_GET32(pInst + 4);
		} else {
			/* PC at buffer end: dump the last executed instruction instead */
			/* insts[0] = CMDQ_REG_GET32(pInst - 16); */
			/* insts[1] = CMDQ_REG_GET32(pInst - 12); */
			insts[2] = CMDQ_REG_GET32(pInst - 8);
			insts[3] = CMDQ_REG_GET32(pInst - 4);
		}
	} else {
		/* invalid PC address: outside the task's command buffer */
		return NULL;
	}
	return (uint32_t *) pInst;
}
  687. static int cmdq_core_print_profile_marker(const RecordStruct *pRecord, char *_buf, int bufLen)
  688. {
  689. int length = 0;
  690. #ifdef CMDQ_PROFILE_MARKER_SUPPORT
  691. int32_t profileMarkerCount;
  692. int32_t i;
  693. char *buf;
  694. buf = _buf;
  695. profileMarkerCount = pRecord->profileMarkerCount;
  696. if (profileMarkerCount > CMDQ_MAX_PROFILE_MARKER_IN_TASK)
  697. profileMarkerCount = CMDQ_MAX_PROFILE_MARKER_IN_TASK;
  698. for (i = 0; i < profileMarkerCount; i++) {
  699. length = snprintf(buf, bufLen, ",P%d,%s,%lld",
  700. i, pRecord->profileMarkerTag[i], pRecord->profileMarkerTimeNS[i]);
  701. bufLen -= length;
  702. buf += length;
  703. }
  704. if (i > 0) {
  705. length = snprintf(buf, bufLen, "\n");
  706. bufLen -= length;
  707. buf += length;
  708. }
  709. length = (buf - _buf);
  710. #endif
  711. return length;
  712. }
  713. static int cmdq_core_print_record(const RecordStruct *pRecord, int index, char *_buf, int bufLen)
  714. {
  715. int length = 0;
  716. char *unit[5] = { "ms", "ms", "ms", "ms", "ms" };
  717. int32_t IRQTime;
  718. int32_t execTime;
  719. int32_t beginWaitTime;
  720. int32_t totalTime;
  721. int32_t acquireThreadTime;
  722. unsigned long rem_nsec;
  723. CMDQ_TIME submitTimeSec;
  724. char *buf;
  725. rem_nsec = 0;
  726. submitTimeSec = pRecord->submit;
  727. rem_nsec = do_div(submitTimeSec, 1000000000);
  728. buf = _buf;
  729. unit[0] = "ms";
  730. unit[1] = "ms";
  731. unit[2] = "ms";
  732. unit[3] = "ms";
  733. unit[4] = "ms";
  734. CMDQ_GET_TIME_IN_MS(pRecord->submit, pRecord->done, totalTime);
  735. CMDQ_GET_TIME_IN_MS(pRecord->submit, pRecord->trigger, acquireThreadTime);
  736. CMDQ_GET_TIME_IN_MS(pRecord->submit, pRecord->beginWait, beginWaitTime);
  737. CMDQ_GET_TIME_IN_MS(pRecord->trigger, pRecord->gotIRQ, IRQTime);
  738. CMDQ_GET_TIME_IN_MS(pRecord->trigger, pRecord->wakedUp, execTime);
  739. /* detect us interval */
  740. if (0 == acquireThreadTime) {
  741. CMDQ_GET_TIME_IN_US_PART(pRecord->submit, pRecord->trigger, acquireThreadTime);
  742. unit[0] = "us";
  743. }
  744. if (0 == IRQTime) {
  745. CMDQ_GET_TIME_IN_US_PART(pRecord->trigger, pRecord->gotIRQ, IRQTime);
  746. unit[1] = "us";
  747. }
  748. if (0 == beginWaitTime) {
  749. CMDQ_GET_TIME_IN_US_PART(pRecord->submit, pRecord->beginWait, beginWaitTime);
  750. unit[2] = "us";
  751. }
  752. if (0 == execTime) {
  753. CMDQ_GET_TIME_IN_US_PART(pRecord->trigger, pRecord->wakedUp, execTime);
  754. unit[3] = "us";
  755. }
  756. if (0 == totalTime) {
  757. CMDQ_GET_TIME_IN_US_PART(pRecord->submit, pRecord->done, totalTime);
  758. unit[4] = "us";
  759. }
  760. /* pRecord->priority for task priority */
  761. /* when pRecord->isSecure is 0 for secure task */
  762. length = snprintf(buf, bufLen,
  763. "%4d,(%5d, %2d, 0x%012llx, %2d, %d, %d),(%02d, %02d),(%5dns , %lld, %lld),",
  764. index, pRecord->user, pRecord->scenario, pRecord->engineFlag,
  765. pRecord->priority, pRecord->isSecure, pRecord->size,
  766. pRecord->thread,
  767. cmdq_get_func()->priority(pRecord->scenario),
  768. pRecord->writeTimeNS, pRecord->writeTimeNSBegin, pRecord->writeTimeNSEnd);
  769. bufLen -= length;
  770. buf += length;
  771. length = snprintf(buf, bufLen,
  772. "(%5llu.%06lu, %4d%s, %4d%s, %4d%s, %4d%s),%4d%s",
  773. submitTimeSec, rem_nsec / 1000,
  774. acquireThreadTime, unit[0],
  775. IRQTime, unit[1], beginWaitTime, unit[2],
  776. execTime, unit[3], totalTime, unit[4]);
  777. bufLen -= length;
  778. buf += length;
  779. length = snprintf(buf, bufLen, "\n");
  780. bufLen -= length;
  781. buf += length;
  782. length = (buf - _buf);
  783. return length;
  784. }
/*
 * seq_file handler: print all execution records, newest first, walking
 * the circular record array backwards from lastID.
 */
int cmdqCorePrintRecordSeq(struct seq_file *m, void *v)
{
	unsigned long flags;
	int32_t index;
	int32_t numRec;
	RecordStruct record;
	char msg[180] = { 0 };

	/* we try to minimize time spent in spin lock */
	/* since record is an array so it is okay to */
	/* allow displaying an out-of-date entry. */
	spin_lock_irqsave(&gCmdqRecordLock, flags);
	numRec = gCmdqContext.recNum;
	index = gCmdqContext.lastID - 1;
	spin_unlock_irqrestore(&gCmdqRecordLock, flags);
	/* we print record in reverse order. */
	for (; numRec > 0; --numRec, --index) {
		/* wrap the circular index at both ends */
		if (index >= CMDQ_MAX_RECORD_COUNT)
			index = 0;
		else if (index < 0)
			index = CMDQ_MAX_RECORD_COUNT - 1;
		/* Make sure we don't print a record that is during updating. */
		/* However, this record may already be different */
		/* from the time of entering cmdqCorePrintRecordSeq(). */
		spin_lock_irqsave(&gCmdqRecordLock, flags);
		record = gCmdqContext.record[index]; /* struct copy under lock */
		spin_unlock_irqrestore(&gCmdqRecordLock, flags);
		cmdq_core_print_record(&record, index, msg, sizeof(msg));
		seq_printf(m, "%s", msg);
		/* NOTE(review): when CMDQ_PROFILE_MARKER_SUPPORT is off this
		 * call leaves msg untouched, so the record line above gets
		 * printed twice — confirm the config always defines it */
		cmdq_core_print_profile_marker(&record, msg, sizeof(msg));
		seq_printf(m, "%s", msg);
	}
	return 0;
}
  818. int cmdqCorePrintErrorSeq(struct seq_file *m, void *v)
  819. {
  820. /* error is not used by now */
  821. return 0;
  822. }
/*
 * seq_file dump of overall CMDQ status: first-error snapshot, saved command
 * buffer, clock state, per-engine usage, all task lists and per-HW-thread
 * slots.  Task lists are walked under gCmdqTaskMutex; per-thread slots under
 * gCmdqThreadLock.  Always returns 0 (seq_file convention).
 */
int cmdqCorePrintStatusSeq(struct seq_file *m, void *v)
{
	unsigned long flags = 0;
	EngineStruct *pEngine = NULL;
	TaskStruct *pTask = NULL;
	struct list_head *p = NULL;
	ThreadStruct *pThread = NULL;
	int32_t index = 0;
	int32_t inner = 0;
	int listIdx = 0;
	const struct list_head *lists[] = {
		&gCmdqContext.taskFreeList,
		&gCmdqContext.taskActiveList,
		&gCmdqContext.taskWaitList
	};
	uint32_t *pcVA = NULL;
	uint32_t insts[4] = { 0 };
	char parsedInstruction[128] = { 0 };
	/* listNames[] must stay index-aligned with lists[] above */
	static const char *const listNames[] = { "Free", "Active", "Wait" };
	const CMDQ_ENG_ENUM engines[] = CMDQ_FOREACH_MODULE_PRINT(GENERATE_ENUM);
	static const char *const engineNames[] = CMDQ_FOREACH_MODULE_PRINT(GENERATE_STRING);

#ifdef CMDQ_DUMP_FIRSTERROR
	if (gCmdqFirstError.cmdqCount > 0) {
		/* do_div() modifies its first argument in place (-> seconds)
		 * and returns the remainder in nanoseconds */
		unsigned long long saveTimeSec = gCmdqFirstError.savetime;
		unsigned long rem_nsec = do_div(saveTimeSec, 1000000000);
		struct tm nowTM;

		time_to_tm(gCmdqFirstError.savetv.tv_sec, sys_tz.tz_minuteswest * 60, &nowTM);
		seq_puts(m, "================= [CMDQ] Dump first error ================\n");
		seq_printf(m, "kernel time:[%5llu.%06lu],", saveTimeSec, rem_nsec / 1000);
		seq_printf(m, " UTC time:[%04ld-%02d-%02d %02d:%02d:%02d.%06ld],",
			   (nowTM.tm_year + 1900), (nowTM.tm_mon + 1), nowTM.tm_mday,
			   nowTM.tm_hour, nowTM.tm_min, nowTM.tm_sec,
			   gCmdqFirstError.savetv.tv_usec);
		seq_printf(m, " Pid: %d, Name: %s\n", gCmdqFirstError.callerPid,
			   gCmdqFirstError.callerName);
		seq_printf(m, "%s", gCmdqFirstError.cmdqString);
		if (gCmdqFirstError.cmdqMaxSize <= 0)
			seq_printf(m, "\nWARNING: MAX size: %d is full\n", CMDQ_MAX_FIRSTERROR);
		seq_puts(m, "\n\n");
	}
#endif

	/* Save command buffer dump */
	if (gCmdqBufferDump.count > 0) {
		int32_t buffer_id;

		seq_puts(m, "================= [CMDQ] Dump Command Buffer =================\n");
		/* hold the task mutex so the dump buffer is not rewritten mid-print */
		mutex_lock(&gCmdqTaskMutex);
		for (buffer_id = 0; buffer_id < gCmdqBufferDump.bufferSize; buffer_id++)
			seq_printf(m, "%c", gCmdqBufferDump.cmdqString[buffer_id]);
		mutex_unlock(&gCmdqTaskMutex);
		seq_puts(m, "\n=============== [CMDQ] Dump Command Buffer END ===============\n\n\n");
	}

#ifdef CMDQ_PWR_AWARE
	/* note for constatnt format (without a % substitution), use seq_puts to speed up outputs */
	seq_puts(m, "====== Clock Status =======\n");
	cmdq_get_func()->printStatusSeqClock(m);
#endif

	seq_puts(m, "====== Engine Usage =======\n");
	for (listIdx = 0; listIdx < (sizeof(engines) / sizeof(engines[0])); ++listIdx) {
		pEngine = &gCmdqContext.engine[engines[listIdx]];
		seq_printf(m, "%s: count %d, owner %d, fail: %d, reset: %d\n",
			   engineNames[listIdx],
			   pEngine->userCount,
			   pEngine->currOwner, pEngine->failCount, pEngine->resetCount);
	}

	mutex_lock(&gCmdqTaskMutex);

	/* print all tasks in both list */
	for (listIdx = 0; listIdx < (sizeof(lists) / sizeof(lists[0])); listIdx++) {
		/* skip FreeTasks by default */
		if (!cmdq_core_should_print_msg() && 0 == listIdx)
			continue;

		index = 0;
		list_for_each(p, lists[listIdx]) {
			pTask = list_entry(p, struct TaskStruct, listEntry);
			seq_printf(m, "====== %s Task(%d) 0x%p Usage =======\n", listNames[listIdx],
				   index, pTask);
			seq_printf(m, "State %d, VABase: 0x%p, MVABase: %pa, Size: %d\n",
				   pTask->taskState, pTask->pVABase, &pTask->MVABase,
				   pTask->commandSize);
			seq_printf(m, "Scenario %d, Priority: %d, Flag: 0x%08llx, VAEnd: 0x%p\n",
				   pTask->scenario, pTask->priority, pTask->engineFlag,
				   pTask->pCMDEnd);
			seq_printf(m,
				   "Reorder:%d, Trigger %lld, IRQ: %lld, Wait: %lld, Wake Up: %lld\n",
				   pTask->reorder,
				   pTask->trigger, pTask->gotIRQ, pTask->beginWait, pTask->wakedUp);
			++index;
		}
		seq_printf(m, "====== Total %d %s Task =======\n", index, listNames[listIdx]);
	}

	for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++) {
		pThread = &(gCmdqContext.thread[index]);

		if (pThread->taskCount > 0) {
			seq_printf(m, "====== Thread %d Usage =======\n", index);
			seq_printf(m, "Wait Cookie %d, Next Cookie %d\n", pThread->waitCookie,
				   pThread->nextCookie);

			/* slot array is shared with IRQ context; lock while walking it */
			spin_lock_irqsave(&gCmdqThreadLock, flags);

			for (inner = 0; inner < cmdq_core_max_task_in_thread(index); inner++) {
				pTask = pThread->pCurTask[inner];
				if (NULL != pTask) {
					/* dump task basic info */
					seq_printf(m,
						   "Slot: %d, Task: 0x%p, Pid: %d, Name: %s, Scn: %d,",
						   index, pTask, pTask->callerPid,
						   pTask->callerName, pTask->scenario);
					seq_printf(m,
						   " VABase: 0x%p, MVABase: %pa, Size: %d",
						   pTask->pVABase, &pTask->MVABase,
						   pTask->commandSize);
					if (pTask->pCMDEnd) {
						seq_printf(m,
							   ", Last Command: 0x%08x:0x%08x",
							   pTask->pCMDEnd[-1], pTask->pCMDEnd[0]);
					}
					seq_puts(m, "\n");

					/* dump PC info */
					pcVA = cmdq_core_get_pc(pTask, index, insts);
					if (pcVA) {
						/* NOTE(review): no trailing newline here --
						 * presumably parsedInstruction ends with one;
						 * confirm in cmdq_core_parse_instruction() */
						cmdq_core_parse_instruction(pcVA, parsedInstruction,
									    sizeof(parsedInstruction));
						seq_printf(m,
							   "PC(VA): 0x%p, 0x%08x:0x%08x => %s",
							   pcVA, insts[2], insts[3],
							   parsedInstruction);
					} else {
						seq_puts(m, "PC(VA): Not available\n");
					}
				}
			}

			spin_unlock_irqrestore(&gCmdqThreadLock, flags);
		}
	}

	mutex_unlock(&gCmdqTaskMutex);

	return 0;
}
/*
 * Sysfs "show" handler: prints the record ring buffer into @buf (at most
 * PAGE_SIZE bytes), newest record first.  Returns the number of bytes
 * written.
 */
ssize_t cmdqCorePrintRecord(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned long flags;
	int32_t begin;		/* NOTE(review): written but never read */
	int32_t curPos;		/* current write offset into buf */
	ssize_t bufLen = PAGE_SIZE;	/* remaining space in buf */
	ssize_t length;
	int32_t index;
	int32_t numRec;
	RecordStruct record;

	begin = 0;
	curPos = 0;
	length = 0;
	bufLen = PAGE_SIZE;

	/* we try to minimize time spent in spin lock */
	/* since record is an array so it is okay to */
	/* allow displaying an out-of-date entry. */
	spin_lock_irqsave(&gCmdqRecordLock, flags);
	numRec = gCmdqContext.recNum;
	index = gCmdqContext.lastID - 1;
	spin_unlock_irqrestore(&gCmdqRecordLock, flags);

	/* we print record in reverse order. */
	for (; numRec > 0; --numRec, --index) {
		/* CMDQ_ERR("[rec] index=%d numRec =%d\n", index, numRec); */
		/* wrap index into [0, CMDQ_MAX_RECORD_COUNT) -- ring buffer */
		if (index >= CMDQ_MAX_RECORD_COUNT)
			index = 0;
		else if (index < 0)
			index = CMDQ_MAX_RECORD_COUNT - 1;

		/* Make sure we don't print a record that is during updating. */
		/* However, this record may already be different */
		/* from the time of entering cmdqCorePrintRecordSeq(). */
		spin_lock_irqsave(&gCmdqRecordLock, flags);
		/* struct copy under lock; formatting happens outside the lock */
		record = (gCmdqContext.record[index]);
		spin_unlock_irqrestore(&gCmdqRecordLock, flags);

		length = cmdq_core_print_record(&record, index, &buf[curPos], bufLen);
		bufLen -= length;
		curPos += length;

		if (bufLen <= 0 || curPos >= PAGE_SIZE)
			break;
	}

	/* clamp to the sysfs page size */
	if (curPos >= PAGE_SIZE)
		curPos = PAGE_SIZE;

	return curPos;
}
  1001. ssize_t cmdqCorePrintError(struct device *dev, struct device_attribute *attr, char *buf)
  1002. {
  1003. int i;
  1004. int length = 0;
  1005. for (i = 0; i < gCmdqContext.errNum && i < CMDQ_MAX_ERROR_COUNT; ++i) {
  1006. ErrorStruct *pError = &gCmdqContext.error[i];
  1007. u64 ts = pError->ts_nsec;
  1008. unsigned long rem_nsec = do_div(ts, 1000000000);
  1009. length += snprintf(buf + length,
  1010. PAGE_SIZE - length,
  1011. "[%5lu.%06lu] ", (unsigned long)ts, rem_nsec / 1000);
  1012. length += cmdq_core_print_record(&pError->errorRec,
  1013. i, buf + length, PAGE_SIZE - length);
  1014. if (length >= PAGE_SIZE)
  1015. break;
  1016. }
  1017. return length;
  1018. }
  1019. ssize_t cmdqCorePrintStatus(struct device *dev, struct device_attribute *attr, char *buf)
  1020. {
  1021. unsigned long flags = 0L;
  1022. EngineStruct *pEngine = NULL;
  1023. TaskStruct *pTask = NULL;
  1024. struct list_head *p = NULL;
  1025. ThreadStruct *pThread = NULL;
  1026. int32_t index = 0;
  1027. int32_t inner = 0;
  1028. int32_t length = 0;
  1029. int listIdx = 0;
  1030. char *pBuffer = buf;
  1031. const struct list_head *lists[] = {
  1032. &gCmdqContext.taskFreeList,
  1033. &gCmdqContext.taskActiveList,
  1034. &gCmdqContext.taskWaitList
  1035. };
  1036. uint32_t *pcVA = NULL;
  1037. uint32_t insts[4] = { 0 };
  1038. char parsedInstruction[128] = { 0 };
  1039. static const char *const listNames[] = { "Free", "Active", "Wait" };
  1040. const CMDQ_ENG_ENUM engines[] = CMDQ_FOREACH_MODULE_PRINT(GENERATE_ENUM);
  1041. static const char *const engineNames[] = CMDQ_FOREACH_MODULE_PRINT(GENERATE_STRING);
  1042. #ifdef CMDQ_PWR_AWARE
  1043. pBuffer += sprintf(pBuffer, "====== Clock Status =======\n");
  1044. pBuffer += cmdq_get_func()->printStatusClock(pBuffer);
  1045. #endif
  1046. pBuffer += sprintf(pBuffer, "====== Engine Usage =======\n");
  1047. for (listIdx = 0; listIdx < (sizeof(engines) / sizeof(engines[0])); ++listIdx) {
  1048. pEngine = &gCmdqContext.engine[engines[listIdx]];
  1049. pBuffer += sprintf(pBuffer, "%s: count %d, owner %d, fail: %d, reset: %d\n",
  1050. engineNames[listIdx],
  1051. pEngine->userCount,
  1052. pEngine->currOwner, pEngine->failCount, pEngine->resetCount);
  1053. }
  1054. mutex_lock(&gCmdqTaskMutex);
  1055. /* print all tasks in both list */
  1056. for (listIdx = 0; listIdx < (sizeof(lists) / sizeof(lists[0])); listIdx++) {
  1057. /* skip FreeTasks by default */
  1058. if (!cmdq_core_should_print_msg() && 0 == listIdx)
  1059. continue;
  1060. index = 0;
  1061. list_for_each(p, lists[listIdx]) {
  1062. pTask = list_entry(p, struct TaskStruct, listEntry);
  1063. pBuffer += sprintf(pBuffer,
  1064. "====== %s Task(%d) 0x%p Usage =======\n",
  1065. listNames[listIdx], index, pTask);
  1066. pBuffer += sprintf(pBuffer,
  1067. "State %d, VABase: 0x%p, MVABase: %pa, Size: %d\n",
  1068. pTask->taskState, pTask->pVABase, &pTask->MVABase,
  1069. pTask->commandSize);
  1070. pBuffer += sprintf(pBuffer,
  1071. "Scenario %d, Priority: %d, Flag: 0x%08llx, VAEnd: 0x%p\n",
  1072. pTask->scenario, pTask->priority, pTask->engineFlag,
  1073. pTask->pCMDEnd);
  1074. pBuffer += sprintf(pBuffer,
  1075. "Reoder:%d, Trigger %lld, IRQ: %lld, Wait: %lld, Wake Up: %lld\n",
  1076. pTask->reorder,
  1077. pTask->trigger, pTask->gotIRQ, pTask->beginWait,
  1078. pTask->wakedUp);
  1079. ++index;
  1080. }
  1081. pBuffer +=
  1082. sprintf(pBuffer, "====== Total %d %s Task =======\n", index,
  1083. listNames[listIdx]);
  1084. }
  1085. for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++) {
  1086. pThread = &(gCmdqContext.thread[index]);
  1087. if (pThread->taskCount > 0) {
  1088. pBuffer += sprintf(pBuffer, "====== Thread %d Usage =======\n", index);
  1089. pBuffer += sprintf(pBuffer, "Wait Cookie %d, Next Cookie %d\n",
  1090. pThread->waitCookie, pThread->nextCookie);
  1091. spin_lock_irqsave(&gCmdqThreadLock, flags);
  1092. for (inner = 0; inner < cmdq_core_max_task_in_thread(index); inner++) {
  1093. pTask = pThread->pCurTask[inner];
  1094. if (NULL != pTask) {
  1095. /* dump task basic info */
  1096. pBuffer += sprintf(pBuffer,
  1097. "Slot: %d, Task: 0x%p, Pid: %d, Name: %s, Scn: %d,",
  1098. index, pTask, pTask->callerPid,
  1099. pTask->callerName, pTask->scenario);
  1100. pBuffer += sprintf(pBuffer,
  1101. " VABase: 0x%p, MVABase: %pa, Size: %d",
  1102. pTask->pVABase, &pTask->MVABase,
  1103. pTask->commandSize);
  1104. if (pTask->pCMDEnd) {
  1105. pBuffer += sprintf(pBuffer,
  1106. ", Last Command: 0x%08x:0x%08x",
  1107. pTask->pCMDEnd[-1],
  1108. pTask->pCMDEnd[0]);
  1109. }
  1110. pBuffer += sprintf(pBuffer, "\n");
  1111. /* dump PC info */
  1112. pcVA = cmdq_core_get_pc(pTask, index, insts);
  1113. if (pcVA) {
  1114. cmdq_core_parse_instruction(pcVA, parsedInstruction,
  1115. sizeof(parsedInstruction));
  1116. pBuffer += sprintf(pBuffer,
  1117. "PC(VA): 0x%p, 0x%08x:0x%08x => %s",
  1118. pcVA,
  1119. insts[2],
  1120. insts[3],
  1121. parsedInstruction);
  1122. } else {
  1123. pBuffer += sprintf(pBuffer, "PC(VA): Not available\n");
  1124. }
  1125. }
  1126. }
  1127. spin_unlock_irqrestore(&gCmdqThreadLock, flags);
  1128. }
  1129. }
  1130. mutex_unlock(&gCmdqTaskMutex);
  1131. length = pBuffer - buf;
  1132. BUG_ON(length > PAGE_SIZE);
  1133. return length;
  1134. }
  1135. static void cmdq_task_init_profile_marker_data(cmdqCommandStruct *pCommandDesc, TaskStruct *pTask)
  1136. {
  1137. #ifdef CMDQ_PROFILE_MARKER_SUPPORT
  1138. uint32_t i;
  1139. pTask->profileMarker.count = pCommandDesc->profileMarker.count;
  1140. pTask->profileMarker.hSlot = pCommandDesc->profileMarker.hSlot;
  1141. for (i = 0; i < CMDQ_MAX_PROFILE_MARKER_IN_TASK; i++)
  1142. pTask->profileMarker.tag[i] = pCommandDesc->profileMarker.tag[i];
  1143. #endif
  1144. }
  1145. static void cmdq_task_deinit_profile_marker_data(TaskStruct *pTask)
  1146. {
  1147. #ifdef CMDQ_PROFILE_MARKER_SUPPORT
  1148. if (NULL == pTask)
  1149. return;
  1150. if ((0 >= pTask->profileMarker.count) || (0 == pTask->profileMarker.hSlot))
  1151. return;
  1152. cmdqBackupFreeSlot((cmdqBackupSlotHandle) (pTask->profileMarker.hSlot));
  1153. pTask->profileMarker.hSlot = 0LL;
  1154. pTask->profileMarker.count = 0;
  1155. #endif
  1156. }
  1157. /* */
  1158. /* For kmemcache, initialize variables of TaskStruct (but not buffers) */
  1159. static void cmdq_core_task_ctor(void *param)
  1160. {
  1161. struct TaskStruct *pTask = (TaskStruct *) param;
  1162. CMDQ_VERBOSE("cmdq_core_task_ctor: 0x%p\n", param);
  1163. memset(pTask, 0, sizeof(TaskStruct));
  1164. INIT_LIST_HEAD(&(pTask->listEntry));
  1165. pTask->taskState = TASK_STATE_IDLE;
  1166. pTask->thread = CMDQ_INVALID_THREAD;
  1167. }
  1168. void cmdq_task_free_task_command_buffer(TaskStruct *pTask)
  1169. {
  1170. if (pTask->pVABase) {
  1171. if (pTask->useEmergencyBuf) {
  1172. cmdq_core_free_emergency_buffer(pTask->pVABase, pTask->MVABase);
  1173. } else {
  1174. cmdq_core_free_hw_buffer(cmdq_dev_get(), pTask->bufferSize,
  1175. pTask->pVABase, pTask->MVABase);
  1176. }
  1177. pTask->pVABase = NULL;
  1178. pTask->MVABase = 0;
  1179. pTask->bufferSize = 0;
  1180. pTask->commandSize = 0;
  1181. pTask->pCMDEnd = NULL;
  1182. }
  1183. }
/* */
/* Ensures size of command buffer of the given task. */
/* Existing buffer will be copied to new buffer. */
/* */
/* This buffer is guranteed to be physically continuous. */
/* */
/* returns -ENOMEM if cannot allocate new buffer */
static int32_t cmdq_core_task_realloc_buffer_size(TaskStruct *pTask, uint32_t size)
{
	void *pNewBuffer = NULL;
	dma_addr_t newMVABase = 0;
	int32_t commandSize = 0;
	uint32_t *pCMDEnd = NULL;

	if (pTask->pVABase && pTask->bufferSize >= size) {
		/* buffer size is already good, do nothing. */
		return 0;
	}

	/* three-stage allocation fallback:
	 * 1) DMA buffer without triggering kswapd reclaim,
	 * 2) reserved emergency pool (only if size fits a block),
	 * 3) DMA buffer with full reclaim allowed. */
	do {
		/* allocate new buffer, try if we can alloc without reclaim */
		pNewBuffer = cmdq_core_alloc_hw_buffer(cmdq_dev_get(), size,
						       &newMVABase, GFP_KERNEL | __GFP_NO_KSWAPD);
		if (pNewBuffer) {
			pTask->useEmergencyBuf = false;
			break;
		}

		/* failed. Try emergency buffer */
		if (size <= CMDQ_EMERGENCY_BLOCK_SIZE)
			cmdq_core_alloc_emergency_buffer(&pNewBuffer, &newMVABase);
		if (pNewBuffer) {
			CMDQ_MSG("emergency buffer %p allocated\n", pNewBuffer);
			pTask->useEmergencyBuf = true;
			break;
		}

		/* finally try reclaim */
		pNewBuffer =
		    cmdq_core_alloc_hw_buffer(cmdq_dev_get(), size, &newMVABase,
					      GFP_KERNEL);
		if (pNewBuffer) {
			pTask->useEmergencyBuf = false;
			break;
		}
	} while (0);

	if (NULL == pNewBuffer) {
		CMDQ_ERR("realloc cmd buffer of size %d failed\n", size);
		return -ENOMEM;
	}

	memset(pNewBuffer, 0, size);

	/* copy and release old buffer */
	if (pTask->pVABase)
		memcpy(pNewBuffer, pTask->pVABase, pTask->bufferSize);

	/* we should keep track of pCMDEnd and cmdSize since they are cleared in free command buffer */
	/* NOTE(review): pCMDEnd still points into the OLD buffer after this --
	 * presumably callers re-derive it from pVABase; confirm at call sites. */
	pCMDEnd = pTask->pCMDEnd;
	commandSize = pTask->commandSize;

	cmdq_task_free_task_command_buffer(pTask);

	/* attach the new buffer */
	pTask->pVABase = (uint32_t *) pNewBuffer;
	pTask->MVABase = newMVABase;
	pTask->bufferSize = size;
	pTask->pCMDEnd = pCMDEnd;
	pTask->commandSize = commandSize;

	CMDQ_MSG("Task Buffer:0x%p, VA:%p PA:%pa\n", pTask, pTask->pVABase, &pTask->MVABase);
	return 0;
}
  1247. /* */
  1248. /* Allocate and initialize TaskStruct and its command buffer */
  1249. static TaskStruct *cmdq_core_task_create(void)
  1250. {
  1251. struct TaskStruct *pTask = NULL;
  1252. int32_t status = 0;
  1253. pTask = (TaskStruct *) kmem_cache_alloc(gCmdqContext.taskCache, GFP_KERNEL);
  1254. if (NULL == pTask) {
  1255. CMDQ_AEE("CMDQ", "Allocate command buffer by kmem_cache_alloc failed\n");
  1256. return NULL;
  1257. }
  1258. status = cmdq_core_task_realloc_buffer_size(pTask, CMDQ_INITIAL_CMD_BLOCK_SIZE);
  1259. if (status < 0) {
  1260. CMDQ_AEE("CMDQ", "Allocate command buffer failed\n");
  1261. kmem_cache_free(gCmdqContext.taskCache, pTask);
  1262. pTask = NULL;
  1263. return NULL;
  1264. }
  1265. return pTask;
  1266. }
  1267. void cmdq_core_reset_hw_events_impl(CMDQ_EVENT_ENUM event)
  1268. {
  1269. int32_t value = cmdq_core_get_event_value(event);
  1270. if (value > 0) {
  1271. /* Reset GCE event */
  1272. CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_UPD, (CMDQ_SYNC_TOKEN_MAX & value));
  1273. }
  1274. }
/*
 * Reset every GCE HW event to 0 via the DECLARE_CMDQ_EVENT X-macro
 * expansion, then re-set the tokens that represent resources/locks and
 * must idle at 1.
 */
void cmdq_core_reset_hw_events(void)
{
	int index;

	/* set all defined events to 0 */
	CMDQ_MSG("cmdq_core_reset_hw_events\n");
	/* X-macro trick: re-including cmdq_event_common.h with this
	 * definition emits one cmdq_core_reset_hw_events_impl() call per
	 * declared event */
#undef DECLARE_CMDQ_EVENT
#define DECLARE_CMDQ_EVENT(name, val, dts_name) \
{ \
	cmdq_core_reset_hw_events_impl(name); \
}
#include "cmdq_event_common.h"
#undef DECLARE_CMDQ_EVENT

	/* However, GRP_SET are resource flags, */
	/* by default they should be 1. */
	cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_GPR_SET_0);
	cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_GPR_SET_1);
	cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_GPR_SET_2);
	cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_GPR_SET_3);
	cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_GPR_SET_4);

	/* However, CMDQ_SYNC_RESOURCE are resource flags, */
	/* by default they should be 1. */
	cmdqCoreSetEvent(CMDQ_SYNC_RESOURCE_WROT0);

	/* However, CMDQ_SYNC_RESOURCE are WSM lock flags, */
	/* by default they should be 1. */
	cmdqCoreSetEvent(CMDQ_SYNC_SECURE_WSM_LOCK);

	/* However, APPEND_THR are resource flags, */
	/* by default they should be 1. */
	for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++)
		cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_APPEND_THR(index));
}
/* NOTE(review): dead experimental register-dump test hooks, compiled out
 * by "#if 0" -- kept only for reference. */
#if 0
uint32_t *addressToDump[3] = { IO_VIRT_TO_PHYS(MMSYS_CONFIG_BASE + 0x0890),
	IO_VIRT_TO_PHYS(MMSYS_CONFIG_BASE + 0x0890),
	IO_VIRT_TO_PHYS(MMSYS_CONFIG_BASE + 0x0890)
};

static int32_t testcase_regdump_begin(uint32_t taskID, uint32_t *regCount, uint32_t **regAddress)
{
	CMDQ_MSG("@@@@@@@@@@@@@@@@@@ testcase_regdump_begin, tid = %d\n", taskID);
	*regCount = 3;
	*regAddress = addressToDump;
	return 0;
}

static int32_t testcase_regdump_end(uint32_t taskID, uint32_t regCount, uint32_t *regValues)
{
	int i;

	CMDQ_MSG("@@@@@@@@@@@@@@@@@@ testcase_regdump_end, tid = %d\n", taskID);
	CMDQ_MSG("@@@@@@@@@@@@@@@@@@ regCount = %d\n", regCount);
	for (i = 0; i < regCount; ++i)
		CMDQ_MSG("@@@@@@@@@@@@@@@@@@ regValue[%d] = 0x%08x\n", i, regValues[i]);
	return 0;
}
#endif
  1327. void cmdq_core_config_prefetch_gsize(void)
  1328. {
  1329. if (g_dts_setting.prefetch_thread_count == 4) {
  1330. uint32_t prefetch_gsize = (g_dts_setting.prefetch_size[0]/32-1) |
  1331. (g_dts_setting.prefetch_size[1]/32-1) << 4 |
  1332. (g_dts_setting.prefetch_size[2]/32-1) << 8 |
  1333. (g_dts_setting.prefetch_size[3]/32-1) << 12;
  1334. CMDQ_REG_SET32(CMDQ_PREFETCH_GSIZE, prefetch_gsize);
  1335. CMDQ_MSG("prefetch gsize configure: 0x%08x\n", prefetch_gsize);
  1336. }
  1337. }
  1338. void cmdq_core_reset_engine_struct(void)
  1339. {
  1340. struct EngineStruct *pEngine;
  1341. int index;
  1342. /* Reset engine status */
  1343. pEngine = gCmdqContext.engine;
  1344. for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++)
  1345. pEngine[index].currOwner = CMDQ_INVALID_THREAD;
  1346. }
  1347. void cmdq_core_reset_thread_struct(void)
  1348. {
  1349. struct ThreadStruct *pThread;
  1350. int index;
  1351. /* Reset thread status */
  1352. pThread = &(gCmdqContext.thread[0]);
  1353. for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++)
  1354. pThread[index].allowDispatching = 1;
  1355. }
  1356. void cmdq_core_init_thread_work_queue(void)
  1357. {
  1358. struct ThreadStruct *pThread;
  1359. int index;
  1360. /* Initialize work queue per thread */
  1361. pThread = &(gCmdqContext.thread[0]);
  1362. for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++) {
  1363. gCmdqContext.taskThreadAutoReleaseWQ[index] =
  1364. create_singlethread_workqueue("cmdq_auto_release_thread");
  1365. }
  1366. }
  1367. void cmdq_core_destroy_thread_work_queue(void)
  1368. {
  1369. struct ThreadStruct *pThread;
  1370. int index;
  1371. /* Initialize work queue per thread */
  1372. pThread = &(gCmdqContext.thread[0]);
  1373. for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++) {
  1374. destroy_workqueue(gCmdqContext.taskThreadAutoReleaseWQ[index]);
  1375. gCmdqContext.taskThreadAutoReleaseWQ[index] = NULL;
  1376. }
  1377. }
  1378. bool cmdq_core_is_valid_group(CMDQ_GROUP_ENUM engGroup)
  1379. {
  1380. /* check range */
  1381. if (engGroup < 0 || engGroup >= CMDQ_MAX_GROUP_COUNT)
  1382. return false;
  1383. return true;
  1384. }
  1385. int32_t cmdq_core_is_group_flag(CMDQ_GROUP_ENUM engGroup, uint64_t engineFlag)
  1386. {
  1387. if (!cmdq_core_is_valid_group(engGroup))
  1388. return false;
  1389. if (gCmdqEngineGroupBits[engGroup] & engineFlag)
  1390. return true;
  1391. return false;
  1392. }
/*
 * Return the HW instruction-timeout cycle count for the thread.
 * 0 means "no HW timeout".
 */
static inline uint32_t cmdq_core_get_task_timeout_cycle(struct ThreadStruct *pThread)
{
	/* if there is loop callback, this thread is in loop mode, */
	/* and should not have a timeout. */
	/* So pass 0 as "no timeout" */
	/* return pThread->loopCallback ? 0 : CMDQ_MAX_INST_CYCLE; */

	/* HACK: disable HW timeout */
	return 0;
}
  1402. void cmdqCoreInitGroupCB(void)
  1403. {
  1404. memset(&(gCmdqGroupCallback), 0x0, sizeof(gCmdqGroupCallback));
  1405. memset(&(gCmdqDebugCallback), 0x0, sizeof(gCmdqDebugCallback));
  1406. }
  1407. void cmdqCoreDeinitGroupCB(void)
  1408. {
  1409. memset(&(gCmdqGroupCallback), 0x0, sizeof(gCmdqGroupCallback));
  1410. memset(&(gCmdqDebugCallback), 0x0, sizeof(gCmdqDebugCallback));
  1411. }
  1412. int32_t cmdqCoreRegisterCB(CMDQ_GROUP_ENUM engGroup,
  1413. CmdqClockOnCB clockOn,
  1414. CmdqDumpInfoCB dumpInfo,
  1415. CmdqResetEngCB resetEng, CmdqClockOffCB clockOff)
  1416. {
  1417. CmdqCBkStruct *pCallback;
  1418. if (!cmdq_core_is_valid_group(engGroup))
  1419. return -EFAULT;
  1420. CMDQ_MSG("Register %d group engines' callback\n", engGroup);
  1421. CMDQ_MSG("clockOn: 0x%pf, dumpInfo: 0x%pf\n", clockOn, dumpInfo);
  1422. CMDQ_MSG("resetEng: 0x%pf, clockOff: 0x%pf\n", resetEng, clockOff);
  1423. pCallback = &(gCmdqGroupCallback[engGroup]);
  1424. pCallback->clockOn = clockOn;
  1425. pCallback->dumpInfo = dumpInfo;
  1426. pCallback->resetEng = resetEng;
  1427. pCallback->clockOff = clockOff;
  1428. return 0;
  1429. }
  1430. int32_t cmdqCoreRegisterDebugRegDumpCB(CmdqDebugRegDumpBeginCB beginCB, CmdqDebugRegDumpEndCB endCB)
  1431. {
  1432. CMDQ_VERBOSE("Register reg dump: begin=%p, end=%p\n", beginCB, endCB);
  1433. gCmdqDebugCallback.beginDebugRegDump = beginCB;
  1434. gCmdqDebugCallback.endDebugRegDump = endCB;
  1435. return 0;
  1436. }
  1437. bool cmdqIsValidTaskPtr(void *pTask)
  1438. {
  1439. struct TaskStruct *ptr = NULL;
  1440. struct list_head *p = NULL;
  1441. bool ret = false;
  1442. mutex_lock(&gCmdqTaskMutex);
  1443. list_for_each(p, &gCmdqContext.taskActiveList) {
  1444. ptr = list_entry(p, struct TaskStruct, listEntry);
  1445. if (ptr == pTask && TASK_STATE_IDLE != ptr->taskState) {
  1446. ret = true;
  1447. break;
  1448. }
  1449. }
  1450. list_for_each(p, &gCmdqContext.taskWaitList) {
  1451. ptr = list_entry(p, struct TaskStruct, listEntry);
  1452. if (ptr == pTask && TASK_STATE_WAITING == ptr->taskState) {
  1453. ret = true;
  1454. break;
  1455. }
  1456. }
  1457. mutex_unlock(&gCmdqTaskMutex);
  1458. return ret;
  1459. }
/*
 * Free every auxiliary buffer owned by the task: profiling data, register
 * read-back results, the command buffer itself, and the profile-marker
 * backup slot.
 */
static void cmdq_core_release_buffer(TaskStruct *pTask)
{
	CMDQ_MSG("cmdq_core_release_buffer start\n");

	if (pTask->profileData) {
		/* profiling buffer holds 2 uint32_t timestamps */
		cmdq_core_free_hw_buffer(cmdq_dev_get(),
					 2 * sizeof(uint32_t), pTask->profileData,
					 pTask->profileDataPA);
		pTask->profileData = NULL;
		pTask->profileDataPA = 0;
	}

	if (pTask->regResults) {
		CMDQ_MSG("COMMAND: Free result buf VA:0x%p, PA:%pa\n", pTask->regResults,
			 &pTask->regResultsMVA);
		cmdq_core_free_hw_buffer(cmdq_dev_get(),
					 pTask->regCount * sizeof(pTask->regResults[0]),
					 pTask->regResults, pTask->regResultsMVA);
	}

	/* reset read-back bookkeeping unconditionally */
	pTask->regResults = NULL;
	pTask->regResultsMVA = 0;
	pTask->regCount = 0;

	cmdq_task_free_task_command_buffer(pTask);
	cmdq_task_deinit_profile_marker_data(pTask);

	CMDQ_MSG("cmdq_core_release_buffer end\n");
}
  1484. static void cmdq_core_release_task_unlocked(TaskStruct *pTask)
  1485. {
  1486. CMDQ_MSG("cmdq_core_release_task_unlocked start\n");
  1487. pTask->taskState = TASK_STATE_IDLE;
  1488. pTask->thread = CMDQ_INVALID_THREAD;
  1489. cmdq_core_release_buffer(pTask);
  1490. /* remove from active/waiting list */
  1491. list_del_init(&(pTask->listEntry));
  1492. /* insert into free list. Currently we don't shrink free list. */
  1493. list_add_tail(&(pTask->listEntry), &gCmdqContext.taskFreeList);
  1494. CMDQ_MSG("cmdq_core_release_task_unlocked end\n");
  1495. }
  1496. static void cmdq_core_release_task(TaskStruct *pTask)
  1497. {
  1498. CMDQ_MSG("-->TASK: Release task structure 0x%p begin\n", pTask);
  1499. pTask->taskState = TASK_STATE_IDLE;
  1500. pTask->thread = CMDQ_INVALID_THREAD;
  1501. cmdq_core_release_buffer(pTask);
  1502. mutex_lock(&gCmdqTaskMutex);
  1503. /* remove from active/waiting list */
  1504. list_del_init(&(pTask->listEntry));
  1505. /* insert into free list. Currently we don't shrink free list. */
  1506. list_add_tail(&(pTask->listEntry), &gCmdqContext.taskFreeList);
  1507. mutex_unlock(&gCmdqTaskMutex);
  1508. CMDQ_MSG("<--TASK: Release task structure end\n");
  1509. }
  1510. static void cmdq_core_release_task_in_queue(struct work_struct *workItem)
  1511. {
  1512. TaskStruct *pTask = NULL;
  1513. pTask = container_of(workItem, struct TaskStruct, autoReleaseWork);
  1514. CMDQ_MSG("-->Work QUEUE: TASK: Release task structure 0x%p begin\n", pTask);
  1515. pTask->taskState = TASK_STATE_IDLE;
  1516. pTask->thread = CMDQ_INVALID_THREAD;
  1517. cmdq_core_release_buffer(pTask);
  1518. mutex_lock(&gCmdqTaskMutex);
  1519. /* remove from active/waiting list */
  1520. list_del_init(&(pTask->listEntry));
  1521. /* insert into free list. Currently we don't shrink free list. */
  1522. list_add_tail(&(pTask->listEntry), &gCmdqContext.taskFreeList);
  1523. mutex_unlock(&gCmdqTaskMutex);
  1524. CMDQ_MSG("<--Work QUEUE: TASK: Release task structure end\n");
  1525. }
/*
 * Release a task either synchronously or via the auto-release work queue.
 * useWorkQueue doubles as a re-entry guard: when true we are already
 * running from the work item, so release directly instead of re-queueing.
 */
static void cmdq_core_auto_release_task(TaskStruct *pTask)
{
	CMDQ_MSG("-->TASK: Auto release task structure 0x%p begin\n", pTask);

	if (pTask->useWorkQueue) {
		/* this is called via auto release work, no need to put in work queue again */
		cmdq_core_release_task(pTask);
	} else {
		/* Not auto release work, use for auto release task ! */
		/* the work item is embeded in pTask already */
		/* but we need to initialized it */
		INIT_WORK(&pTask->autoReleaseWork, cmdq_core_release_task_in_queue);
		pTask->useWorkQueue = true;
		queue_work(gCmdqContext.taskAutoReleaseWQ, &pTask->autoReleaseWork);
	}

	CMDQ_MSG("<--TASK: Auto release task structure end\n");
}
/**
 * Force GCE to re-fetch a thread's command buffer.
 * Usage:
 * If SW modifies command buffer content after the command was already
 * configured to GCE, SW must notify GCE to re-fetch it, so the copy in
 * GCE's SRAM stays consistent with DRAM. */
void cmdq_core_invalidate_hw_fetched_buffer(int32_t thread)
{
	/* Writing the HW thread PC register (even with its current value)
	 * makes GCE drop its already-fetched commands and fetch again
	 * from DRAM into SRAM. */
	const int32_t pc = CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread));

	CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread), pc);
}
  1555. void cmdq_core_fix_command_scenario_for_user_space(cmdqCommandStruct *pCommand)
  1556. {
  1557. if ((CMDQ_SCENARIO_USER_DISP_COLOR == pCommand->scenario)
  1558. || (CMDQ_SCENARIO_USER_MDP == pCommand->scenario)) {
  1559. CMDQ_VERBOSE("user space request, scenario:%d\n", pCommand->scenario);
  1560. } else {
  1561. CMDQ_VERBOSE("[WARNING]fix user space request to CMDQ_SCENARIO_USER_SPACE\n");
  1562. pCommand->scenario = CMDQ_SCENARIO_USER_SPACE;
  1563. }
  1564. }
  1565. bool cmdq_core_is_request_from_user_space(const CMDQ_SCENARIO_ENUM scenario)
  1566. {
  1567. switch (scenario) {
  1568. case CMDQ_SCENARIO_USER_DISP_COLOR:
  1569. case CMDQ_SCENARIO_USER_MDP:
  1570. case CMDQ_SCENARIO_USER_SPACE: /* phased out */
  1571. return true;
  1572. default:
  1573. return false;
  1574. }
  1575. return false;
  1576. }
/*
 * Append one 64-bit GCE instruction (argB = low word, argA = high word)
 * to the task's command buffer.  pCMDEnd points at the LAST written word,
 * so the new instruction goes to [1]/[2] and the pointer advances by two
 * words.  Caller must guarantee the buffer has room for one more
 * CMDQ_INST_SIZE.
 */
static void cmdq_core_append_command(TaskStruct *pTask, uint32_t argA, uint32_t argB)
{
	pTask->pCMDEnd[1] = argB;
	pTask->pCMDEnd[2] = argA;
	pTask->commandSize += 1 * CMDQ_INST_SIZE;
	pTask->pCMDEnd += 2;
}
/*
 * Dump one task's state, buffer addresses, last two instructions (when a
 * command buffer exists) and timing/caller info to the error log.
 */
static void cmdq_core_dump_task(const TaskStruct *pTask)
{
	CMDQ_ERR
	    ("Task: 0x%p, Scenario: %d, State: %d, Priority: %d, Flag: 0x%016llx, VABase: 0x%p\n",
	     pTask, pTask->scenario, pTask->taskState, pTask->priority, pTask->engineFlag,
	     pTask->pVABase);

	/* dump last Inst only when VALID command buffer */
	/* otherwise data abort is happened */
	if (pTask->pVABase) {
		/* NOTE(review): reads pCMDEnd[-3..0] -- assumes the buffer
		 * holds at least two instructions; confirm callers never dump
		 * a task with fewer. */
		CMDQ_ERR
		    ("CMDEnd: 0x%p, MVABase: %pa, Size: %d, Last Inst: 0x%08x:0x%08x, 0x%08x:0x%08x\n",
		     pTask->pCMDEnd, &pTask->MVABase, pTask->commandSize, pTask->pCMDEnd[-3],
		     pTask->pCMDEnd[-2], pTask->pCMDEnd[-1], pTask->pCMDEnd[0]);
	} else {
		CMDQ_ERR("CMDEnd: 0x%p, MVABase: %pa, Size: %d\n",
			 pTask->pCMDEnd, &pTask->MVABase, pTask->commandSize);
	}

	CMDQ_ERR("Reorder:%d, Trigger: %lld, Got IRQ: %lld, Wait: %lld, Finish: %lld\n",
		 pTask->reorder, pTask->trigger, pTask->gotIRQ, pTask->beginWait, pTask->wakedUp);
	CMDQ_ERR("Caller pid:%d name:%s\n", pTask->callerPid, pTask->callerName);
}
  1605. static void cmdq_core_dump_all_task(void)
  1606. {
  1607. struct TaskStruct *ptr = NULL;
  1608. struct list_head *p = NULL;
  1609. mutex_lock(&gCmdqTaskMutex);
  1610. CMDQ_ERR("=============== [CMDQ] All active tasks ===============\n");
  1611. list_for_each(p, &gCmdqContext.taskActiveList) {
  1612. ptr = list_entry(p, struct TaskStruct, listEntry);
  1613. if (true == cmdq_core_is_valid_in_active_list(ptr))
  1614. cmdq_core_dump_task(ptr);
  1615. }
  1616. CMDQ_ERR("=============== [CMDQ] All wait tasks ===============\n");
  1617. list_for_each(p, &gCmdqContext.taskWaitList) {
  1618. ptr = list_entry(p, struct TaskStruct, listEntry);
  1619. if (TASK_STATE_WAITING == ptr->taskState)
  1620. cmdq_core_dump_task(ptr);
  1621. }
  1622. mutex_unlock(&gCmdqTaskMutex);
  1623. }
/*
 * Append GCE instructions that read HW register @regAddr into GPR
 * @valueRegId and then store that value to physical memory @writeAddress
 * (whose address is staged in 64-bit GPR @destRegId).
 */
static void cmdq_core_insert_backup_instr(TaskStruct *pTask,
					  const uint32_t regAddr,
					  const dma_addr_t writeAddress,
					  const CMDQ_DATA_REGISTER_ENUM valueRegId,
					  const CMDQ_DATA_REGISTER_ENUM destRegId)
{
	uint32_t argA;
	int32_t subsysCode;

	/* register to read from */
	/* note that we force convert to physical reg address. */
	/* if it is already physical address, it won't be affected (at least on this platform) */
	argA = regAddr;
	subsysCode = cmdq_core_subsys_from_phys_addr(argA);

	/* CMDQ_ERR("test %d\n", __LINE__); */
	/* */
	if (CMDQ_SPECIAL_SUBSYS_ADDR == subsysCode) {
		/* address outside the subsys table: stage the full APB
		 * address in a GPR, then read indirectly through it */
		CMDQ_LOG("Backup: Special handle memory base address 0x%08x\n", argA);
		/* Move extra handle APB address to destRegId */
		cmdq_core_append_command(pTask,
					 (CMDQ_CODE_MOVE << 24) | ((destRegId & 0x1f) << 16) | (4 << 21),
					 argA);
		/* Use arg-A GPR enable instruction to read destRegId value to valueRegId */
		cmdq_core_append_command(pTask,
					 (CMDQ_CODE_READ << 24) | ((destRegId & 0x1f) << 16) | (6 << 21),
					 valueRegId);
	} else if (-1 == subsysCode) {
		CMDQ_ERR("Backup: Unsupported memory base address 0x%08x\n", argA);
	} else {
		/* Load into 32-bit GPR (R0-R15) */
		cmdq_core_append_command(pTask, (CMDQ_CODE_READ << 24) | (argA & 0xffff) |
					 ((subsysCode & 0x1f) << 16) | (2 << 21), valueRegId);
	}

	/* CMDQ_ERR("test %d\n", __LINE__); */
	/* Note that <MOVE> argB is 48-bit */
	/* so writeAddress is split into 2 parts */
	/* and we store address in 64-bit GPR (P0-P7) */
	cmdq_core_append_command(pTask, (CMDQ_CODE_MOVE << 24) |
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				 ((writeAddress >> 32) & 0xffff) |
#endif
				 ((destRegId & 0x1f) << 16) | (4 << 21), (uint32_t) writeAddress);

	/* CMDQ_ERR("test %d\n", __LINE__); */

	/* write to memory */
	cmdq_core_append_command(pTask,
				 (CMDQ_CODE_WRITE << 24) | (0 & 0xffff) |
				 ((destRegId & 0x1f) << 16) | (6 << 21), valueRegId);

	CMDQ_VERBOSE("COMMAND: copy reg:0x%08x to phys:%pa, GPR(%d, %d)\n", argA, &writeAddress,
		     valueRegId, destRegId);

	/* CMDQ_ERR("test %d\n", __LINE__); */
}
/**
 * Insert instructions to back up a secure thread's cookie count to the
 * normal world.
 * Return:
 * < 0, an error code
 * >= 0, success; the number of bytes of inserted instructions
 */
  1680. #ifdef CMDQ_SECURE_PATH_NORMAL_IRQ
  1681. static int32_t cmdq_core_insert_backup_cookie_instr(TaskStruct *pTask, int32_t thread)
  1682. {
  1683. const uint32_t originalSize = pTask->commandSize;
  1684. const CMDQ_EVENT_ENUM regAccessToken = CMDQ_SYNC_TOKEN_GPR_SET_4;
  1685. const CMDQ_DATA_REGISTER_ENUM valueRegId = CMDQ_DATA_REG_DEBUG;
  1686. const CMDQ_DATA_REGISTER_ENUM destRegId = CMDQ_DATA_REG_DEBUG_DST;
  1687. const uint32_t regAddr = CMDQ_THR_EXEC_CNT_PA(thread);
  1688. uint64_t addrCookieOffset = CMDQ_SEC_SHARED_THR_CNT_OFFSET + thread * sizeof(uint32_t);
  1689. uint64_t WSMCookieAddr = gCmdqContext.hSecSharedMem->MVABase + addrCookieOffset;
  1690. const uint32_t subsysBit = cmdq_get_func()->getSubsysLSBArgA();
  1691. int32_t subsysCode = cmdq_core_subsys_from_phys_addr(regAddr);
  1692. int32_t offset;
  1693. if (0 > cmdq_get_func()->isSecureThread(thread)) {
  1694. CMDQ_ERR("%s, invalid param, thread: %d\n", __func__, thread);
  1695. return -EFAULT;
  1696. }
  1697. if (NULL == gCmdqContext.hSecSharedMem) {
  1698. CMDQ_ERR("%s, shared memory is not created\n", __func__);
  1699. return -EFAULT;
  1700. }
  1701. /* Shift JUMP and EOC */
  1702. pTask->pCMDEnd[10] = pTask->pCMDEnd[0];
  1703. pTask->pCMDEnd[9] = pTask->pCMDEnd[-1];
  1704. pTask->pCMDEnd[8] = pTask->pCMDEnd[-2];
  1705. pTask->pCMDEnd[7] = pTask->pCMDEnd[-3];
  1706. pTask->pCMDEnd -= 4;
  1707. /* use SYNC TOKEN to make sure only 1 thread access at a time */
  1708. /* bit 0-11: wait_value */
  1709. /* bit 15: to_wait, true */
  1710. /* bit 31: to_update, true */
  1711. /* bit 16-27: update_value */
  1712. /* wait and clear */
  1713. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | regAccessToken,
  1714. ((1 << 31) | (1 << 15) | 1));
  1715. /* Load into 32-bit GPR (R0-R15) */
  1716. cmdq_core_append_command(pTask,
  1717. (CMDQ_CODE_READ << 24) | (regAddr & 0xffff) |
  1718. ((subsysCode & 0x1f) << subsysBit) | (2 << 21), valueRegId);
  1719. /* Note that <MOVE> argB is 48-bit */
  1720. /* so writeAddress is split into 2 parts */
  1721. /* and we store address in 64-bit GPR (P0-P7) */
  1722. cmdq_core_append_command(pTask,
  1723. (CMDQ_CODE_MOVE << 24) | ((WSMCookieAddr >> 32) & 0xffff) |
  1724. ((destRegId & 0x1f) << 16) | (4 << 21), (uint32_t) WSMCookieAddr);
  1725. /* write to memory */
  1726. cmdq_core_append_command(pTask,
  1727. (CMDQ_CODE_WRITE << 24) |
  1728. ((destRegId & 0x1f) << 16) | (6 << 21), valueRegId);
  1729. /* set directly */
  1730. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | regAccessToken,
  1731. ((1 << 31) | (1 << 16)));
  1732. pTask->pCMDEnd += 4;
  1733. /* calculate added command length */
  1734. offset = pTask->commandSize - originalSize;
  1735. CMDQ_VERBOSE("insert_backup_cookie, offset:%d\n", offset);
  1736. return offset;
  1737. }
  1738. #endif
/**
 * Insert instructions to back up a secure thread's cookie and IRQ state
 * to the normal world.
 * Return:
 * < 0, an error code
 * >= 0, success; the number of bytes of inserted instructions
 */
  1745. #ifdef CMDQ_SECURE_PATH_HW_LOCK
  1746. static int32_t cmdq_core_insert_secure_IRQ_instr(TaskStruct *pTask, int32_t thread)
  1747. {
  1748. const uint32_t originalSize = pTask->commandSize;
  1749. const CMDQ_EVENT_ENUM regAccessToken = CMDQ_SYNC_TOKEN_GPR_SET_4;
  1750. const CMDQ_DATA_REGISTER_ENUM valueRegId = CMDQ_DATA_REG_DEBUG;
  1751. const CMDQ_DATA_REGISTER_ENUM destRegId = CMDQ_DATA_REG_DEBUG_DST;
  1752. const uint32_t regAddr = CMDQ_THR_EXEC_CNT_PA(thread);
  1753. uint64_t addrCookieOffset = CMDQ_SEC_SHARED_THR_CNT_OFFSET + thread * sizeof(uint32_t);
  1754. uint64_t WSMCookieAddr = gCmdqContext.hSecSharedMem->MVABase + addrCookieOffset;
  1755. uint64_t WSMIRQAddr = gCmdqContext.hSecSharedMem->MVABase + CMDQ_SEC_SHARED_IRQ_RAISED_OFFSET;
  1756. const uint32_t subsysBit = cmdq_get_func()->getSubsysLSBArgA();
  1757. int32_t subsysCode = cmdq_core_subsys_from_phys_addr(regAddr);
  1758. int32_t offset;
  1759. if (0 > cmdq_get_func()->isSecureThread(thread)) {
  1760. CMDQ_ERR("%s, invalid param, thread: %d\n", __func__, thread);
  1761. return -EFAULT;
  1762. }
  1763. if (NULL == gCmdqContext.hSecSharedMem) {
  1764. CMDQ_ERR("%s, shared memory is not created\n", __func__);
  1765. return -EFAULT;
  1766. }
  1767. /* Shift JUMP and EOC */
  1768. pTask->pCMDEnd[22] = pTask->pCMDEnd[0];
  1769. pTask->pCMDEnd[21] = pTask->pCMDEnd[-1];
  1770. pTask->pCMDEnd[20] = pTask->pCMDEnd[-2];
  1771. pTask->pCMDEnd[19] = pTask->pCMDEnd[-3];
  1772. pTask->pCMDEnd -= 4;
  1773. /* use SYNC TOKEN to make sure only 1 thread access at a time */
  1774. /* bit 0-11: wait_value */
  1775. /* bit 15: to_wait, true */
  1776. /* bit 31: to_update, true */
  1777. /* bit 16-27: update_value */
  1778. /* wait and clear */
  1779. /* set unlock WSM resource directly */
  1780. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | CMDQ_SYNC_SECURE_WSM_LOCK,
  1781. ((1 << 31) | (1 << 15) | 1));
  1782. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | regAccessToken,
  1783. ((1 << 31) | (1 << 15) | 1));
  1784. /* Load into 32-bit GPR (R0-R15) */
  1785. cmdq_core_append_command(pTask,
  1786. (CMDQ_CODE_READ << 24) | (regAddr & 0xffff) |
  1787. ((subsysCode & 0x1f) << subsysBit) | (2 << 21), valueRegId);
  1788. /* Note that <MOVE> argB is 48-bit */
  1789. /* so writeAddress is split into 2 parts */
  1790. /* and we store address in 64-bit GPR (P0-P7) */
  1791. cmdq_core_append_command(pTask,
  1792. (CMDQ_CODE_MOVE << 24) | ((WSMCookieAddr >> 32) & 0xffff) |
  1793. ((destRegId & 0x1f) << 16) | (4 << 21), (uint32_t) WSMCookieAddr);
  1794. /* write to memory */
  1795. cmdq_core_append_command(pTask,
  1796. (CMDQ_CODE_WRITE << 24) |
  1797. ((destRegId & 0x1f) << 16) | (6 << 21), valueRegId);
  1798. /* Write GCE secure thread's IRQ to WSM */
  1799. cmdq_core_append_command(pTask,
  1800. (CMDQ_CODE_MOVE << 24), ~(1 << thread));
  1801. cmdq_core_append_command(pTask,
  1802. (CMDQ_CODE_MOVE << 24) |
  1803. ((destRegId & 0x1f) << 16) | (4 << 21), WSMIRQAddr);
  1804. cmdq_core_append_command(pTask,
  1805. (CMDQ_CODE_WRITE << 24) |
  1806. ((destRegId & 0x1f) << 16) | (4 << 21) | 1, (1 << thread));
  1807. /* set directly */
  1808. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | regAccessToken,
  1809. ((1 << 31) | (1 << 16)));
  1810. /* set unlock WSM resource directly */
  1811. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | CMDQ_SYNC_SECURE_WSM_LOCK,
  1812. ((1 << 31) | (1 << 16)));
  1813. /* set notify thread token directly */
  1814. cmdq_core_append_command(pTask, (CMDQ_CODE_WFE << 24) | CMDQ_SYNC_SECURE_THR_EOF,
  1815. ((1 << 31) | (1 << 16)));
  1816. pTask->pCMDEnd += 4;
  1817. /* secure thread doesn't raise IRQ */
  1818. pTask->pCMDEnd[-3] = pTask->pCMDEnd[-3] & 0xFFFFFFFE;
  1819. /* calculate added command length */
  1820. offset = pTask->commandSize - originalSize;
  1821. CMDQ_VERBOSE("insert_backup_cookie, offset:%d\n", offset);
  1822. return offset;
  1823. }
  1824. #endif
  1825. static int32_t cmdq_core_insert_secure_handle_instr(TaskStruct *pTask, int32_t thread)
  1826. {
  1827. #ifdef CMDQ_SECURE_PATH_HW_LOCK
  1828. return cmdq_core_insert_secure_IRQ_instr(pTask, thread);
  1829. #else
  1830. #ifdef CMDQ_SECURE_PATH_NORMAL_IRQ
  1831. return cmdq_core_insert_backup_cookie_instr(pTask, thread);
  1832. #else
  1833. return 0;
  1834. #endif
  1835. #endif
  1836. }
  1837. static void cmdq_core_reorder_task_array(ThreadStruct *pThread, int32_t thread, int32_t prevID)
  1838. {
  1839. int loop, nextID, searchLoop, searchID;
  1840. int reorderCount = 0;
  1841. nextID = prevID + 1;
  1842. for (loop = 1; loop < (cmdq_core_max_task_in_thread(thread) - 1); loop++, nextID++) {
  1843. if (nextID >= cmdq_core_max_task_in_thread(thread))
  1844. nextID = 0;
  1845. if (NULL != pThread->pCurTask[nextID])
  1846. break;
  1847. searchID = nextID + 1;
  1848. for (searchLoop = (loop + 1); searchLoop < cmdq_core_max_task_in_thread(thread);
  1849. searchLoop++, searchID++) {
  1850. if (searchID >= cmdq_core_max_task_in_thread(thread))
  1851. searchID = 0;
  1852. if (NULL != pThread->pCurTask[searchID]) {
  1853. pThread->pCurTask[nextID] = pThread->pCurTask[searchID];
  1854. pThread->pCurTask[searchID] = NULL;
  1855. CMDQ_VERBOSE("WAIT: reorder slot %d to slot 0%d.\n",
  1856. searchID, nextID);
  1857. if ((searchLoop - loop) > reorderCount)
  1858. reorderCount = searchLoop - loop;
  1859. break;
  1860. }
  1861. }
  1862. if ((0x10000000 == pThread->pCurTask[nextID]->pCMDEnd[0]) &&
  1863. (0x00000008 == pThread->pCurTask[nextID]->pCMDEnd[-1])) {
  1864. /* We reached the last task */
  1865. break;
  1866. }
  1867. }
  1868. pThread->nextCookie -= reorderCount;
  1869. CMDQ_VERBOSE("WAIT: nextcookie minus %d.\n", reorderCount);
  1870. }
  1871. static int32_t cmdq_core_copy_buffer_impl(void *dst, void *src, const uint32_t size,
  1872. const bool copyFromUser)
  1873. {
  1874. int32_t status = 0;
  1875. if (false == copyFromUser) {
  1876. CMDQ_VERBOSE("COMMAND: Copy kernel to 0x%p\n", dst);
  1877. memcpy(dst, src, size);
  1878. } else {
  1879. CMDQ_VERBOSE("COMMAND: Copy user to 0x%p\n", dst);
  1880. if (copy_from_user(dst, src, size)) {
  1881. CMDQ_AEE("CMDQ",
  1882. "CRDISPATCH_KEY:CMDQ Fail to copy from user 0x%p, size:%d\n",
  1883. src, size);
  1884. status = -ENOMEM;
  1885. }
  1886. }
  1887. return status;
  1888. }
/*
 * Verify that a user-submitted command descriptor ends with the required
 * EOC (with IRQ bit set) + JUMP instruction pair.
 *
 * Returns true if the tail is well-formed (or the buffer has not yet
 * been copied from user space, in which case verification is deferred);
 * false otherwise, after raising an AEE report.
 */
bool cmdq_core_verfiy_command_desc_end(cmdqCommandStruct *pCommandDesc)
{
	uint32_t *pCMDEnd = NULL;
	bool valid = true;

	/* make sure we have sufficient command to parse */
	if (!CMDQ_U32_PTR(pCommandDesc->pVABase) || pCommandDesc->blockSize < (2 * CMDQ_INST_SIZE))
		return false;

	if (true == cmdq_core_is_request_from_user_space(pCommandDesc->scenario)) {
		/* command buffer has not copied from user space yet, skip verify. */
		return true;
	}

	/* point at the last 32-bit word of the block */
	pCMDEnd =
	    CMDQ_U32_PTR(pCommandDesc->pVABase) + (pCommandDesc->blockSize / sizeof(uint32_t)) - 1;

	/* make sure the command is ended by EOC + JUMP */
	/* pCMDEnd[-3] is the EOC argB word; bit 0 is the IRQ-enable flag */
	if ((pCMDEnd[-3] & 0x1) != 1) {
		CMDQ_ERR
		    ("[CMD] command desc 0x%p does not throw IRQ (%08x:%08x), pEnd:%p(%p, %d)\n",
		     pCommandDesc, pCMDEnd[-3], pCMDEnd[-2], pCMDEnd,
		     CMDQ_U32_PTR(pCommandDesc->pVABase), pCommandDesc->blockSize);
		valid = false;
	}

	/* pCMDEnd[-2] must be an EOC opcode, pCMDEnd[0] a JUMP opcode */
	if (((pCMDEnd[-2] & 0xFF000000) >> 24) != CMDQ_CODE_EOC ||
	    ((pCMDEnd[0] & 0xFF000000) >> 24) != CMDQ_CODE_JUMP) {
		CMDQ_ERR
		    ("[CMD] command desc 0x%p does not end in EOC+JUMP (%08x:%08x, %08x:%08x), pEnd:%p(%p, %d)\n",
		     pCommandDesc, pCMDEnd[-3], pCMDEnd[-2], pCMDEnd[-1], pCMDEnd[0], pCMDEnd,
		     CMDQ_U32_PTR(pCommandDesc->pVABase), pCommandDesc->blockSize);
		valid = false;
	}
#if 0
	BUG_ON(!valid);
#else
	if (false == valid) {
		/* invalid command, raise AEE */
		CMDQ_AEE("CMDQ", "INVALID command desc 0x%p\n", pCommandDesc);
	}
#endif

	return valid;
}
/*
 * Verify that a task's command buffer ends with EOC + JUMP and, unless
 * the scenario legitimately suppresses it (display loops, or secure
 * tasks under the HW-lock build), that the EOC raises an IRQ.
 *
 * Returns true if the tail is well-formed; false otherwise, after
 * raising an AEE report.
 */
bool cmdq_core_verfiy_command_end(const TaskStruct *pTask)
{
	bool valid = true;
	bool noIRQ = false;

	/* make sure we have sufficient command to parse */
	if (!pTask->pVABase || pTask->commandSize < (2 * CMDQ_INST_SIZE))
		return false;

	/* pCMDEnd[-3] is the EOC argB word; bit 0 is the IRQ-enable flag */
#ifdef CMDQ_SECURE_PATH_HW_LOCK
	/* secure tasks clear the IRQ bit on purpose, so only flag normal tasks */
	if ((pTask->pCMDEnd[-3] & 0x1) != 1 && false == pTask->secData.isSecure)
		noIRQ = true;
#else
	if ((pTask->pCMDEnd[-3] & 0x1) != 1)
		noIRQ = true;
#endif

	/* make sure the command is ended by EOC + JUMP */
	if (true == noIRQ) {
		if (cmdq_get_func()->isLoopScenario(pTask->scenario, true)) {
			/* Allow display only loop not throw IRQ */
			CMDQ_MSG("[CMD] DISP Loop pTask 0x%p does not throw IRQ (%08x:%08x)\n",
				 pTask, pTask->pCMDEnd[-3], pTask->pCMDEnd[-2]);
		} else {
			CMDQ_ERR("[CMD] pTask 0x%p does not throw IRQ (%08x:%08x)\n",
				 pTask, pTask->pCMDEnd[-3], pTask->pCMDEnd[-2]);
			valid = false;
		}
	}

	/* pCMDEnd[-2] must be an EOC opcode, pCMDEnd[0] a JUMP opcode */
	if (((pTask->pCMDEnd[-2] & 0xFF000000) >> 24) != CMDQ_CODE_EOC ||
	    ((pTask->pCMDEnd[0] & 0xFF000000) >> 24) != CMDQ_CODE_JUMP) {
		CMDQ_ERR("[CMD] pTask 0x%p does not end in EOC+JUMP (%08x:%08x, %08x:%08x)\n",
			 pTask,
			 pTask->pCMDEnd[-3], pTask->pCMDEnd[-2], pTask->pCMDEnd[-1],
			 pTask->pCMDEnd[0]);
		valid = false;
	}
#if 0
	BUG_ON(!valid);
#else
	if (false == valid) {
		/* Raise AEE */
		CMDQ_AEE("CMDQ", "INVALID pTask 0x%p\n", pTask);
	}
#endif

	return valid;
}
  1972. static TaskStruct *cmdq_core_find_free_task(void)
  1973. {
  1974. TaskStruct *pTask = NULL;
  1975. mutex_lock(&gCmdqTaskMutex);
  1976. /* Pick from free list first; */
  1977. /* create one if there is no free entry. */
  1978. if (list_empty(&gCmdqContext.taskFreeList)) {
  1979. pTask = cmdq_core_task_create();
  1980. } else {
  1981. pTask = list_first_entry(&(gCmdqContext.taskFreeList), TaskStruct, listEntry);
  1982. /* remove from free list */
  1983. list_del_init(&(pTask->listEntry));
  1984. }
  1985. mutex_unlock(&gCmdqTaskMutex);
  1986. return pTask;
  1987. }
/*
 * Build pTask's final DMA command buffer from the user descriptor:
 * size the buffer (original commands + optional profiling prologue +
 * per-register backup sequences + reserved append/cookie slots),
 * copy the commands in, then splice register read-back instructions
 * in front of the relocated EOC+JUMP tail.
 *
 * Returns 0 on success or a negative error from buffer realloc/copy.
 */
static int32_t cmdq_core_insert_read_reg_command(TaskStruct *pTask,
						 cmdqCommandStruct *pCommandDesc)
{
/* #define CMDQ_PROFILE_COMMAND */
	int32_t status = 0;
	uint32_t extraBufferSize = 0;
	int i = 0;
	CMDQ_DATA_REGISTER_ENUM valueRegId;
	CMDQ_DATA_REGISTER_ENUM destRegId;
	CMDQ_EVENT_ENUM regAccessToken;
	uint32_t prependBufferSize = 0;
	const bool userSpaceRequest = cmdq_core_is_request_from_user_space(pTask->scenario);
	bool isValidCommandEnd = true;

	/* HACK: let blockSize >= commandSize + 1 * CMDQ_INST_SIZE to prevnet GCE stop wrongly */
	/* ALPS01676155, queue 2 tasks in same HW thread */
	/* .task_2: MVABase 0x0x435b3000, Size: 2944 */
	/* .task_3: MVABase 0x0x435b2000, Size: 4096 */
	/* THR_END = end of task_3
	 * = 0x0x435b3000 = 0x0x435b2000 + 4096
	 * = begin of task_2, so...GCE stop at begin of task_2's command and never exec task_2 & task_3 */
	const uint32_t reservedAppendBufferSize = 1 * CMDQ_INST_SIZE;
#ifdef CMDQ_PROFILE_COMMAND
	bool profileCommand = false;
#endif
	uint32_t copiedBeforeProfile = 0;
	int32_t subsysCode;
	uint32_t physAddr;
	uint32_t reservedAppendBlockInstrSize = 0;
	uint32_t reservedBackCookieInstrSize = 0;

#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
	/* HACK: Insert WAIT CMDQ APPEND event here */
	reservedAppendBlockInstrSize = 1 * CMDQ_INST_SIZE;
#endif

	/* HACK: Insert BACKUP secure threads' COOKIE and IRQ here */
	if (true == pTask->secData.isSecure) {
		/* we need to consider {READ, MOVE, WRITE} for each register */
		/* and the SYNC in the begin and end */
#ifdef CMDQ_SECURE_PATH_HW_LOCK
		reservedBackCookieInstrSize = (3 + 3 + 2 + 2 + 1) * CMDQ_INST_SIZE;
#else
#ifdef CMDQ_SECURE_PATH_NORMAL_IRQ
		reservedBackCookieInstrSize = (3 + 2) * CMDQ_INST_SIZE;
#endif
#endif
	}

	/* calculate required buffer size */
	/* we need to consider {READ, MOVE, WRITE} for each register */
	/* and the SYNC in the begin and end */
	if (pTask->regCount) {
		extraBufferSize = (3 * CMDQ_INST_SIZE * pTask->regCount) + (2 * CMDQ_INST_SIZE);
		/* Add move instruction count for handle Extra APB address (add move instructions) */
		for (i = 0; i < pTask->regCount; ++i) {
			physAddr = CMDQ_U32_PTR(pCommandDesc->regRequest.regAddresses)[i];
			subsysCode = cmdq_core_subsys_from_phys_addr(physAddr);
			if (CMDQ_SPECIAL_SUBSYS_ADDR == subsysCode)
				extraBufferSize += CMDQ_INST_SIZE;
		}
	} else {
		extraBufferSize = 0;
	}

	CMDQ_VERBOSE("test %d, original command size = %d\n", __LINE__, pTask->commandSize);

#ifdef CMDQ_PROFILE_COMMAND
	/* do not insert profile command for trigger loop */
#ifdef CMDQ_PROFILE_COMMAND_TRIGGER_LOOP
	profileCommand = pTask &&
	    cmdq_get_func()->shouldProfile(pTask->scenario) && (pTask->profileData != NULL);
#else
	profileCommand = pTask &&
	    cmdq_get_func()->shouldProfile(pTask->scenario) &&
	    (pTask->loopCallback == NULL) && (pTask->profileData != NULL);
#endif
	if (profileCommand) {
		/* backup GPT at begin and end */
		extraBufferSize += (CMDQ_INST_SIZE * 3);
		/* insert after the first MARKER instruction */
		/* and first SYNC instruction (if any) */
		prependBufferSize += 2 * (CMDQ_INST_SIZE * 3);
		if (pTask->commandSize < prependBufferSize)
			prependBufferSize = 0;
	}
#endif

	/* grow the DMA buffer to hold everything computed above */
	status = cmdq_core_task_realloc_buffer_size(pTask,
						    pTask->commandSize + prependBufferSize +
						    extraBufferSize + reservedAppendBufferSize +
						    reservedAppendBlockInstrSize +
						    reservedBackCookieInstrSize);
	if (status < 0) {
		CMDQ_ERR("finalize command buffer failed to realloc, pTask=0x%p, requireSize=%d\n",
			 pTask, pTask->commandSize + prependBufferSize + extraBufferSize);
		return status;
	}

	/* init pCMDEnd */
	/* pCMDEnd start from beginning. Note it is out-of-sync with pTask->commandSize. */
	pTask->pCMDEnd = pTask->pVABase - 1;

#ifdef CMDQ_PROFILE_COMMAND
	if (profileCommand) {
		/* decide how many leading instructions must be copied verbatim
		 * before the profiling backup can be inserted */
		if (cmdq_get_func()->shouldEnablePrefetch(pTask->scenario)) {
			/* HACK: */
			/* MARKER + WAIT_FOR_EOF */
			copiedBeforeProfile = 2 * CMDQ_INST_SIZE;
		} else if (pTask->loopCallback != NULL) {
#ifdef CMDQ_PROFILE_COMMAND_TRIGGER_LOOP
			/* HACK: insert profile instr after WAIT TE_EOF (command mode only) */
			/* note content of trigger loop varies according to platform */
			copiedBeforeProfile = 8 * CMDQ_INST_SIZE;
#endif
		} else if (true == userSpaceRequest) {
			copiedBeforeProfile = 0;
		} else {
			/* HACK: we copy the 1st "WAIT FOR EOF" instruction, */
			/* because this is the point where we start writing registers! */
			copiedBeforeProfile = 1 * CMDQ_INST_SIZE;
		}
		if (0 < copiedBeforeProfile) {
			cmdq_core_copy_buffer_impl(pTask->pCMDEnd + 1,
						   CMDQ_U32_PTR(pCommandDesc->pVABase),
						   copiedBeforeProfile, userSpaceRequest);
			pTask->pCMDEnd =
			    pTask->pVABase + (copiedBeforeProfile / sizeof(pTask->pVABase[0])) - 1;
		}
		/* now we start insert backup instructions */
		CMDQ_VERBOSE("test %d\n", __LINE__);
		do {
			CMDQ_VERBOSE("[BACKUP]va=%p, pa=%pa, task=%p\n", pTask->profileData,
				     &pTask->profileDataPA, pTask);
			cmdq_core_insert_backup_instr(pTask,
						      CMDQ_APXGPT2_COUNT,
						      pTask->profileDataPA,
						      CMDQ_DATA_REG_JPEG, CMDQ_DATA_REG_JPEG_DST);
		} while (0);
		/* this increases pTask->commandSize */
	}
#endif

	/* Copy the commands to our DMA buffer */
	status = cmdq_core_copy_buffer_impl(pTask->pCMDEnd + 1,
					    CMDQ_U32_PTR(pCommandDesc->pVABase) +
					    (copiedBeforeProfile /
					     sizeof(CMDQ_U32_PTR(pCommandDesc->pVABase)[0])),
					    pCommandDesc->blockSize - copiedBeforeProfile,
					    userSpaceRequest);
	if (0 > status)
		return status;

	/* re-adjust pCMDEnd according to commandSize */
	pTask->pCMDEnd = pTask->pVABase + (pTask->commandSize / sizeof(pTask->pVABase[0])) - 1;

	/* make sure instructions are really in DRAM */
	smp_mb();

	CMDQ_VERBOSE("test %d, CMDEnd=%p, base=%p, cmdSize=%d\n", __LINE__, pTask->pCMDEnd,
		     pTask->pVABase, pTask->commandSize);

	isValidCommandEnd = cmdq_core_verfiy_command_end(pTask);
	if (!isValidCommandEnd) {
		CMDQ_ERR("[CMD] with smp_mb() cmdSize=%d, blockSize:%d\n", pTask->commandSize,
			 pCommandDesc->blockSize);
		cmdq_core_dump_task(pTask);
		cmdq_core_dump_all_task();
	}

	/* if no read request, no post-process needed. */
	if (0 == pTask->regCount && extraBufferSize == 0)
		return 0;

	/* move EOC+JUMP to the new end */
	memcpy(pTask->pCMDEnd + 1 - 4 + (extraBufferSize / sizeof(pTask->pCMDEnd[0])),
	       &pTask->pCMDEnd[-3], 2 * CMDQ_INST_SIZE);

	/* start from old EOC (replace it) */
	pTask->pCMDEnd -= 4;

	if (pTask->regCount) {
		CMDQ_VERBOSE("COMMAND:allocate register output section\n");
		/* allocate register output section */
		BUG_ON(pTask->regResults);
		pTask->regResults = cmdq_core_alloc_hw_buffer(cmdq_dev_get(),
							      pTask->regCount * sizeof(pTask->regResults[0]),
							      &pTask->regResultsMVA,
							      GFP_KERNEL);
		CMDQ_MSG("COMMAND: result buf VA:0x%p, PA:%pa\n", pTask->regResults,
			 &pTask->regResultsMVA);
		/* allocate GPR resource */
		cmdq_get_func()->getRegID(pTask->engineFlag, &valueRegId, &destRegId,
					  &regAccessToken);
		/* use SYNC TOKEN to make sure only 1 thread access at a time */
		/* bit 0-11: wait_value */
		/* bit 15: to_wait, true */
		/* bit 31: to_update, true */
		/* bit 16-27: update_value */
		/* wait and clear */
		cmdq_core_append_command(pTask,
					 (CMDQ_CODE_WFE << 24) | regAccessToken,
					 ((1 << 31) | (1 << 15) | 1));
		for (i = 0; i < pTask->regCount; ++i) {
			cmdq_core_insert_backup_instr(pTask,
						      CMDQ_U32_PTR(pCommandDesc->regRequest.regAddresses)[i],
						      pTask->regResultsMVA + (i * sizeof(pTask->regResults[0])),
						      valueRegId, destRegId);
		}
		/* set directly */
		cmdq_core_append_command(pTask,
					 (CMDQ_CODE_WFE << 24) | regAccessToken,
					 ((1 << 31) | (1 << 16)));
	}

#ifdef CMDQ_PROFILE_COMMAND
	if (profileCommand) {
		cmdq_core_insert_backup_instr(pTask,
					      CMDQ_APXGPT2_COUNT,
					      pTask->profileDataPA + sizeof(uint32_t),
					      CMDQ_DATA_REG_JPEG, CMDQ_DATA_REG_JPEG_DST);
	}
	/* this increases pTask->commandSize */
#endif

	/* move END to copied EOC+JUMP */
	pTask->pCMDEnd += 4;

#ifdef CMDQ_PROFILE_COMMAND_TRIGGER_LOOP
	/* revise jump */
	if (pTask && pTask->loopCallback) {
		if (((pTask->pCMDEnd[0] & 0xFF000000) >> 24) == CMDQ_CODE_JUMP) {
			/* The Last Instruction is JUMP */
			pTask->pCMDEnd[-1] = -pTask->commandSize + CMDQ_INST_SIZE;
		} else {
			CMDQ_ERR("trigger loop error since profile\n");
		}
	}
#endif

	/* make sure instructions are really in DRAM */
	smp_mb();

#ifdef CMDQ_PROFILE_COMMAND_TRIGGER_LOOP
	if (pTask && pTask->loopCallback && cmdq_get_func()->shouldProfile(pTask->scenario))
		cmdqCoreDebugDumpCommand(pTask);
#endif

	CMDQ_MSG("COMMAND: size = %d, end = 0x%p\n", pTask->commandSize, pTask->pCMDEnd);

	isValidCommandEnd = cmdq_core_verfiy_command_end(pTask);
	if (!isValidCommandEnd) {
		CMDQ_ERR("[CMD] cmdSize=%d, blockSize:%d\n", pTask->commandSize,
			 pCommandDesc->blockSize);
		cmdq_core_dump_task(pTask);
	}

	return status;
}
/*
 * Allocate (or reuse) a task record, populate it from the user command
 * descriptor, build its DMA command buffer, and queue it on the wait
 * list sorted by descending priority.
 *
 * @pCommandDesc: user-submitted command descriptor
 * @loopCB:       optional IRQ callback for loop-mode tasks
 * @loopData:     opaque argument passed to @loopCB
 *
 * Returns the queued task, or NULL on allocation/copy failure (an AEE
 * report is raised in that case).
 */
static TaskStruct *cmdq_core_acquire_task(cmdqCommandStruct *pCommandDesc,
					  CmdqInterruptCB loopCB, unsigned long loopData)
{
	TaskStruct *pTask = NULL;
	int32_t status;

	CMDQ_MSG("-->TASK: acquire task begin CMD: 0x%p, size: %d, Eng: 0x%016llx\n",
		 CMDQ_U32_PTR(pCommandDesc->pVABase), pCommandDesc->blockSize,
		 pCommandDesc->engineFlag);
	CMDQ_PROF_START(current->pid, __func__);

	pTask = cmdq_core_find_free_task();
	do {
		if (NULL == pTask) {
			CMDQ_AEE("CMDQ", "Can't acquire task info\n");
			break;
		}

		/* initialize field values */
		pTask->scenario = pCommandDesc->scenario;
		pTask->priority = pCommandDesc->priority;
		pTask->engineFlag = pCommandDesc->engineFlag;
		pTask->privateData = (void *)(unsigned long long *)&pCommandDesc->privateData;
		pTask->loopCallback = loopCB;
		pTask->loopData = loopData;
		pTask->taskState = TASK_STATE_WAITING;
		pTask->reorder = 0;
		pTask->thread = CMDQ_INVALID_THREAD;
		pTask->irqFlag = 0x0;
		pTask->useWorkQueue = false;

		/* secure exec data */
		pTask->secData.isSecure = pCommandDesc->secData.isSecure;
		pTask->secData.enginesNeedDAPC = pCommandDesc->secData.enginesNeedDAPC;
		pTask->secData.enginesNeedPortSecurity =
		    pCommandDesc->secData.enginesNeedPortSecurity;
		pTask->secData.addrMetadataCount = pCommandDesc->secData.addrMetadataCount;
		pTask->secData.addrMetadatas = pCommandDesc->secData.addrMetadatas;

		/* profile data for command profiling */
		if (cmdq_get_func()->shouldProfile(pTask->scenario)) {
			/* two uint32_t slots: begin and end timestamps */
			pTask->profileData =
			    cmdq_core_alloc_hw_buffer(cmdq_dev_get(),
						      2 * sizeof(uint32_t), &pTask->profileDataPA,
						      GFP_KERNEL);
		} else {
			pTask->profileData = NULL;
		}

		/* profile timers */
		memset(&(pTask->trigger), 0x0, sizeof(pTask->trigger));
		memset(&(pTask->gotIRQ), 0x0, sizeof(pTask->gotIRQ));
		memset(&(pTask->beginWait), 0x0, sizeof(pTask->beginWait));
		memset(&(pTask->wakedUp), 0x0, sizeof(pTask->wakedUp));

		/* profile marker */
		cmdq_task_init_profile_marker_data(pCommandDesc, pTask);

		pTask->commandSize = pCommandDesc->blockSize;
		pTask->regCount = pCommandDesc->regRequest.count;

		/* store caller info for debug */
		if (current) {
			pTask->callerPid = current->pid;
			memcpy(pTask->callerName, current->comm, sizeof(current->comm));
		}

		status = cmdq_core_insert_read_reg_command(pTask, pCommandDesc);
		if (0 > status) {
			/* raise AEE first */
			CMDQ_AEE("CMDQ", "Can't alloc command buffer\n");
			/* then release task */
			cmdq_core_release_task(pTask);
			pTask = NULL;
		}
	} while (0);

	/* */
	/* insert into waiting list to process */
	/* */
	mutex_lock(&gCmdqTaskMutex);
	if (pTask) {
		struct list_head *insertAfter = &gCmdqContext.taskWaitList;
		struct TaskStruct *taskEntry = NULL;
		struct list_head *p = NULL;

		pTask->submit = sched_clock();

		/* add to waiting list, keep it sorted by priority */
		/* so that we add high-priority tasks first. */
		list_for_each(p, &gCmdqContext.taskWaitList) {
			taskEntry = list_entry(p, struct TaskStruct, listEntry);
			/* keep the list sorted. */
			/* higher priority tasks are inserted in front of the queue */
			if (taskEntry->priority < pTask->priority)
				break;
			insertAfter = p;
		}
		list_add(&(pTask->listEntry), insertAfter);
	}
	mutex_unlock(&gCmdqTaskMutex);

	CMDQ_MSG("<--TASK: acquire task 0x%p end\n", pTask);
	CMDQ_PROF_END(current->pid, __func__);

	return pTask;
}
/*
 * Reference-counted enable/disable of the GCE core clock and, for
 * non-display scenarios, the shared SMI common clock.  The first user
 * to arrive powers the blocks on (and re-initializes GCE state); the
 * last user to leave powers them off.
 *
 * NOTE(review): @engineFlag is currently unused in this body — confirm
 * whether it is kept for API symmetry or should participate in the
 * decision.  Caller is expected to hold the appropriate lock (name
 * suggests "locked"; the lock itself is not visible here).
 */
static void cmdq_core_enable_common_clock_locked(const bool enable,
						 const uint64_t engineFlag,
						 CMDQ_SCENARIO_ENUM scenario)
{
	/* CMDQ(GCE) clock */
	if (enable) {
		CMDQ_VERBOSE("[CLOCK] Enable CMDQ(GCE) Clock test=%d SMI %d\n",
			     atomic_read(&gCmdqThreadUsage), atomic_read(&gSMIThreadUsage));
		if (0 == atomic_read(&gCmdqThreadUsage)) {
			/* CMDQ init flow: */
			/* 1. clock-on */
			/* 2. reset all events */
			cmdq_get_func()->enableGCEClockLocked(enable);
			cmdq_core_reset_hw_events();
			cmdq_core_config_prefetch_gsize();
#ifdef CMDQ_ENABLE_BUS_ULTRA
			CMDQ_LOG("Enable GCE Ultra ability");
			CMDQ_REG_SET32(CMDQ_BUS_CONTROL_TYPE, 0x3);
#endif
#ifdef CMDQ_EVENT_NEED_BACKUP
			/* Restore event */
			cmdq_get_func()->eventRestore();
#endif
		}
		atomic_inc(&gCmdqThreadUsage);

		/* SMI related threads common clock enable, excluding display scenario on his own */
		if (!cmdq_get_func()->isDispScenario(scenario)) {
			if (0 == atomic_read(&gSMIThreadUsage)) {
				CMDQ_VERBOSE("[CLOCK] SMI clock enable %d\n", scenario);
				cmdq_get_func()->enableCommonClockLocked(enable);
			}
			atomic_inc(&gSMIThreadUsage);
		}
	} else {
		/* decrement first, then power off when the count reaches zero */
		atomic_dec(&gCmdqThreadUsage);
		CMDQ_VERBOSE("[CLOCK] Disable CMDQ(GCE) Clock test=%d SMI %d\n",
			     atomic_read(&gCmdqThreadUsage), atomic_read(&gSMIThreadUsage));
		if (0 >= atomic_read(&gCmdqThreadUsage)) {
#ifdef CMDQ_EVENT_NEED_BACKUP
			/* Backup event */
			cmdq_get_func()->eventBackup();
#endif
			/* clock-off */
			cmdq_get_func()->enableGCEClockLocked(enable);
		}

		/* SMI related threads common clock enable, excluding display scenario on his own */
		if (!cmdq_get_func()->isDispScenario(scenario)) {
			atomic_dec(&gSMIThreadUsage);
			if (0 >= atomic_read(&gSMIThreadUsage)) {
				CMDQ_VERBOSE("[CLOCK] SMI clock disable %d\n", scenario);
				cmdq_get_func()->enableCommonClockLocked(enable);
			}
		}
	}
}
  2368. static uint64_t cmdq_core_get_actual_engine_flag_for_enable_clock(uint64_t engineFlag,
  2369. int32_t thread)
  2370. {
  2371. EngineStruct *pEngine;
  2372. ThreadStruct *pThread;
  2373. uint64_t engines;
  2374. int32_t index;
  2375. pEngine = gCmdqContext.engine;
  2376. pThread = gCmdqContext.thread;
  2377. engines = 0;
  2378. for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++) {
  2379. if (engineFlag & (1LL << index)) {
  2380. if (pEngine[index].userCount <= 0) {
  2381. pEngine[index].currOwner = thread;
  2382. engines |= (1LL << index);
  2383. /* also assign engine flag into ThreadStruct */
  2384. pThread[thread].engineFlag |= (1LL << index);
  2385. }
  2386. pEngine[index].userCount++;
  2387. }
  2388. }
  2389. return engines;
  2390. }
/* Balance counter for ISP clockOn/clockOff callback calls (debug aid);
 * incremented in cmdq_core_enable_clock(), decremented in
 * cmdq_core_disable_clock(), logged there when non-zero. */
static int32_t gCmdqISPClockCounter;
/*
 * Enable HW clocks for a task after it was dispatched to a HW thread.
 *
 * engineFlag:            full engine flag of the task; used for the common
 *                        clock and for the per-task ISP on/off.
 * thread:                HW thread the task was dispatched to (log only).
 * engineMustEnableClock: engines whose user count just became > 0, i.e. the
 *                        ones that actually need a group clockOn call.
 * scenario:              forwarded to the common-clock helper.
 *
 * Called with gCmdqClockMutex held (see cmdq_core_acquire_thread).
 */
static void cmdq_core_enable_clock(uint64_t engineFlag,
				   int32_t thread,
				   uint64_t engineMustEnableClock, CMDQ_SCENARIO_ENUM scenario)
{
	const uint64_t engines = engineMustEnableClock;
	int32_t index;
	CmdqCBkStruct *pCallback;
	int32_t status;

	CMDQ_VERBOSE("-->CLOCK: Enable flag 0x%llx thread %d begin, mustEnable: 0x%llx(0x%llx)\n",
		     engineFlag, thread, engineMustEnableClock, engines);
	/* enable fundamental clocks if needed */
	cmdq_core_enable_common_clock_locked(true, engineFlag, scenario);
	pCallback = gCmdqGroupCallback;
	/* ISP special check: Always call ISP on/off if this task */
	/* involves ISP. Ignore the ISP HW flags. */
	if (cmdq_core_is_group_flag(CMDQ_GROUP_ISP, engineFlag)) {
		CMDQ_VERBOSE("CLOCK: enable group %d clockOn\n", CMDQ_GROUP_ISP);
		if (NULL == pCallback[CMDQ_GROUP_ISP].clockOn) {
			CMDQ_ERR("CLOCK: enable group %d clockOn func NULL\n", CMDQ_GROUP_ISP);
		} else {
			status =
			    pCallback[CMDQ_GROUP_ISP].clockOn(gCmdqEngineGroupBits[CMDQ_GROUP_ISP] &
							      engineFlag);
#if 1
			/* debug counter, balanced by cmdq_core_disable_clock() */
			++gCmdqISPClockCounter;
#endif
			if (status < 0) {
				/* Error status print */
				CMDQ_ERR("CLOCK: enable group %d clockOn failed\n", CMDQ_GROUP_ISP);
			}
		}
	}
	/* turn on clocks for the remaining groups, per engineMustEnableClock */
	for (index = CMDQ_MAX_GROUP_COUNT - 1; index >= 0; --index) {
		/* note that DISPSYS controls their own clock on/off */
		if (CMDQ_GROUP_DISP == index)
			continue;
		/* note that ISP is per-task on/off, not per HW flag */
		if (CMDQ_GROUP_ISP == index)
			continue;
		if (cmdq_core_is_group_flag((CMDQ_GROUP_ENUM) index, engines)) {
			CMDQ_MSG("CLOCK: enable group %d clockOn\n", index);
			if (NULL == pCallback[index].clockOn) {
				CMDQ_LOG("[WARNING]CLOCK: enable group %d clockOn func NULL\n",
					 index);
				continue;
			}
			status = pCallback[index].clockOn(gCmdqEngineGroupBits[index] & engines);
			if (status < 0) {
				/* Error status print */
				CMDQ_ERR("CLOCK: enable group %d clockOn failed\n", index);
			}
		}
	}
	CMDQ_MSG("<--CLOCK: Enable hardware clock end\n");
}
/*
 * Decide whether a task with (engineFlag, isSecure) may start HW thread
 * acquisition now, without favoring secure or normal path.
 *
 * Scans the waiting list for the first task whose engine flag overlaps with
 * engineFlag. Returns 0 (OK to dispatch) when there is no such task, or when
 * that first waiting task has the same secure attribute; returns -EFAULT so
 * the caller yields to the first overlapped waiting task otherwise.
 *
 * "_unlocked" suffix: caller provides the locking for the waiting list.
 */
static int32_t cmdq_core_can_start_to_acquire_HW_thread_unlocked(const uint64_t
								 engineFlag,
								 const bool isSecure)
{
	struct TaskStruct *pFirstWaitingTask = NULL;
	struct TaskStruct *pTempTask = NULL;
	struct list_head *p = NULL;
	bool preferSecurePath;
	int32_t status = 0;
	char longMsg[CMDQ_LONGSTRING_MAX];
	uint32_t msgOffset;
	int32_t msgMAXSize;

	/* find the first waiting task with OVERLAPPED engine flag with pTask */
	list_for_each(p, &gCmdqContext.taskWaitList) {
		pTempTask = list_entry(p, struct TaskStruct, listEntry);
		if (NULL != pTempTask && (engineFlag & (pTempTask->engineFlag))) {
			pFirstWaitingTask = pTempTask;
			break;
		}
	}
	do {
		if (NULL == pFirstWaitingTask) {
			/* no waiting task with overlapped engine, go to dispatch thread */
			break;
		}
		preferSecurePath = pFirstWaitingTask->secData.isSecure;
		if (preferSecurePath == isSecure) {
			/* same security path as first waiting task, go to start thread dispatch */
			cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
			cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
					   "THREAD: is sec(%d, eng:0x%llx) as first waiting task",
					   isSecure, engineFlag);
			cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
					   "(0x%p, eng:0x%llx), start thread dispatch.\n",
					   pFirstWaitingTask, pFirstWaitingTask->engineFlag);
			if (msgOffset > 0) {
				/* print message */
				CMDQ_MSG("%s", longMsg);
			}
			break;
		}
		/* different security attribute: must wait behind the first waiting task */
		CMDQ_VERBOSE("THREAD: is not the first waiting task(0x%p), yield.\n",
			     pFirstWaitingTask);
		status = -EFAULT;
	} while (0);
	return status;
}
/**
 * Check whether the given engine flag conflicts with engines already owned
 * by other HW threads during thread dispatch.
 * Parameter:
 * engineFlag: [IN] engine flag
 * forceLog: [IN] print debug log
 * isSecure: [IN] secure path
 * *pThreadOut:
 * [IN] preferred thread; pass CMDQ_INVALID_THREAD if no preference
 * [OUT] dispatched thread result (CMDQ_INVALID_THREAD on conflict)
 * Return:
 * true if an engine conflict was found (caller must wait); false otherwise
 */
  2506. static bool cmdq_core_check_engine_conflict_unlocked(const uint64_t engineFlag,
  2507. bool forceLog,
  2508. const bool isSecure, int32_t *pThreadOut)
  2509. {
  2510. EngineStruct *pEngine;
  2511. ThreadStruct *pThread;
  2512. uint32_t free;
  2513. int32_t index;
  2514. int32_t thread;
  2515. uint64_t engine;
  2516. bool isEngineConflict;
  2517. char longMsg[CMDQ_LONGSTRING_MAX];
  2518. uint32_t msgOffset;
  2519. int32_t msgMAXSize;
  2520. pEngine = gCmdqContext.engine;
  2521. pThread = gCmdqContext.thread;
  2522. isEngineConflict = false;
  2523. engine = engineFlag;
  2524. thread = (*pThreadOut);
  2525. free = (CMDQ_INVALID_THREAD == thread) ? 0xFFFFFFFF : 0xFFFFFFFF & (~(0x1 << thread));
  2526. /* check if engine conflict */
  2527. for (index = 0; ((index < CMDQ_MAX_ENGINE_COUNT) && (engine != 0)); index++) {
  2528. if (engine & (0x1LL << index)) {
  2529. if (CMDQ_INVALID_THREAD == pEngine[index].currOwner) {
  2530. continue;
  2531. } else if (CMDQ_INVALID_THREAD == thread) {
  2532. thread = pEngine[index].currOwner;
  2533. free &= ~(0x1 << thread);
  2534. } else if (thread != pEngine[index].currOwner) {
  2535. /* Partial HW occupied by different threads, */
  2536. /* we need to wait. */
  2537. if (forceLog) {
  2538. cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
  2539. cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
  2540. "THREAD: try locate on thread %d but engine %d",
  2541. thread, index);
  2542. cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
  2543. " also occupied by thread %d, secure:%d\n",
  2544. pEngine[index].currOwner, isSecure);
  2545. if (msgOffset > 0) {
  2546. /* print message */
  2547. CMDQ_LOG("%s", longMsg);
  2548. }
  2549. } else {
  2550. cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
  2551. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  2552. "THREAD: try locate on thread %d but engine %d",
  2553. thread, index);
  2554. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  2555. " also occupied by thread %d, secure:%d\n",
  2556. pEngine[index].currOwner, isSecure);
  2557. if (msgOffset > 0) {
  2558. /* print message */
  2559. CMDQ_VERBOSE("%s", longMsg);
  2560. }
  2561. }
  2562. isEngineConflict = true; /* engine conflict! */
  2563. thread = CMDQ_INVALID_THREAD;
  2564. break;
  2565. }
  2566. engine &= ~(0x1LL << index);
  2567. }
  2568. }
  2569. (*pThreadOut) = thread;
  2570. return isEngineConflict;
  2571. }
/*
 * Find (and claim) a HW thread for a task.
 *
 * Order of decisions:
 *  1. Ask cmdq_core_can_start_to_acquire_HW_thread_unlocked() whether this
 *     task may dispatch ahead of overlapped waiting tasks.
 *  2. Get the scenario's default thread, then run the engine-conflict check
 *     (skipped for display scenarios).
 *  3. Refuse a secure thread for a normal task.
 *  4. If no thread was pre-determined and no conflict exists, scan the
 *     appropriate index range for an idle thread and claim it under
 *     gCmdqExecLock (sets allowDispatching = 0).
 *  5. Reject a thread whose task slots are full or whose next cookie slot
 *     is still occupied.
 *
 * Returns the thread index, or CMDQ_INVALID_THREAD when none is available.
 */
static int32_t cmdq_core_find_a_free_HW_thread(uint64_t engineFlag,
					       CMDQ_HW_THREAD_PRIORITY_ENUM thread_prio,
					       CMDQ_SCENARIO_ENUM scenario, bool forceLog,
					       const bool isSecure)
{
	ThreadStruct *pThread;
	unsigned long flagsExecLock;
	int32_t index;
	int32_t thread;
	bool isEngineConflict;
	int32_t insertCookie;

	pThread = gCmdqContext.thread;
	do {
		CMDQ_VERBOSE
		    ("THREAD: find a free thread, engine: 0x%llx, scenario: %d, secure:%d\n",
		     engineFlag, scenario, isSecure);
		/* start to dispatch? */
		/* note we should not favor secure or normal path, */
		/* traverse waiting list to decide dispatch to secure or normal path */
		if (0 > cmdq_core_can_start_to_acquire_HW_thread_unlocked(engineFlag, isSecure)) {
			thread = CMDQ_INVALID_THREAD;
			break;
		}
		/* it's okay to dispatch a thread, */
		/* use scenario and pTask->secure to get default thread */
		thread = cmdq_get_func()->getThreadID(scenario, isSecure);
		/* check if engine conflict happened except DISP scenario */
		isEngineConflict = false;
		if (false == cmdq_get_func()->isDispScenario(scenario)) {
			isEngineConflict = cmdq_core_check_engine_conflict_unlocked(engineFlag,
										    forceLog,
										    isSecure,
										    &thread);
		}
		CMDQ_VERBOSE("THREAD: isEngineConflict:%d, thread:%d\n", isEngineConflict, thread);
#if 1				/* TODO: secure path porting */
		/* because all threads are pre-dispatched, there are 2 outcomes of the
		 * engine conflict check:
		 * 1. pre-dispatched secure thread, and no conflict with normal path
		 * 2. pre-dispatched secure thread, but conflict with normal/another secure path
		 *
		 * no need to check get normal thread in secure path
		 */
		/* ensure we do not dispatch a secure thread to a normal task */
		if ((false == isSecure) && (true == cmdq_get_func()->isSecureThread(thread))) {
			thread = CMDQ_INVALID_THREAD;
			isEngineConflict = true;
			break;
		}
#endif
		/* no engine conflict with running thread, AND used engines have no owner */
		/* try to find a free thread */
		if ((false == isEngineConflict) && (CMDQ_INVALID_THREAD == thread)) {
			/* thread 0 - CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT are preserved for DISPSYS */
			const bool isDisplayThread = CMDQ_THR_PRIO_DISPLAY_TRIGGER < thread_prio;
			int startIndex = isDisplayThread ? 0 : CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT;
			int endIndex = isDisplayThread ?
			    CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT : CMDQ_MAX_THREAD_COUNT;

			for (index = startIndex; index < endIndex; ++index) {
				/* claim under gCmdqExecLock so the same thread cannot be
				 * dispatched concurrently from another context */
				spin_lock_irqsave(&gCmdqExecLock, flagsExecLock);
				if ((0 == pThread[index].engineFlag) &&
				    (0 == pThread[index].taskCount) &&
				    (1 == pThread[index].allowDispatching)) {
					CMDQ_VERBOSE
					    ("THREAD: dispatch to thread %d, taskCount:%d, allowDispatching:%d\n",
					     index, pThread[index].taskCount,
					     pThread[index].allowDispatching);
					thread = index;
					pThread[index].allowDispatching = 0;
					spin_unlock_irqrestore(&gCmdqExecLock, flagsExecLock);
					break;
				}
				spin_unlock_irqrestore(&gCmdqExecLock, flagsExecLock);
			}
		}
		/* no thread available now, wait for it */
		if (CMDQ_INVALID_THREAD == thread)
			break;
		/* Make sure the found thread has enough space for the task; */
		/* ThreadStruct->pCurTask has size limitation. */
		if (cmdq_core_max_task_in_thread(thread) <= pThread[thread].taskCount) {
			if (forceLog) {
				CMDQ_LOG("THREAD: thread %d task count = %d full\n",
					 thread, pThread[thread].taskCount);
			} else {
				CMDQ_VERBOSE("THREAD: thread %d task count = %d full\n",
					     thread, pThread[thread].taskCount);
			}
			thread = CMDQ_INVALID_THREAD;
		} else {
			/* the slot the next task would occupy must be empty */
			insertCookie = pThread[thread].nextCookie % cmdq_core_max_task_in_thread(thread);
			if (NULL != pThread[thread].pCurTask[insertCookie]) {
				if (forceLog) {
					CMDQ_LOG("THREAD: thread %d nextCookie = %d already has task\n",
						 thread, pThread[thread].nextCookie);
				} else {
					CMDQ_VERBOSE("THREAD: thread %d nextCookie = %d already has task\n",
						     thread, pThread[thread].nextCookie);
				}
				thread = CMDQ_INVALID_THREAD;
			}
		}
	} while (0);
	return thread;
}
/*
 * Acquire a HW thread for (engineFlag, scenario) and enable clocks for all
 * engines this task is the first user of.
 *
 * Locking: gCmdqClockMutex around the whole operation; gCmdqThreadLock while
 * searching/claiming the thread and taking engine ownership. Clock enabling
 * happens after the spinlock is dropped (it may sleep) but still under the
 * mutex.
 *
 * Returns the acquired thread index, or CMDQ_INVALID_THREAD when no thread
 * is available yet.
 */
static int32_t cmdq_core_acquire_thread(uint64_t engineFlag,
					CMDQ_HW_THREAD_PRIORITY_ENUM thread_prio,
					CMDQ_SCENARIO_ENUM scenario, bool forceLog,
					const bool isSecure)
{
	unsigned long flags;
	int32_t thread;
	uint64_t engineMustEnableClock;

	CMDQ_PROF_START(current->pid, __func__);
	do {
		mutex_lock(&gCmdqClockMutex);
		spin_lock_irqsave(&gCmdqThreadLock, flags);
		thread =
		    cmdq_core_find_a_free_HW_thread(engineFlag, thread_prio, scenario, forceLog,
						    isSecure);
		if (CMDQ_INVALID_THREAD != thread) {
			/* get actual engine flag. Each bit represents an engine that must enable clock. */
			engineMustEnableClock =
			    cmdq_core_get_actual_engine_flag_for_enable_clock(engineFlag, thread);
		}
#ifdef CMDQ_SECURE_PATH_CONSUME_AGAIN
		/* secure MDP task failed to get a thread: ask consumer to retry */
		if (CMDQ_INVALID_THREAD == thread && true == isSecure && CMDQ_SCENARIO_USER_MDP == scenario)
			g_cmdq_consume_again = true;
#endif
		spin_unlock_irqrestore(&gCmdqThreadLock, flags);
		if (CMDQ_INVALID_THREAD != thread) {
			/* enable clock */
			cmdq_core_enable_clock(engineFlag, thread, engineMustEnableClock, scenario);
		}
		mutex_unlock(&gCmdqClockMutex);
	} while (0);
	CMDQ_PROF_END(current->pid, __func__);
	return thread;
}
  2710. static uint64_t cmdq_core_get_not_used_engine_flag_for_disable_clock(const uint64_t engineFlag)
  2711. {
  2712. EngineStruct *pEngine;
  2713. ThreadStruct *pThread;
  2714. uint64_t enginesNotUsed;
  2715. int32_t index;
  2716. int32_t currOwnerThread = CMDQ_INVALID_THREAD;
  2717. enginesNotUsed = 0LL;
  2718. pEngine = gCmdqContext.engine;
  2719. pThread = gCmdqContext.thread;
  2720. for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++) {
  2721. if (engineFlag & (1LL << index)) {
  2722. pEngine[index].userCount--;
  2723. if (pEngine[index].userCount <= 0) {
  2724. enginesNotUsed |= (1LL << index);
  2725. currOwnerThread = pEngine[index].currOwner;
  2726. /* remove engine flag in assigned pThread */
  2727. pThread[currOwnerThread].engineFlag &= ~(1LL << index);
  2728. pEngine[index].currOwner = CMDQ_INVALID_THREAD;
  2729. }
  2730. }
  2731. }
  2732. CMDQ_VERBOSE("%s, enginesNotUsed:0x%llx\n", __func__, enginesNotUsed);
  2733. return enginesNotUsed;
  2734. }
/*
 * Disable HW clocks after a task releases its engines.
 *
 * engineFlag:     full engine flag of the task; used for the per-task ISP
 *                 off and the common clock.
 * enginesNotUsed: engines whose user count dropped to zero; only these get
 *                 a group clockOff call.
 * scenario:       forwarded to the common-clock helper.
 *
 * Called with gCmdqClockMutex held (see cmdq_core_release_thread).
 */
static void cmdq_core_disable_clock(uint64_t engineFlag,
				    const uint64_t enginesNotUsed, CMDQ_SCENARIO_ENUM scenario)
{
	int32_t index;
	int32_t status;
	CmdqCBkStruct *pCallback;

	CMDQ_VERBOSE("-->CLOCK: Disable hardware clock 0x%llx begin, enginesNotUsed 0x%llx\n",
		     engineFlag, enginesNotUsed);
	pCallback = gCmdqGroupCallback;
	/* ISP special check: Always call ISP on/off if this task */
	/* involves ISP. Ignore the ISP HW flags ref count. */
	if (cmdq_core_is_group_flag(CMDQ_GROUP_ISP, engineFlag)) {
		CMDQ_VERBOSE("CLOCK: disable group %d clockOff\n", CMDQ_GROUP_ISP);
		if (NULL == pCallback[CMDQ_GROUP_ISP].clockOff) {
			CMDQ_ERR("CLOCK: disable group %d clockOff func NULL\n", CMDQ_GROUP_ISP);
		} else {
			status =
			    pCallback[CMDQ_GROUP_ISP].clockOff(gCmdqEngineGroupBits[CMDQ_GROUP_ISP]
							       & engineFlag);
#if 1
			/* debug counter: a non-zero value means on/off calls are unbalanced */
			--gCmdqISPClockCounter;
			if (gCmdqISPClockCounter != 0) {
				/* ISP clock off */
				CMDQ_VERBOSE("CLOCK: ISP clockOff cnt=%d\n", gCmdqISPClockCounter);
			}
#endif
			if (status < 0) {
				CMDQ_ERR("CLOCK: disable group %d clockOff failed\n",
					 CMDQ_GROUP_ISP);
			}
		}
	}
	/* Turn off unused engines */
	for (index = 0; index < CMDQ_MAX_GROUP_COUNT; ++index) {
		/* note that DISPSYS controls their own clock on/off */
		if (CMDQ_GROUP_DISP == index)
			continue;
		/* note that ISP is per-task on/off, not per HW flag */
		if (CMDQ_GROUP_ISP == index)
			continue;
		if (cmdq_core_is_group_flag((CMDQ_GROUP_ENUM) index, enginesNotUsed)) {
			CMDQ_MSG("CLOCK: Disable engine group %d flag=0x%llx clockOff\n", index,
				 enginesNotUsed);
			if (NULL == pCallback[index].clockOff) {
				CMDQ_LOG
				    ("[WARNING]CLOCK: Disable engine group %d clockOff func NULL\n",
				     index);
				continue;
			}
			status =
			    pCallback[index].clockOff(gCmdqEngineGroupBits[index] & enginesNotUsed);
			if (status < 0) {
				/* Error status print */
				CMDQ_ERR("CLOCK: Disable engine group %d clock failed\n", index);
			}
		}
	}
	/* disable fundamental clocks if needed */
	cmdq_core_enable_common_clock_locked(false, engineFlag, scenario);
	CMDQ_MSG("<--CLOCK: Disable hardware clock 0x%llx end\n", engineFlag);
}
  2796. void cmdq_core_add_consume_task(void)
  2797. {
  2798. if (!work_pending(&gCmdqContext.taskConsumeWaitQueueItem)) {
  2799. CMDQ_PROF_MMP(cmdq_mmp_get_event()->consume_add, MMProfileFlagPulse, 0, 0);
  2800. queue_work(gCmdqContext.taskConsumeWQ, &gCmdqContext.taskConsumeWaitQueueItem);
  2801. }
  2802. }
/*
 * Release the HW thread used by pTask: drop engine user counts, disable the
 * clocks of engines that became unused, and clear pTask->thread.
 * No-op when the task holds no thread (CMDQ_INVALID_THREAD).
 *
 * Locking mirrors cmdq_core_acquire_thread: gCmdqClockMutex outside,
 * gCmdqThreadLock only around the engine bookkeeping.
 */
static void cmdq_core_release_thread(TaskStruct *pTask)
{
	unsigned long flags;
	const int32_t thread = pTask->thread;
	const uint64_t engineFlag = pTask->engineFlag;
	uint64_t engineNotUsed = 0LL;

	if (thread == CMDQ_INVALID_THREAD)
		return;

	mutex_lock(&gCmdqClockMutex);
	spin_lock_irqsave(&gCmdqThreadLock, flags);
	/* get not used engines for disable clock */
	engineNotUsed = cmdq_core_get_not_used_engine_flag_for_disable_clock(engineFlag);
	pTask->thread = CMDQ_INVALID_THREAD;
	spin_unlock_irqrestore(&gCmdqThreadLock, flags);
	/* clock off */
	cmdq_core_disable_clock(engineFlag, engineNotUsed, pTask->scenario);
	/* Delay release resource */
	cmdq_core_delay_check_unlock(engineFlag, engineNotUsed);
	mutex_unlock(&gCmdqClockMutex);
}
  2823. static void cmdq_core_reset_hw_engine(int32_t engineFlag)
  2824. {
  2825. EngineStruct *pEngine;
  2826. uint32_t engines;
  2827. int32_t index;
  2828. int32_t status;
  2829. CmdqCBkStruct *pCallback;
  2830. CMDQ_MSG("Reset hardware engine begin\n");
  2831. pEngine = gCmdqContext.engine;
  2832. engines = 0;
  2833. for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++) {
  2834. if (engineFlag & (1LL << index))
  2835. engines |= (1LL << index);
  2836. }
  2837. pCallback = gCmdqGroupCallback;
  2838. for (index = 0; index < CMDQ_MAX_GROUP_COUNT; ++index) {
  2839. if (cmdq_core_is_group_flag((CMDQ_GROUP_ENUM) index, engines)) {
  2840. CMDQ_MSG("Reset engine group %d clock\n", index);
  2841. if (NULL == pCallback[index].resetEng) {
  2842. CMDQ_ERR("Reset engine group %d clock func NULL\n", index);
  2843. continue;
  2844. }
  2845. status =
  2846. pCallback[index].resetEng(gCmdqEngineGroupBits[index] & engineFlag);
  2847. if (status < 0) {
  2848. /* Error status print */
  2849. CMDQ_ERR("Reset engine group %d clock failed\n", index);
  2850. }
  2851. }
  2852. }
  2853. CMDQ_MSG("Reset hardware engine end\n");
  2854. }
/*
 * Suspend a GCE HW thread and busy-poll until its status register reports
 * the suspended state (bit 0x2).
 *
 * Returns 0 on success — including the case where the thread is already
 * disabled (suspend cannot take effect; a warning is logged). Returns
 * -EFAULT for an invalid thread index or when the suspended state is not
 * observed within CMDQ_MAX_LOOP_COUNT polls.
 *
 * lineNum is only recorded in the MMProfile event for debugging.
 */
int32_t cmdq_core_suspend_HW_thread(int32_t thread, uint32_t lineNum)
{
	int32_t loop = 0;
	uint32_t enabled = 0;

	if (CMDQ_INVALID_THREAD == thread) {
		CMDQ_ERR("suspend invalid thread\n");
		return -EFAULT;
	}

	CMDQ_PROF_MMP(cmdq_mmp_get_event()->thread_suspend, MMProfileFlagPulse, thread, lineNum);
	/* write suspend bit */
	CMDQ_REG_SET32(CMDQ_THR_SUSPEND_TASK(thread), 0x01);
	/* check if the thread is already disabled. */
	/* if already disabled, treat as suspend successful but print error log */
	enabled = CMDQ_REG_GET32(CMDQ_THR_ENABLE_TASK(thread));
	if (0 == (0x01 & enabled)) {
		CMDQ_LOG("[WARNING] thread %d suspend not effective, enable=%d\n", thread, enabled);
		return 0;
	}
	/* busy-wait for the suspended status bit */
	loop = 0;
	while (0x0 == (CMDQ_REG_GET32(CMDQ_THR_CURR_STATUS(thread)) & 0x2)) {
		if (loop > CMDQ_MAX_LOOP_COUNT) {
			CMDQ_AEE("CMDQ", "Suspend HW thread %d failed\n", thread);
			return -EFAULT;
		}
		loop++;
	}
#ifdef CONFIG_MTK_FPGA
	CMDQ_MSG("EXEC: Suspend HW thread(%d)\n", thread);
#endif
	return 0;
}
/*
 * Resume a previously suspended GCE HW thread by clearing its suspend bit.
 * A full memory barrier is issued first so any command buffer writes are
 * visible to the GCE before it resumes fetching.
 */
static inline void cmdq_core_resume_HW_thread(int32_t thread)
{
#ifdef CONFIG_MTK_FPGA
	CMDQ_MSG("EXEC: Resume HW thread(%d)\n", thread);
#endif
	/* make sure instructions are really in DRAM */
	smp_mb();
	CMDQ_PROF_MMP(cmdq_mmp_get_event()->thread_resume, MMProfileFlagPulse, thread, __LINE__);
	CMDQ_REG_SET32(CMDQ_THR_SUSPEND_TASK(thread), 0x00);
}
/*
 * Warm-reset a GCE HW thread and busy-poll until the HW clears the reset
 * bit. After the reset the slot-cycles register is reprogrammed to 0x3200
 * (its reset value is lost — assumption based on this code; confirm with
 * GCE datasheet).
 *
 * Returns 0 on success, -EFAULT if the reset bit does not clear within
 * CMDQ_MAX_LOOP_COUNT polls.
 */
static inline int32_t cmdq_core_reset_HW_thread(int32_t thread)
{
	int32_t loop = 0;

	CMDQ_MSG("Reset HW thread(%d)\n", thread);
	CMDQ_REG_SET32(CMDQ_THR_WARM_RESET(thread), 0x01);
	/* HW clears the bit when the warm reset completes */
	while (0x1 == (CMDQ_REG_GET32(CMDQ_THR_WARM_RESET(thread)))) {
		if (loop > CMDQ_MAX_LOOP_COUNT) {
			CMDQ_AEE("CMDQ", "Reset HW thread %d failed\n", thread);
			return -EFAULT;
		}
		loop++;
	}
	CMDQ_REG_SET32(CMDQ_THR_SLOT_CYCLES, 0x3200);
	return 0;
}
/*
 * Disable a GCE HW thread: warm-reset it first so it stops in a known
 * state, then clear its enable bit. Always returns 0 (the reset result is
 * intentionally ignored).
 */
static inline int32_t cmdq_core_disable_HW_thread(int32_t thread)
{
	cmdq_core_reset_HW_thread(thread);
	/* Disable thread */
	CMDQ_MSG("Disable HW thread(%d)\n", thread);
	CMDQ_REG_SET32(CMDQ_THR_ENABLE_TASK(thread), 0x00);
	return 0;
}
  2919. uint32_t cmdq_core_subsys_to_reg_addr(uint32_t argA)
  2920. {
  2921. const uint32_t subsysBit = cmdq_get_func()->getSubsysLSBArgA();
  2922. const int32_t subsys_id = (argA & CMDQ_ARG_A_SUBSYS_MASK) >> subsysBit;
  2923. uint32_t offset = 0;
  2924. uint32_t base_addr = 0;
  2925. uint32_t i;
  2926. for (i = 0; i < CMDQ_SUBSYS_MAX_COUNT; i++) {
  2927. if (gCmdqDtsData.subsys[i].subsysID == subsys_id) {
  2928. base_addr = gCmdqDtsData.subsys[i].msb;
  2929. offset = (argA & ~gCmdqDtsData.subsys[i].mask);
  2930. break;
  2931. }
  2932. }
  2933. return base_addr | offset;
  2934. }
  2935. const char *cmdq_core_parse_subsys_from_reg_addr(uint32_t reg_addr)
  2936. {
  2937. uint32_t addr_base_shifted;
  2938. const char *module = "CMDQ";
  2939. uint32_t i;
  2940. for (i = 0; i < CMDQ_SUBSYS_MAX_COUNT; i++) {
  2941. if (-1 == gCmdqDtsData.subsys[i].subsysID)
  2942. continue;
  2943. addr_base_shifted = (reg_addr & gCmdqDtsData.subsys[i].mask);
  2944. if (gCmdqDtsData.subsys[i].msb == addr_base_shifted) {
  2945. module = gCmdqDtsData.subsys[i].grpName;
  2946. break;
  2947. }
  2948. }
  2949. return module;
  2950. }
  2951. int32_t cmdq_core_subsys_from_phys_addr(uint32_t physAddr)
  2952. {
  2953. int32_t msb;
  2954. int32_t subsysID = -1;
  2955. uint32_t i;
  2956. for (i = 0; i < CMDQ_SUBSYS_MAX_COUNT; i++) {
  2957. if (-1 == gCmdqDtsData.subsys[i].subsysID)
  2958. continue;
  2959. msb = (physAddr & gCmdqDtsData.subsys[i].mask);
  2960. if (msb == gCmdqDtsData.subsys[i].msb) {
  2961. subsysID = gCmdqDtsData.subsys[i].subsysID;
  2962. break;
  2963. }
  2964. }
  2965. if (-1 == subsysID) {
  2966. /* printf error message */
  2967. CMDQ_ERR("unrecognized subsys, physAddr:0x%08x\n", physAddr);
  2968. }
  2969. return subsysID;
  2970. }
  2971. static const char *cmdq_core_parse_op(uint32_t opCode)
  2972. {
  2973. switch (opCode) {
  2974. case CMDQ_CODE_POLL:
  2975. return "POLL";
  2976. case CMDQ_CODE_WRITE:
  2977. return "WRIT";
  2978. case CMDQ_CODE_WFE:
  2979. return "SYNC";
  2980. case CMDQ_CODE_READ:
  2981. return "READ";
  2982. case CMDQ_CODE_MOVE:
  2983. return "MASK";
  2984. case CMDQ_CODE_JUMP:
  2985. return "JUMP";
  2986. case CMDQ_CODE_EOC:
  2987. return "MARK";
  2988. }
  2989. return NULL;
  2990. }
  2991. static void cmdq_core_parse_error(const TaskStruct *pTask, uint32_t thread,
  2992. const char **moduleName, int32_t *flag, uint32_t *instA,
  2993. uint32_t *instB)
  2994. {
  2995. uint32_t op, argA, argB;
  2996. int32_t eventENUM;
  2997. uint32_t insts[4] = { 0 };
  2998. uint32_t addr = 0;
  2999. const char *module = NULL;
  3000. int32_t irqFlag = pTask->irqFlag;
  3001. int isSMIHang = 0;
  3002. do {
  3003. /* confirm if SMI is hang */
  3004. isSMIHang = cmdq_get_func()->dumpSMI(0);
  3005. if (isSMIHang) {
  3006. module = "SMI";
  3007. break;
  3008. }
  3009. /* other cases, use instruction to judge */
  3010. /* because scenario / HW flag are not sufficient */
  3011. /* e.g. ISP pass 2 involves both MDP and ISP */
  3012. /* so we need to check which instruction timeout-ed. */
  3013. if (cmdq_core_get_pc(pTask, thread, insts)) {
  3014. op = (insts[3] & 0xFF000000) >> 24;
  3015. argA = insts[3] & (~0xFF000000);
  3016. argB = insts[2];
  3017. /* quick exam by hwflag first */
  3018. module = cmdq_get_func()->parseErrorModule(pTask);
  3019. if (NULL != module)
  3020. break;
  3021. switch (op) {
  3022. case CMDQ_CODE_POLL:
  3023. case CMDQ_CODE_WRITE:
  3024. addr = cmdq_core_subsys_to_reg_addr(argA);
  3025. module = cmdq_get_func()->parseModule(addr);
  3026. break;
  3027. case CMDQ_CODE_WFE:
  3028. /* argA is the event ID */
  3029. eventENUM = cmdq_core_reverse_event_ENUM(argA);
  3030. module = cmdq_get_func()->moduleFromEvent(eventENUM);
  3031. break;
  3032. case CMDQ_CODE_READ:
  3033. case CMDQ_CODE_MOVE:
  3034. case CMDQ_CODE_JUMP:
  3035. case CMDQ_CODE_EOC:
  3036. default:
  3037. module = "CMDQ";
  3038. break;
  3039. }
  3040. break;
  3041. }
  3042. module = "CMDQ";
  3043. break;
  3044. } while (0);
  3045. /* fill output parameter */
  3046. *moduleName = module;
  3047. *flag = irqFlag;
  3048. *instA = insts[3];
  3049. *instB = insts[2];
  3050. }
/*
 * Dump the state of the shared-SRAM resource whose lockEvent matches
 * resourceEvent: timestamps (notify/delay, lock/unlock, acquire/release),
 * usage flags, and whether a release callback is registered.
 * No-op when the SRAM-share feature is off; stops after the first match.
 */
void cmdq_core_dump_resource_status(CMDQ_EVENT_ENUM resourceEvent)
{
	struct ResourceUnitStruct *pResource = NULL;
	struct list_head *p = NULL;

	if (cmdq_core_is_feature_off(CMDQ_FEATURE_SRAM_SHARE))
		return;

	list_for_each(p, &gCmdqContext.resourceList) {
		pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
		if (resourceEvent == pResource->lockEvent) {
			CMDQ_ERR("[Res] Dump resource with event: %d\n", resourceEvent);
			/* fields below are protected by gCmdqResourceMutex */
			mutex_lock(&gCmdqResourceMutex);
			/* find matched resource */
			CMDQ_ERR("[Res] Dump resource latest time:\n");
			CMDQ_ERR("[Res] notify: %llu, delay: %lld\n", pResource->notify, pResource->delay);
			CMDQ_ERR("[Res] lock: %llu, unlock: %lld\n", pResource->lock, pResource->unlock);
			CMDQ_ERR("[Res] acquire: %llu, release: %lld\n", pResource->acquire, pResource->release);
			CMDQ_ERR("[Res] isUsed:%d, isDelay:%d\n", pResource->used, pResource->delaying);
			if (NULL == pResource->releaseCB)
				CMDQ_ERR("[Res]: release CB func is NULL\n");
			mutex_unlock(&gCmdqResourceMutex);
			break;
		}
	}
}
/*
 * Dump the current PC of pTask on the given thread: the instruction words
 * and their parsed form, prefixed with `tag`. For a WFE instruction the
 * current value of the awaited event token is also read and dumped, plus
 * any matching shared-resource state.
 *
 * Returns the PC's virtual address, or NULL when unavailable (secure tasks
 * hide their PC; that case is logged distinctly).
 */
static uint32_t *cmdq_core_dump_pc(const TaskStruct *pTask, int thread, const char *tag)
{
	uint32_t *pcVA = NULL;
	uint32_t insts[4] = { 0 };
	char parsedInstruction[128] = { 0 };

	pcVA = cmdq_core_get_pc(pTask, thread, insts);
	if (pcVA) {
		const uint32_t op = (insts[3] & 0xFF000000) >> 24;

		cmdq_core_parse_instruction(pcVA, parsedInstruction, sizeof(parsedInstruction));
		/* for WFE, we specifically dump the event value */
		if (op == CMDQ_CODE_WFE) {
			uint32_t regValue = 0;
			/* low 10 bits of the instruction hold the event ID */
			const uint32_t eventID = 0x3FF & insts[3];

			CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_ID, eventID);
			regValue = CMDQ_REG_GET32(CMDQ_SYNC_TOKEN_VAL);
			CMDQ_LOG("[%s]Thread %d PC(VA): 0x%p, 0x%08x:0x%08x => %s, value:(%d)",
				 tag, thread, pcVA, insts[2], insts[3], parsedInstruction, regValue);
			cmdq_core_dump_resource_status(eventID);
		} else {
			CMDQ_LOG("[%s]Thread %d PC(VA): 0x%p, 0x%08x:0x%08x => %s",
				 tag, thread, pcVA, insts[2], insts[3], parsedInstruction);
		}
	} else {
		if (true == pTask->secData.isSecure) {
			CMDQ_LOG("[%s]Thread %d PC(VA): HIDDEN INFO since is it's secure thread\n",
				 tag, thread);
		} else {
			CMDQ_LOG("[%s]Thread %d PC(VA): Not available\n", tag, thread);
		}
	}
	return pcVA;
}
  3107. static void cmdq_core_dump_status(const char *tag)
  3108. {
  3109. int32_t coreExecThread = CMDQ_INVALID_THREAD;
  3110. uint32_t value[6] = { 0 };
  3111. value[0] = CMDQ_REG_GET32(CMDQ_CURR_LOADED_THR);
  3112. value[1] = CMDQ_REG_GET32(CMDQ_THR_EXEC_CYCLES);
  3113. value[2] = CMDQ_REG_GET32(CMDQ_THR_TIMEOUT_TIMER);
  3114. value[3] = CMDQ_REG_GET32(CMDQ_BUS_CONTROL_TYPE);
  3115. /* this returns (1 + index of least bit set) or 0 if input is 0. */
  3116. coreExecThread = __builtin_ffs(value[0]) - 1;
  3117. CMDQ_LOG("[%s]IRQ flag:0x%08x, Execing:%d, Exec Thread:%d, CMDQ_CURR_LOADED_THR: 0x%08x\n",
  3118. tag,
  3119. CMDQ_REG_GET32(CMDQ_CURR_IRQ_STATUS),
  3120. (0x80000000 & value[0]) ? 1 : 0, coreExecThread, value[0]);
  3121. CMDQ_LOG("[%s]CMDQ_THR_EXEC_CYCLES:0x%08x, CMDQ_THR_TIMER:0x%08x, CMDQ_BUS_CTRL:0x%08x\n",
  3122. tag, value[1], value[2], value[3]);
  3123. CMDQ_LOG("[%s]CMDQ_DEBUG_1: 0x%08x\n", tag, CMDQ_REG_GET32((GCE_BASE_VA + 0xF0)));
  3124. CMDQ_LOG("[%s]CMDQ_DEBUG_2: 0x%08x\n", tag, CMDQ_REG_GET32((GCE_BASE_VA + 0xF4)));
  3125. CMDQ_LOG("[%s]CMDQ_DEBUG_3: 0x%08x\n", tag, CMDQ_REG_GET32((GCE_BASE_VA + 0xF8)));
  3126. CMDQ_LOG("[%s]CMDQ_DEBUG_4: 0x%08x\n", tag, CMDQ_REG_GET32((GCE_BASE_VA + 0xFC)));
  3127. }
/*
 * Dump the PC of the display trigger-loop task plus the current value of
 * the CMDQ_EVENT_DISP_RDMA0_EOF token, each line prefixed with `tag`.
 * No-op when that thread has no running loop task.
 */
void cmdq_core_dump_disp_trigger_loop(const char *tag)
{
	/* we assume the first non-high-priority thread is the trigger loop thread, */
	/* since it will start very early */
	if (gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].taskCount
	    && gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].pCurTask[1]
	    && gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].loopCallback) {
		uint32_t regValue = 0;
		TaskStruct *pTask =
		    gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].pCurTask[1];

		cmdq_core_dump_pc(pTask, CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT, tag);
		regValue = cmdqCoreGetEvent(CMDQ_EVENT_DISP_RDMA0_EOF);
		CMDQ_LOG("[%s]CMDQ_SYNC_TOKEN_VAL of %s is %d\n",
			 tag, cmdq_core_get_event_name_ENUM(CMDQ_EVENT_DISP_RDMA0_EOF), regValue);
	}
}
  3144. void cmdq_core_dump_disp_trigger_loop_mini(const char *tag)
  3145. {
  3146. /* we assume the first non-high-priority thread is trigger loop thread. */
  3147. /* since it will start very early */
  3148. if (gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].taskCount
  3149. && gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].pCurTask[1]
  3150. && gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].loopCallback) {
  3151. TaskStruct *pTask =
  3152. gCmdqContext.thread[CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT].pCurTask[1];
  3153. cmdq_core_dump_pc(pTask, CMDQ_MAX_HIGH_PRIORITY_THREAD_COUNT, tag);
  3154. }
  3155. }
/*
 * Dump the PC of the first task on the given thread whose PC can be read.
 * The instruction words and their parsed form are logged; for a WFE the
 * current event token value is dumped as well. No-op for an invalid thread
 * or when no task on the thread yields a readable PC.
 */
static void cmdq_core_dump_thread_pc(const int32_t thread)
{
	int32_t i;
	ThreadStruct *pThread;
	TaskStruct *pTask;
	uint32_t *pcVA;
	uint32_t insts[4] = { 0 };
	char parsedInstruction[128] = { 0 };

	if (CMDQ_INVALID_THREAD == thread)
		return;

	pThread = &(gCmdqContext.thread[thread]);
	pcVA = NULL;
	for (i = 0; i < cmdq_core_max_task_in_thread(thread); i++) {
		pTask = pThread->pCurTask[i];
		if (NULL == pTask)
			continue;
		pcVA = cmdq_core_get_pc(pTask, thread, insts);
		if (pcVA) {
			const uint32_t op = (insts[3] & 0xFF000000) >> 24;

			cmdq_core_parse_instruction(pcVA, parsedInstruction,
						    sizeof(parsedInstruction));
			/* for wait event case, dump token value */
			/* for WFE, we specifically dump the event value */
			if (op == CMDQ_CODE_WFE) {
				uint32_t regValue = 0;
				/* low 10 bits of the instruction hold the event ID */
				const uint32_t eventID = 0x3FF & insts[3];

				CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_ID, eventID);
				regValue = CMDQ_REG_GET32(CMDQ_SYNC_TOKEN_VAL);
				CMDQ_LOG
				    ("[INFO]task:%p(ID:%d), Thread %d PC(VA): 0x%p, 0x%08x:0x%08x => %s, value:(%d)",
				     pTask, i, thread, pcVA, insts[2], insts[3], parsedInstruction, regValue);
			} else {
				CMDQ_LOG
				    ("[INFO]task:%p(ID:%d), Thread %d PC(VA): 0x%p, 0x%08x:0x%08x => %s",
				     pTask, i, thread, pcVA, insts[2], insts[3], parsedInstruction);
			}
			break;
		}
	}
}
  3196. static void cmdq_core_dump_task_with_engine_flag(uint64_t engineFlag)
  3197. {
  3198. struct TaskStruct *pDumpTask = NULL;
  3199. struct list_head *p = NULL;
  3200. CMDQ_ERR
  3201. ("=============== [CMDQ] All active tasks sharing same engine flag 0x%08llx===============\n",
  3202. engineFlag);
  3203. list_for_each(p, &gCmdqContext.taskActiveList) {
  3204. pDumpTask = list_entry(p, struct TaskStruct, listEntry);
  3205. if (true == cmdq_core_is_valid_in_active_list(pDumpTask) && (engineFlag & pDumpTask->engineFlag)) {
  3206. CMDQ_ERR("Thr %d, Task: 0x%p, VABase: 0x%p, MVABase 0x%pa, Size: %d\n",
  3207. (pDumpTask->thread), (pDumpTask),
  3208. (pDumpTask->pVABase), &(pDumpTask->MVABase),
  3209. pDumpTask->commandSize);
  3210. CMDQ_ERR(" cont'd: Flag: 0x%08llx, Last Inst 0x%08x:0x%08x, 0x%08x:0x%08x\n",
  3211. pDumpTask->engineFlag, pDumpTask->pCMDEnd[-3],
  3212. pDumpTask->pCMDEnd[-2], pDumpTask->pCMDEnd[-1],
  3213. pDumpTask->pCMDEnd[0]);
  3214. }
  3215. }
  3216. }
/*
 * Dump every task slot of the given HW thread.
 *
 * fullTatskDump: dump each task via cmdq_core_dump_task() instead of the
 *                one-line summary.  (sic: parameter name kept as-is.)
 * dumpCookie:    also print the thread's cookie/counter state.
 * dumpCmd:       additionally hex-dump each task's command buffer.
 */
static void cmdq_core_dump_task_in_thread(const int32_t thread,
	const bool fullTatskDump, const bool dumpCookie,
	const bool dumpCmd)
{
	ThreadStruct *pThread;
	TaskStruct *pDumpTask;
	int32_t index;
	uint32_t value[4] = { 0 };
	uint32_t cookie;

	if (CMDQ_INVALID_THREAD == thread)
		return;

	pThread = &(gCmdqContext.thread[thread]);
	pDumpTask = NULL;
	CMDQ_ERR("=============== [CMDQ] All Task in Error Thread %d ===============\n", thread);
	cookie = cmdq_core_thread_exec_counter(thread);
	if (dumpCookie) {
		CMDQ_ERR
		    ("Curr Cookie: %d, Wait Cookie: %d, Next Cookie: %d, Task Count %d, engineFlag: 0x%llx\n",
		     cookie, pThread->waitCookie, pThread->nextCookie, pThread->taskCount,
		     pThread->engineFlag);
	}

	for (index = 0; index < cmdq_core_max_task_in_thread(thread); index++) {
		pDumpTask = pThread->pCurTask[index];
		if (NULL == pDumpTask)
			continue;
		/* full task dump */
		if (fullTatskDump) {
			CMDQ_ERR("Slot %d, Task: 0x%p\n", index, pDumpTask);
			cmdq_core_dump_task(pDumpTask);
			if (true == dumpCmd) {
				print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4,
					       pDumpTask->pVABase, (pDumpTask->commandSize), true);
			}
			continue;
		}
		/* otherwise, simple dump task info */
		/* 0xBCBCBCBC marks a task whose command buffer is gone */
		if (NULL == pDumpTask->pVABase) {
			value[0] = 0xBCBCBCBC;
			value[1] = 0xBCBCBCBC;
			value[2] = 0xBCBCBCBC;
			value[3] = 0xBCBCBCBC;
		} else {
			/* last two instructions (EOC + JUMP) of the buffer */
			value[0] = pDumpTask->pCMDEnd[-3];
			value[1] = pDumpTask->pCMDEnd[-2];
			value[2] = pDumpTask->pCMDEnd[-1];
			value[3] = pDumpTask->pCMDEnd[0];
		}
		CMDQ_ERR("Slot %d, Task: 0x%p, VABase: 0x%p, MVABase 0x%pa, Size: %d\n",
			 index, (pDumpTask), (pDumpTask->pVABase),
			 &(pDumpTask->MVABase), pDumpTask->commandSize);
		CMDQ_ERR(" cont'd: Last Inst 0x%08x:0x%08x, 0x%08x:0x%08x, priority:%d\n",
			 value[0], value[1], value[2], value[3], pDumpTask->priority);
		if (true == dumpCmd) {
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4,
				       pDumpTask->pVABase, (pDumpTask->commandSize), true);
		}
	}
}
  3275. void cmdq_core_dump_secure_metadata(cmdqSecDataStruct *pSecData)
  3276. {
  3277. #ifdef CMDQ_SECURE_PATH_SUPPORT
  3278. uint32_t i = 0;
  3279. cmdqSecAddrMetadataStruct *pAddr = NULL;
  3280. if (NULL == pSecData)
  3281. return;
  3282. pAddr = (cmdqSecAddrMetadataStruct *) (CMDQ_U32_PTR(pSecData->addrMetadatas));
  3283. CMDQ_LOG("========= pSecData: %p dump =========\n", pSecData);
  3284. CMDQ_LOG("count:%d(%d), enginesNeedDAPC:0x%llx, enginesPortSecurity:0x%llx\n",
  3285. pSecData->addrMetadataCount, pSecData->addrMetadataMaxCount,
  3286. pSecData->enginesNeedDAPC, pSecData->enginesNeedPortSecurity);
  3287. if (NULL == pAddr)
  3288. return;
  3289. for (i = 0; i < pSecData->addrMetadataCount; i++) {
  3290. CMDQ_LOG("idx:%d, type:%d, baseHandle:%x, offset:%d, size:%d, port:%d\n",
  3291. i, pAddr[i].type, pAddr[i].baseHandle, pAddr[i].offset, pAddr[i].size,
  3292. pAddr[i].port);
  3293. }
  3294. #endif
  3295. }
  3296. int32_t cmdq_core_interpret_instruction(char *textBuf, int bufLen,
  3297. const uint32_t op, const uint32_t argA, const uint32_t argB)
  3298. {
  3299. int reqLen = 0;
  3300. switch (op) {
  3301. case CMDQ_CODE_MOVE:
  3302. if (1 & (argA >> 23)) {
  3303. reqLen =
  3304. snprintf(textBuf, bufLen, "MOVE: 0x%08x to Reg%d\n", argB,
  3305. (argA >> 16) & 0x1f);
  3306. } else {
  3307. reqLen = snprintf(textBuf, bufLen, "Set MASK: 0x%08x\n", argB);
  3308. }
  3309. break;
  3310. case CMDQ_CODE_READ:
  3311. case CMDQ_CODE_WRITE:
  3312. case CMDQ_CODE_POLL:
  3313. reqLen = snprintf(textBuf, bufLen, "%s: ", cmdq_core_parse_op(op));
  3314. bufLen -= reqLen;
  3315. textBuf += reqLen;
  3316. /* data (value) */
  3317. if (argA & (1 << 22)) {
  3318. reqLen = snprintf(textBuf, bufLen, "Reg%d, ", argB);
  3319. bufLen -= reqLen;
  3320. textBuf += reqLen;
  3321. } else {
  3322. reqLen = snprintf(textBuf, bufLen, "0x%08x, ", argB);
  3323. bufLen -= reqLen;
  3324. textBuf += reqLen;
  3325. }
  3326. /* address */
  3327. if (argA & (1 << 23)) {
  3328. reqLen = snprintf(textBuf, bufLen, "Reg%d\n", (argA >> 16) & 0x1F);
  3329. bufLen -= reqLen;
  3330. textBuf += reqLen;
  3331. } else {
  3332. const uint32_t addr = cmdq_core_subsys_to_reg_addr(argA);
  3333. const uint32_t addrMask = 0xFFFFFFFE;
  3334. reqLen = snprintf(textBuf, bufLen, "addr=0x%08x [%s], use_mask=%d\n",
  3335. (addr & addrMask),
  3336. cmdq_get_func()->parseModule(addr), (addr & 0x1));
  3337. bufLen -= reqLen;
  3338. textBuf += reqLen;
  3339. }
  3340. break;
  3341. case CMDQ_CODE_JUMP:
  3342. if (argA) {
  3343. if (argA & (1 << 22)) {
  3344. /* jump by register */
  3345. reqLen = snprintf(textBuf, bufLen, "JUMP(register): Reg%d\n", argB);
  3346. } else {
  3347. /* absolute */
  3348. reqLen =
  3349. snprintf(textBuf, bufLen, "JUMP(absolute): 0x%08x\n", argB);
  3350. }
  3351. } else {
  3352. /* relative */
  3353. if ((int32_t) argB >= 0) {
  3354. reqLen = snprintf(textBuf, bufLen,
  3355. "JUMP(relative): +%d\n", (int32_t) argB);
  3356. } else {
  3357. reqLen = snprintf(textBuf, bufLen,
  3358. "JUMP(relative): %d\n", (int32_t) argB);
  3359. }
  3360. }
  3361. break;
  3362. case CMDQ_CODE_WFE:
  3363. if (0x80008001 == argB) {
  3364. reqLen =
  3365. snprintf(textBuf, bufLen, "Wait And Clear Event: %s\n",
  3366. cmdq_core_get_event_name(argA));
  3367. } else if (0x80000000 == argB) {
  3368. reqLen =
  3369. snprintf(textBuf, bufLen, "Clear Event: %s\n",
  3370. cmdq_core_get_event_name(argA));
  3371. } else if (0x80010000 == argB) {
  3372. reqLen =
  3373. snprintf(textBuf, bufLen, "Set Event: %s\n",
  3374. cmdq_core_get_event_name(argA));
  3375. } else if (0x00008001 == argB) {
  3376. reqLen =
  3377. snprintf(textBuf, bufLen, "Wait No Clear Event: %s\n",
  3378. cmdq_core_get_event_name(argA));
  3379. } else {
  3380. reqLen = snprintf(textBuf, bufLen,
  3381. "SYNC: %s, upd=%d, op=%d, val=%d, wait=%d, wop=%d, val=%d\n",
  3382. cmdq_core_get_event_name(argA),
  3383. (argB >> 31) & 0x1,
  3384. (argB >> 28) & 0x7,
  3385. (argB >> 16) & 0xFFF,
  3386. (argB >> 15) & 0x1,
  3387. (argB >> 12) & 0x7, (argB >> 0) & 0xFFF);
  3388. }
  3389. break;
  3390. case CMDQ_CODE_EOC:
  3391. if (argA == 0 && argB == 0x00000001) {
  3392. reqLen = snprintf(textBuf, bufLen, "EOC\n");
  3393. } else {
  3394. if (cmdq_core_support_sync_non_suspendable()) {
  3395. reqLen = snprintf(textBuf, bufLen,
  3396. "MARKER: sync_no_suspnd=%d",
  3397. (argA & (1 << 20)) > 0);
  3398. } else {
  3399. reqLen = snprintf(textBuf, bufLen, "MARKER:");
  3400. }
  3401. bufLen -= reqLen;
  3402. textBuf += reqLen;
  3403. if (argB == 0x00100000) {
  3404. reqLen = snprintf(textBuf, bufLen, " Disable");
  3405. } else if (argB == 0x00130000) {
  3406. reqLen = snprintf(textBuf, bufLen, " Enable");
  3407. } else {
  3408. reqLen = snprintf(textBuf, bufLen,
  3409. "no_suspnd=%d, no_inc=%d, m=%d, m_en=%d, prefetch=%d, irq=%d\n",
  3410. (argA & (1 << 21)) > 0,
  3411. (argA & (1 << 16)) > 0,
  3412. (argB & (1 << 20)) > 0,
  3413. (argB & (1 << 17)) > 0,
  3414. (argB & (1 << 16)) > 0, (argB & (1 << 0)) > 0);
  3415. }
  3416. }
  3417. break;
  3418. default:
  3419. reqLen = snprintf(textBuf, bufLen, "UNDEFINED\n");
  3420. break;
  3421. }
  3422. return reqLen;
  3423. }
  3424. int32_t cmdq_core_parse_instruction(const uint32_t *pCmd, char *textBuf, int bufLen)
  3425. {
  3426. int reqLen = 0;
  3427. const uint32_t op = (pCmd[1] & 0xFF000000) >> 24;
  3428. const uint32_t argA = pCmd[1] & (~0xFF000000);
  3429. const uint32_t argB = pCmd[0];
  3430. reqLen = cmdq_core_interpret_instruction(textBuf, bufLen, op, argA, argB);
  3431. return reqLen;
  3432. }
  3433. void cmdq_core_dump_error_instruction(const uint32_t *pcVA, uint32_t *insts, int thread,
  3434. uint32_t lineNum)
  3435. {
  3436. char parsedInstruction[128] = { 0 };
  3437. const uint32_t op = (insts[3] & 0xFF000000) >> 24;
  3438. if (pcVA == NULL) {
  3439. CMDQ_ERR("Dump error instruction with null va, line: %u\n", lineNum);
  3440. return;
  3441. }
  3442. cmdq_core_parse_instruction(pcVA, parsedInstruction, sizeof(parsedInstruction));
  3443. CMDQ_ERR("Thread %d error instruction: 0x%p, 0x%08x:0x%08x => %s",
  3444. thread, pcVA, insts[2], insts[3], parsedInstruction);
  3445. /* for WFE, we specifically dump the event value */
  3446. if (op == CMDQ_CODE_WFE) {
  3447. uint32_t regValue = 0;
  3448. const uint32_t eventID = 0x3FF & insts[3];
  3449. CMDQ_REG_SET32(CMDQ_SYNC_TOKEN_ID, eventID);
  3450. regValue = CMDQ_REG_GET32(CMDQ_SYNC_TOKEN_VAL);
  3451. CMDQ_ERR("CMDQ_SYNC_TOKEN_VAL of %s is %d\n",
  3452. cmdq_core_get_event_name(eventID), regValue);
  3453. }
  3454. }
  3455. static void cmdq_core_dump_summary(const TaskStruct *pTask, int thread,
  3456. const TaskStruct **pOutNGTask)
  3457. {
  3458. uint32_t *pcVA = NULL;
  3459. uint32_t insts[4] = { 0 };
  3460. ThreadStruct *pThread;
  3461. const TaskStruct *pNGTask = NULL;
  3462. const char *module = NULL;
  3463. int32_t index;
  3464. uint32_t instA = 0, instB = 0;
  3465. int32_t irqFlag = 0;
  3466. if (NULL == pTask) {
  3467. CMDQ_ERR("dump summary failed since pTask is NULL");
  3468. return;
  3469. }
  3470. if ((NULL == pTask->pVABase) || (CMDQ_INVALID_THREAD == thread)) {
  3471. CMDQ_ERR
  3472. ("dump summary failed since invalid param, pTask %p, pTask->pVABase:%p, thread:%d\n",
  3473. pTask, pTask->pVABase, thread);
  3474. return;
  3475. }
  3476. if (true == pTask->secData.isSecure) {
  3477. CMDQ_ERR("Summary dump does not support secure now.\n");
  3478. pNGTask = pTask;
  3479. return;
  3480. }
  3481. /* Find correct task */
  3482. pThread = &(gCmdqContext.thread[thread]);
  3483. pcVA = cmdq_core_get_pc(pTask, thread, insts);
  3484. if (NULL == pcVA) {
  3485. /* Find all task to get correct PC */
  3486. for (index = 0; index < cmdq_core_max_task_in_thread(thread); index++) {
  3487. pNGTask = pThread->pCurTask[index];
  3488. if (NULL == pNGTask)
  3489. continue;
  3490. pcVA = cmdq_core_get_pc(pNGTask, thread, insts);
  3491. if (pcVA) {
  3492. /* we got NG task ! */
  3493. break;
  3494. }
  3495. }
  3496. }
  3497. if (NULL == pNGTask)
  3498. pNGTask = pTask;
  3499. /* Do summary ! */
  3500. CMDQ_ERR("***************************************\n");
  3501. cmdq_core_parse_error(pNGTask, thread, &module, &irqFlag, &instA, &instB);
  3502. CMDQ_ERR("** [Module] %s **\n", module);
  3503. if (pTask != pNGTask) {
  3504. CMDQ_ERR
  3505. ("** [Note] PC is not in first error task (0x%p) but in previous task (0x%p) **\n",
  3506. pTask, pNGTask);
  3507. }
  3508. CMDQ_ERR("** [Error Info] Refer to instruction and check engine dump for debug**\n");
  3509. cmdq_core_dump_error_instruction(pcVA, insts, thread, __LINE__);
  3510. cmdq_core_dump_disp_trigger_loop("ERR");
  3511. CMDQ_ERR("***************************************\n");
  3512. *pOutNGTask = pNGTask;
  3513. }
  3514. static void cmdqCoreDumpCommandMem(const uint32_t *pCmd, int32_t commandSize)
  3515. {
  3516. static char textBuf[128] = { 0 };
  3517. int i;
  3518. mutex_lock(&gCmdqTaskMutex);
  3519. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4, pCmd, commandSize, false);
  3520. CMDQ_LOG("======TASK command buffer END\n");
  3521. for (i = 0; i < commandSize; i += CMDQ_INST_SIZE, pCmd += 2) {
  3522. cmdq_core_parse_instruction(pCmd, textBuf, 128);
  3523. CMDQ_LOG("%s", textBuf);
  3524. }
  3525. CMDQ_LOG("TASK command buffer TRANSLATED END\n");
  3526. mutex_unlock(&gCmdqTaskMutex);
  3527. }
  3528. int32_t cmdqCoreDebugDumpCommand(TaskStruct *pTask)
  3529. {
  3530. if (NULL == pTask)
  3531. return -EFAULT;
  3532. CMDQ_LOG("======TASK 0x%p , size (%d) command START\n", pTask, pTask->commandSize);
  3533. cmdqCoreDumpCommandMem(pTask->pVABase, pTask->commandSize);
  3534. CMDQ_LOG("======TASK 0x%p command END\n", pTask);
  3535. return 0;
  3536. }
  3537. void cmdq_core_set_command_buffer_dump(int32_t scenario, int32_t bufferSize)
  3538. {
  3539. mutex_lock(&gCmdqSaveBufferMutex);
  3540. if (bufferSize != gCmdqBufferDump.bufferSize && bufferSize != -1) {
  3541. if (gCmdqBufferDump.bufferSize != 0) {
  3542. vfree(gCmdqBufferDump.cmdqString);
  3543. gCmdqBufferDump.bufferSize = 0;
  3544. gCmdqBufferDump.count = 0;
  3545. }
  3546. if (bufferSize > 0) {
  3547. gCmdqBufferDump.bufferSize = bufferSize;
  3548. gCmdqBufferDump.cmdqString = vmalloc(gCmdqBufferDump.bufferSize);
  3549. }
  3550. }
  3551. if (-1 == scenario) {
  3552. /* clear all scenario */
  3553. gCmdqBufferDump.scenario = 0LL;
  3554. } else if (-2 == scenario) {
  3555. /* set all scenario */
  3556. gCmdqBufferDump.scenario = ~0LL;
  3557. } else if (scenario >= 0 && scenario < CMDQ_MAX_SCENARIO_COUNT) {
  3558. /* set scenario to save command buffer */
  3559. gCmdqBufferDump.scenario |= (1LL << scenario);
  3560. }
  3561. CMDQ_LOG("[SET DUMP]CONFIG: bufferSize: %d, scenario: 0x%08llx\n",
  3562. gCmdqBufferDump.bufferSize, gCmdqBufferDump.scenario);
  3563. mutex_unlock(&gCmdqSaveBufferMutex);
  3564. }
  3565. static void cmdq_core_save_buffer(const char *string, ...)
  3566. {
  3567. int logLen, redundantLen, i;
  3568. va_list argptr;
  3569. char *pBuffer;
  3570. va_start(argptr, string);
  3571. do {
  3572. logLen = vsnprintf(NULL, 0, string, argptr) + 1;
  3573. if (logLen <= 1)
  3574. break;
  3575. redundantLen = gCmdqBufferDump.bufferSize - gCmdqBufferDump.count;
  3576. if (logLen >= redundantLen) {
  3577. for (i = 0; i < redundantLen; i++)
  3578. *(gCmdqBufferDump.cmdqString + gCmdqBufferDump.count + i) = 0;
  3579. gCmdqBufferDump.count = 0;
  3580. }
  3581. pBuffer = gCmdqBufferDump.cmdqString + gCmdqBufferDump.count;
  3582. gCmdqBufferDump.count += vsnprintf(pBuffer, logLen, string, argptr);
  3583. } while (0);
  3584. va_end(argptr);
  3585. }
/*
 * If the task's scenario is selected in gCmdqBufferDump.scenario, append
 * a translated copy of its command buffer (with kernel + UTC timestamps
 * and caller info) to the save buffer.  Skipped entirely once any CMDQ
 * error has been recorded, or when saving is not configured.
 */
static void cmdq_core_save_command_buffer_dump(const TaskStruct *pTask)
{
	/* shared scratch buffer; safe because writers hold gCmdqSaveBufferMutex */
	static char textBuf[128] = { 0 };
	struct timeval savetv;
	struct tm nowTM;
	unsigned long long saveTimeSec;
	unsigned long rem_nsec;
	const uint32_t *pCmd;
	int i;

	if (gCmdqContext.errNum > 0)
		return;
	if (gCmdqBufferDump.bufferSize <= 0 || NULL == pTask->pVABase)
		return;

	mutex_lock(&gCmdqSaveBufferMutex);
	if (gCmdqBufferDump.scenario & (1LL << pTask->scenario)) {
		pCmd = pTask->pVABase;
		cmdq_core_save_buffer("************TASK command buffer TRANSLATED************\n");
		/* get kernel time */
		saveTimeSec = sched_clock();
		rem_nsec = do_div(saveTimeSec, 1000000000);
		/* get UTC time */
		do_gettimeofday(&savetv);
		time_to_tm(savetv.tv_sec, sys_tz.tz_minuteswest * 60, &nowTM);
		/* print current task information */
		cmdq_core_save_buffer("kernel time:[%5llu.%06lu],", saveTimeSec, rem_nsec / 1000);
		cmdq_core_save_buffer(" UTC time:[%04ld-%02d-%02d %02d:%02d:%02d.%06ld]",
				      (nowTM.tm_year + 1900), (nowTM.tm_mon + 1), nowTM.tm_mday,
				      nowTM.tm_hour, nowTM.tm_min, nowTM.tm_sec,
				      savetv.tv_usec);
		cmdq_core_save_buffer(" Pid: %d, Name: %s\n", pTask->callerPid, pTask->callerName);
		cmdq_core_save_buffer("Task: 0x%p, Scenario: %d, Size: %d, Flag: 0x%016llx\n",
				      pTask, pTask->scenario, pTask->commandSize, pTask->engineFlag);
		/* print command buffer */
		for (i = 0; i < pTask->commandSize; i += CMDQ_INST_SIZE, pCmd += 2) {
			cmdq_core_parse_instruction(pCmd, textBuf, 128);
			cmdq_core_save_buffer("[%5llu.%06lu] %s", saveTimeSec, rem_nsec / 1000, textBuf);
		}
		cmdq_core_save_buffer("****************TASK command buffer END***************\n\n");
	}
	mutex_unlock(&gCmdqSaveBufferMutex);
}
  3627. #ifdef CMDQ_INSTRUCTION_COUNT
  3628. CmdqModulePAStatStruct gCmdqModulePAStat;
  3629. CmdqModulePAStatStruct *cmdq_core_Initial_and_get_module_stat(void)
  3630. {
  3631. memset(&gCmdqModulePAStat, 0, sizeof(gCmdqModulePAStat));
  3632. return &gCmdqModulePAStat;
  3633. }
  3634. ssize_t cmdqCorePrintInstructionCountLevel(struct device *dev, struct device_attribute *attr,
  3635. char *buf)
  3636. {
  3637. int len = 0;
  3638. if (buf)
  3639. len = sprintf(buf, "%d\n", gCmdqContext.instructionCountLevel);
  3640. return len;
  3641. }
  3642. ssize_t cmdqCoreWriteInstructionCountLevel(struct device *dev,
  3643. struct device_attribute *attr, const char *buf,
  3644. size_t size)
  3645. {
  3646. int len = 0;
  3647. int value = 0;
  3648. int status = 0;
  3649. char textBuf[10] = { 0 };
  3650. do {
  3651. if (size >= 10) {
  3652. status = -EFAULT;
  3653. break;
  3654. }
  3655. len = size;
  3656. memcpy(textBuf, buf, len);
  3657. textBuf[len] = '\0';
  3658. if (0 > kstrtoint(textBuf, 10, &value)) {
  3659. status = -EFAULT;
  3660. break;
  3661. }
  3662. status = len;
  3663. if (value < 0)
  3664. value = 0;
  3665. cmdq_core_set_instruction_count_level(value);
  3666. } while (0);
  3667. return status;
  3668. }
/* Set the instruction-count statistics level; values < 1 disable the
 * per-task instruction statistics collection. */
void cmdq_core_set_instruction_count_level(const int32_t value)
{
	gCmdqContext.instructionCountLevel = value;
}
  3673. static void cmdq_core_fill_module_stat(const uint32_t *pCommand,
  3674. unsigned short *pModuleCount,
  3675. uint32_t *pOtherInstruction,
  3676. uint32_t *pOtherInstructionCount)
  3677. {
  3678. const uint32_t argA = pCommand[1] & (~0xFF000000);
  3679. const uint32_t addr = cmdq_core_subsys_to_reg_addr(argA);
  3680. int32_t i;
  3681. for (i = 0; i < CMDQ_MODULE_STAT_GPR; i++) {
  3682. if ((gCmdqModulePAStat.start[i] > 0) && (addr >= gCmdqModulePAStat.start[i])
  3683. && (addr <= gCmdqModulePAStat.end[i])) {
  3684. pModuleCount[i]++;
  3685. break;
  3686. }
  3687. }
  3688. if (i >= CMDQ_MODULE_STAT_GPR) {
  3689. if (3 & (pCommand[1] >> 22)) {
  3690. pModuleCount[CMDQ_MODULE_STAT_GPR]++;
  3691. } else {
  3692. pOtherInstruction[(*pOtherInstructionCount)++] = addr;
  3693. pModuleCount[CMDQ_MODULE_STAT_OTHERS]++;
  3694. }
  3695. }
  3696. }
  3697. static void cmdq_core_fill_module_event_count(const uint32_t *pCommand,
  3698. unsigned short *pEventCount)
  3699. {
  3700. const uint32_t argA = pCommand[1] & (~0xFF000000);
  3701. if (argA >= CMDQ_MAX_HW_EVENT_COUNT)
  3702. pEventCount[CMDQ_EVENT_STAT_SW]++;
  3703. else
  3704. pEventCount[CMDQ_EVENT_STAT_HW]++;
  3705. }
/*
 * Walk a task's command buffer and fill pRecord with per-opcode and
 * per-module instruction statistics.  A MOVE establishing a mask is
 * expected to be followed by a WRITE (write-with-mask) or POLL; any
 * other pairing, or an unknown opcode, marks the whole buffer invalid
 * and the statistics are zeroed.  No-op when the instruction-count
 * level is below 1.
 */
static void cmdq_core_fill_task_instruction_stat(RecordStruct *pRecord, const TaskStruct *pTask)
{
	bool invalidinstruction = false;
	/* byte offset of the instruction currently being decoded */
	int32_t commandIndex = 0;
	uint32_t argA_prefetch_en, argB_prefetch_en, argA_prefetch_dis, argB_prefetch_dis;
	uint32_t *pCommand;
	uint32_t op;

	if (gCmdqContext.instructionCountLevel < 1)
		return;

	if ((NULL == pRecord) || (NULL == pTask))
		return;

	memset(&(pRecord->instructionStat[0]), 0x0, sizeof(pRecord->instructionStat));
	memset(&(pRecord->writeModule[0]), 0x0, sizeof(pRecord->writeModule));
	memset(&(pRecord->writewmaskModule[0]), 0x0, sizeof(pRecord->writewmaskModule));
	memset(&(pRecord->readModlule[0]), 0x0, sizeof(pRecord->readModlule));
	memset(&(pRecord->pollModule[0]), 0x0, sizeof(pRecord->pollModule));
	memset(&(pRecord->eventCount[0]), 0x0, sizeof(pRecord->eventCount));
	memset(&(pRecord->otherInstr[0]), 0x0, sizeof(pRecord->otherInstr));
	pRecord->otherInstrNUM = 0;

	do {
		pCommand = (uint32_t *) ((uint8_t *) pTask->pVABase + commandIndex);
		op = (pCommand[1] & 0xFF000000) >> 24;
		switch (op) {
		case CMDQ_CODE_MOVE:
			/* bit 23 set: real MOVE-to-GPR; special subsys means
			 * a PWM write emitted as MOVE */
			if (1 & (pCommand[1] >> 23)) {
				if (CMDQ_SPECIAL_SUBSYS_ADDR ==
				    cmdq_core_subsys_from_phys_addr(pCommand[0])) {
					commandIndex += CMDQ_INST_SIZE;
					pRecord->instructionStat[CMDQ_STAT_WRITE]++;
					pRecord->writeModule[CMDQ_MODULE_STAT_DISP_PWM]++;
				} else {
					pRecord->instructionStat[CMDQ_STAT_MOVE]++;
				}
			} else if ((commandIndex + CMDQ_INST_SIZE) < pTask->commandSize) {
				/* mask MOVE: consume the following WRITE/POLL
				 * as a combined masked operation */
				commandIndex += CMDQ_INST_SIZE;
				pCommand = (uint32_t *) ((uint8_t *) pTask->pVABase + commandIndex);
				op = (pCommand[1] & 0xFF000000) >> 24;
				if (CMDQ_CODE_WRITE == op) {
					pRecord->instructionStat[CMDQ_STAT_WRITE_W_MASK]++;
					cmdq_core_fill_module_stat(pCommand,
								   pRecord->writewmaskModule,
								   pRecord->otherInstr,
								   &(pRecord->otherInstrNUM));
				} else if (CMDQ_CODE_POLL == op) {
					pRecord->instructionStat[CMDQ_STAT_POLLING]++;
					cmdq_core_fill_module_stat(pCommand,
								   pRecord->pollModule,
								   pRecord->otherInstr,
								   &(pRecord->otherInstrNUM));
				} else {
					invalidinstruction = true;
				}
			} else {
				invalidinstruction = true;
			}
			break;
		case CMDQ_CODE_READ:
			pRecord->instructionStat[CMDQ_STAT_READ]++;
			cmdq_core_fill_module_stat(pCommand, pRecord->readModlule,
						   pRecord->otherInstr, &(pRecord->otherInstrNUM));
			break;
		case CMDQ_CODE_WRITE:
			pRecord->instructionStat[CMDQ_STAT_WRITE]++;
			cmdq_core_fill_module_stat(pCommand, pRecord->writeModule,
						   pRecord->otherInstr, &(pRecord->otherInstrNUM));
			break;
		case CMDQ_CODE_WFE:
			pRecord->instructionStat[CMDQ_STAT_SYNC]++;
			cmdq_core_fill_module_event_count(pCommand, pRecord->eventCount);
			break;
		case CMDQ_CODE_JUMP:
			pRecord->instructionStat[CMDQ_STAT_JUMP]++;
			break;
		case CMDQ_CODE_EOC:
			/* distinguish the two well-known prefetch marker
			 * encodings from a plain EOC */
			argB_prefetch_en = ((1 << 20) | (1 << 17) | (1 << 16));
			argA_prefetch_en =
			    (CMDQ_CODE_EOC << 24) | (0x1 << (53 - 32)) | (0x1 << (48 - 32));
			argB_prefetch_dis = (1 << 20);
			argA_prefetch_dis = (CMDQ_CODE_EOC << 24) | (0x1 << (48 - 32));
			if ((argB_prefetch_en == pCommand[0]) && (argA_prefetch_en == pCommand[1])) {
				pRecord->instructionStat[CMDQ_STAT_PREFETCH_EN]++;
			} else if ((argB_prefetch_dis == pCommand[0])
				   && (argA_prefetch_dis == pCommand[1])) {
				pRecord->instructionStat[CMDQ_STAT_PREFETCH_DIS]++;
			} else {
				pRecord->instructionStat[CMDQ_STAT_EOC]++;
			}
			break;
		default:
			invalidinstruction = true;
			break;
		}
		commandIndex += CMDQ_INST_SIZE;
		/* unknown or malformed buffer: discard all statistics */
		if (true == invalidinstruction) {
			memset(&(pRecord->instructionStat[0]), 0x0,
			       sizeof(pRecord->instructionStat));
			break;
		}
	} while (commandIndex < pTask->commandSize);
}
/* Column labels for the instruction-count seq output; the order must
 * match the CMDQ_MODULE_STAT_* enum used to index the stat arrays. */
static const char *gCmdqModuleInstructionLabel[CMDQ_MODULE_STAT_MAX] = {
	"MMSYS_CONFIG",
	"MDP_RDMA",
	"MDP_RSZ0",
	"MDP_RSZ1",
	"MDP_WDMA",
	"MDP_WROT",
	"MDP_TDSHP",
	"MM_MUTEX",
	"VENC",
	"DISP_OVL0",
	"DISP_OVL1",
	"DISP_RDMA0",
	"DISP_RDMA1",
	"DISP_WDMA0",
	"DISP_COLOR",
	"DISP_CCORR",
	"DISP_AAL",
	"DISP_GAMMA",
	"DISP_DITHER",
	"DISP_UFOE",
	"DISP_PWM",
	"DISP_WDMA1",
	"DISP_MUTEX",
	"DISP_DSI0",
	"DISP_DPI0",
	"DISP_OD",
	"CAM0",
	"CAM1",
	"CAM2",
	"CAM3",
	"SODI",
	"GPR",
	"Others",
};
/*
 * seq_file show handler: print the instruction-count statistics of all
 * recorded tasks (newest first), one CSV-like row per record, followed
 * by the raw addresses of instructions that matched no known module.
 * No-op when the instruction-count level is below 1.
 */
int cmdqCorePrintInstructionCountSeq(struct seq_file *m, void *v)
{
	unsigned long flags;
	int32_t i;
	int32_t index;
	int32_t numRec;
	RecordStruct record;

	if (gCmdqContext.instructionCountLevel < 1)
		return 0;

	seq_puts(m, "Record ID, PID, scenario, total, write, write_w_mask, read,");
	seq_puts(m, " polling, move, sync, prefetch_en, prefetch_dis, EOC, jump");
	for (i = 0; i < CMDQ_MODULE_STAT_MAX; i++) {
		seq_printf(m, ", (%s)=>, write, write_w_mask, read, polling",
			   gCmdqModuleInstructionLabel[i]);
	}
	seq_puts(m, ", (SYNC)=>, HW Event, SW Event\n");

	/* we try to minimize time spent in spin lock */
	/* since record is an array so it is okay to */
	/* allow displaying an out-of-date entry. */
	spin_lock_irqsave(&gCmdqRecordLock, flags);
	numRec = gCmdqContext.recNum;
	index = gCmdqContext.lastID - 1;
	spin_unlock_irqrestore(&gCmdqRecordLock, flags);

	/* we print record in reverse order. */
	for (; numRec > 0; --numRec, --index) {
		/* ring-buffer index wrap-around in both directions */
		if (index >= CMDQ_MAX_RECORD_COUNT)
			index = 0;
		else if (index < 0)
			index = CMDQ_MAX_RECORD_COUNT - 1;

		/* Make sure we don't print a record that is during updating. */
		/* However, this record may already be different */
		/* from the time of entering cmdqCorePrintRecordSeq(). */
		spin_lock_irqsave(&gCmdqRecordLock, flags);
		record = gCmdqContext.record[index];
		spin_unlock_irqrestore(&gCmdqRecordLock, flags);

		/* no EOC and no JUMP means the stats were discarded as invalid */
		if ((0 == record.instructionStat[CMDQ_STAT_EOC]) &&
		    (0 == record.instructionStat[CMDQ_STAT_JUMP])) {
			seq_printf(m, "%4d, %5c, %2c, %4d", index, 'X', 'X', 0);
			for (i = 0; i < CMDQ_STAT_MAX; i++)
				seq_printf(m, ", %4d", 0);
			for (i = 0; i < CMDQ_MODULE_STAT_MAX; i++)
				seq_printf(m, ", , %4d, %4d, %4d, %4d", 0, 0, 0, 0);
			seq_printf(m, ", , %4d, %4d", 0, 0);
		} else {
			uint32_t totalCount = (uint32_t) (record.size / CMDQ_INST_SIZE);

			seq_printf(m, " %4d, %5d, %02d, %4d", index, record.user, record.scenario,
				   totalCount);
			for (i = 0; i < CMDQ_STAT_MAX; i++)
				seq_printf(m, ", %4d", record.instructionStat[i]);
			for (i = 0; i < CMDQ_MODULE_STAT_MAX; i++) {
				seq_printf(m, ", , %4d, %4d, %4d, %4d", record.writeModule[i],
					   record.writewmaskModule[i], record.readModlule[i],
					   record.pollModule[i]);
			}
			seq_printf(m, ", , %4d, %4d",
				   record.eventCount[CMDQ_EVENT_STAT_HW],
				   record.eventCount[CMDQ_EVENT_STAT_SW]);
		}
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n\n==============Other Instruction==============\n");
	/* we try to minimize time spent in spin lock */
	/* since record is an array so it is okay to */
	/* allow displaying an out-of-date entry. */
	spin_lock_irqsave(&gCmdqRecordLock, flags);
	numRec = gCmdqContext.recNum;
	index = gCmdqContext.lastID - 1;
	spin_unlock_irqrestore(&gCmdqRecordLock, flags);

	/* we print record in reverse order. */
	for (; numRec > 0; --numRec, --index) {
		if (index >= CMDQ_MAX_RECORD_COUNT)
			index = 0;
		else if (index < 0)
			index = CMDQ_MAX_RECORD_COUNT - 1;

		/* Make sure we don't print a record that is during updating. */
		/* However, this record may already be different */
		/* from the time of entering cmdqCorePrintRecordSeq(). */
		spin_lock_irqsave(&gCmdqRecordLock, flags);
		record = gCmdqContext.record[index];
		spin_unlock_irqrestore(&gCmdqRecordLock, flags);

		for (i = 0; i < record.otherInstrNUM; i++)
			seq_printf(m, "0x%08x\n", record.otherInstr[i]);
	}

	return 0;
}
  3926. #endif
/*
 * Copy a task's profile-marker timestamps and tags into the record.
 * Timestamps are read back from the task's backup slot; each HW count is
 * 76 ns.  Compiled out unless CMDQ_PROFILE_MARKER_SUPPORT is defined.
 */
static void cmdq_core_fill_task_profile_marker_record(RecordStruct *pRecord,
	const TaskStruct *pTask)
{
#ifdef CMDQ_PROFILE_MARKER_SUPPORT
	uint32_t i;
	uint32_t profileMarkerCount;
	uint32_t value;
	cmdqBackupSlotHandle hSlot;

	if ((NULL == pRecord) || (NULL == pTask))
		return;

	/* no slot handle means profiling was not enabled for this task */
	if (0 == pTask->profileMarker.hSlot)
		return;

	profileMarkerCount = pTask->profileMarker.count;
	hSlot = (cmdqBackupSlotHandle) (pTask->profileMarker.hSlot);

	pRecord->profileMarkerCount = profileMarkerCount;
	for (i = 0; i < profileMarkerCount; i++) {
		/* timestamp, each count is 76ns */
		cmdqBackupReadSlot(hSlot, i, &value);
		pRecord->profileMarkerTimeNS[i] = value * 76;
		pRecord->profileMarkerTag[i] = (char *)(CMDQ_U32_PTR(pTask->profileMarker.tag[i]));
	}
#endif
}
/*
 * Populate a RecordStruct from a finished task: caller identity,
 * scheduling/flag info, HW execution time (derived from the profile
 * counters, 76 ns per count), the task's lifecycle timestamps, profile
 * markers and (optionally) instruction statistics.
 */
static void cmdq_core_fill_task_record(RecordStruct *pRecord, const TaskStruct *pTask,
	uint32_t thread)
{
	uint32_t begin, end;

	if (pRecord && pTask) {
		/* Record scenario */
		pRecord->user = pTask->callerPid;
		pRecord->scenario = pTask->scenario;
		pRecord->priority = pTask->priority;
		pRecord->thread = thread;
		pRecord->reorder = pTask->reorder;
		pRecord->engineFlag = pTask->engineFlag;
		pRecord->size = pTask->commandSize;
		pRecord->isSecure = pTask->secData.isSecure;

		if (NULL == pTask->profileData) {
			pRecord->writeTimeNS = 0;
			pRecord->writeTimeNSBegin = 0;
			pRecord->writeTimeNSEnd = 0;
		} else {
			/* Command exec time, each count is 76ns */
			/* volatile: the counters are written by HW */
			begin = *((volatile uint32_t *)pTask->profileData);
			end = *((volatile uint32_t *)(pTask->profileData + 1));
			pRecord->writeTimeNS = (end - begin) * 76;
			pRecord->writeTimeNSBegin = (begin) * 76;
			pRecord->writeTimeNSEnd = (end) * 76;
		}

		/* Record time */
		pRecord->submit = pTask->submit;
		pRecord->trigger = pTask->trigger;
		pRecord->gotIRQ = pTask->gotIRQ;
		pRecord->beginWait = pTask->beginWait;
		pRecord->wakedUp = pTask->wakedUp;

		cmdq_core_fill_task_profile_marker_record(pRecord, pTask);
#ifdef CMDQ_INSTRUCTION_COUNT
		/* Instruction count statistics */
		cmdq_core_fill_task_instruction_stat(pRecord, pTask);
#endif
	}
}
/* Append pTask's execution statistics to the global record ring buffer.
 * record[] holds the last CMDQ_MAX_RECORD_COUNT entries; lastID is the
 * next write slot and recNum saturates once the ring is full.
 */
static void cmdq_core_track_task_record(TaskStruct *pTask, uint32_t thread)
{
	RecordStruct *pRecord;
	unsigned long flags;
	CMDQ_TIME done;
#if 0
	if (cmdq_get_func()->shouldProfile(pTask->scenario))
		return;
#endif
	/* timestamp taken outside the lock to keep the critical section short */
	done = sched_clock();
	spin_lock_irqsave(&gCmdqRecordLock, flags);
	pRecord = &(gCmdqContext.record[gCmdqContext.lastID]);
	cmdq_core_fill_task_record(pRecord, pTask, thread);
	pRecord->done = done;
	/* advance write index; wrap at ring capacity */
	gCmdqContext.lastID++;
	if (gCmdqContext.lastID >= CMDQ_MAX_RECORD_COUNT)
		gCmdqContext.lastID = 0;
	/* recNum counts stored records, clamped at the ring capacity */
	gCmdqContext.recNum++;
	if (gCmdqContext.recNum >= CMDQ_MAX_RECORD_COUNT)
		gCmdqContext.recNum = CMDQ_MAX_RECORD_COUNT;
	spin_unlock_irqrestore(&gCmdqRecordLock, flags);
}
/* Dump GIC status for the CMDQ normal and secure IRQ lines.
 * Compiles to a no-op when CMDQ_OF_SUPPORT is defined or CMDQ_DUMP_GIC
 * is disabled.
 */
void cmdq_core_dump_GIC(void)
{
#ifndef CMDQ_OF_SUPPORT /* OF Support removes mt_irq.h, mt_irq_dump_status support will be added later. */
#if CMDQ_DUMP_GIC
	mt_irq_dump_status(cmdq_dev_get_irq_id());
	mt_irq_dump_status(cmdq_dev_get_irq_secure_id());
#endif
#endif
}
  4020. static void cmdq_core_dump_error_buffer(const TaskStruct *pTask, uint32_t *hwPC)
  4021. {
  4022. if (NULL == pTask)
  4023. return;
  4024. if (hwPC && hwPC >= pTask->pVABase) {
  4025. /* because hwPC points to "start" of the instruction */
  4026. /* add offset 1 */
  4027. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4,
  4028. pTask->pVABase, (2 + hwPC - pTask->pVABase) * sizeof(uint32_t),
  4029. true);
  4030. cmdq_core_save_hex_first_dump("", 16, 4,
  4031. pTask->pVABase,
  4032. (2 + hwPC - pTask->pVABase) * sizeof(uint32_t));
  4033. } else {
  4034. CMDQ_ERR("hwPC is not in region, dump all\n");
  4035. print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 4,
  4036. pTask->pVABase, (pTask->commandSize), true);
  4037. cmdq_core_save_hex_first_dump("", 16, 4, pTask->pVABase, (pTask->commandSize));
  4038. }
  4039. }
/* Dump full debug state for an error on a HW thread: thread registers,
 * task status, per-engine status via registered group callbacks, clock /
 * SMI / MMSYS state, GIC status, and finally the command buffers.
 * pNGTask is the "NG" task located by the summary scan; when it differs
 * from pTask both are dumped. Engine dumps cover the union of both
 * tasks' engine flags.
 */
static void cmdq_core_dump_error_task(const TaskStruct *pTask, const TaskStruct *pNGTask, uint32_t thread)
{
	CmdqCBkStruct *pCallback = NULL;
	ThreadStruct *pThread;
	int32_t index = 0;
	uint32_t *hwPC = NULL;
	uint32_t *hwNGPC = NULL;
	uint64_t printEngineFlag = 0;
	uint32_t value[10] = { 0 };
	bool isDispScn = false;
	static const char *const engineGroupName[] = {
		CMDQ_FOREACH_GROUP(GENERATE_STRING)
	};
	CMDQ_ERR("=============== [CMDQ] Error Thread Status ===============\n");
	pThread = &(gCmdqContext.thread[thread]);
	if (false == cmdq_get_func()->isSecureThread(thread)) {
		/* normal thread: HW registers are readable from the normal world */
		value[0] = CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread));
		value[1] = CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread));
		value[2] = CMDQ_REG_GET32(CMDQ_THR_WAIT_TOKEN(thread));
		value[3] = cmdq_core_thread_exec_counter(thread);
		value[4] = CMDQ_REG_GET32(CMDQ_THR_IRQ_STATUS(thread));
		value[5] = CMDQ_REG_GET32(CMDQ_THR_INST_CYCLES(thread));
		value[6] = CMDQ_REG_GET32(CMDQ_THR_CURR_STATUS(thread));
		value[7] = CMDQ_REG_GET32(CMDQ_THR_IRQ_ENABLE(thread));
		value[8] = CMDQ_REG_GET32(CMDQ_THR_ENABLE_TASK(thread));
		CMDQ_ERR
		    ("Index: %d, Enabled: %d, IRQ: 0x%08x, Thread PC: 0x%08x, End: 0x%08x, Wait Token: 0x%08x\n",
		     thread, value[8], value[4], value[0], value[1], value[2]);
		CMDQ_ERR
		    ("Curr Cookie: %d, Wait Cookie: %d, Next Cookie: %d, Task Count %d, engineFlag: 0x%llx\n",
		     value[3], pThread->waitCookie, pThread->nextCookie, pThread->taskCount,
		     pThread->engineFlag);
		CMDQ_ERR("Timeout Cycle:%d, Status:0x%08x, IRQ_EN: 0x%08x\n", value[5], value[6],
			 value[7]);
	} else {
		/* do nothing since it's a secure thread; registers are not accessible here */
		CMDQ_ERR("Wait Cookie: %d, Next Cookie: %d, Task Count %d,\n",
			 pThread->waitCookie, pThread->nextCookie, pThread->taskCount);
	}
	/* Begin is not first, save NG task but print pTask as well */
	if (NULL != pNGTask && pNGTask != pTask) {
		CMDQ_ERR("== [CMDQ] We have NG task, so engine dumps may more than you think ==\n");
		CMDQ_ERR("========== [CMDQ] Error Thread PC (NG Task) ==========\n");
		hwNGPC = cmdq_core_dump_pc(pNGTask, thread, "ERR");
		CMDQ_ERR("========= [CMDQ] Error Task Status (NG Task) =========\n");
		cmdq_core_dump_task(pNGTask);
		printEngineFlag |= pNGTask->engineFlag;
	}
	if (NULL != pTask) {
		CMDQ_ERR("=============== [CMDQ] Error Thread PC ===============\n");
		hwPC = cmdq_core_dump_pc(pTask, thread, "ERR");
		CMDQ_ERR("=============== [CMDQ] Error Task Status ===============\n");
		cmdq_core_dump_task(pTask);
		printEngineFlag |= pTask->engineFlag;
	}
	/* dump tasks in error thread */
	cmdq_core_dump_task_in_thread(thread, false, false, false);
	cmdq_core_dump_task_with_engine_flag(printEngineFlag);
	CMDQ_ERR("=============== [CMDQ] CMDQ Status ===============\n");
	cmdq_core_dump_status("ERR");
#ifndef CONFIG_MTK_FPGA
	CMDQ_ERR("=============== [CMDQ] SMI Status ===============\n");
	cmdq_get_func()->dumpSMI(1);
#endif
	CMDQ_ERR("=============== [CMDQ] Clock Gating Status ===============\n");
	CMDQ_ERR("[CLOCK] common clock ref=%d\n", atomic_read(&gCmdqThreadUsage));
	cmdq_get_func()->dumpClockGating();
	/* */
	/* Dump MMSYS configuration */
	/* */
	CMDQ_ERR("=============== [CMDQ] MMSYS_CONFIG ===============\n");
	cmdq_mdp_get_func()->dumpMMSYSConfig();
	/* */
	/* ask each module to print their status */
	/* */
	CMDQ_ERR("=============== [CMDQ] Engine Status ===============\n");
	pCallback = gCmdqGroupCallback;
	for (index = 0; index < CMDQ_MAX_GROUP_COUNT; ++index) {
		/* only dump groups whose engines appear in the error flags */
		if (!cmdq_core_is_group_flag((CMDQ_GROUP_ENUM) index, printEngineFlag))
			continue;
		CMDQ_ERR("====== engine group %s status =======\n", engineGroupName[index]);
		if (NULL == pCallback[index].dumpInfo) {
			CMDQ_ERR("(no dump function)\n");
			continue;
		}
		pCallback[index].dumpInfo((gCmdqEngineGroupBits[index] & printEngineFlag),
					  gCmdqContext.logLevel);
	}
	/* force dump DISP for DISP scenario with 0x0 engine flag */
	if (NULL != pTask)
		isDispScn = cmdq_get_func()->isDispScenario(pTask->scenario);
	if (NULL != pNGTask)
		isDispScn = isDispScn | cmdq_get_func()->isDispScenario(pNGTask->scenario);
	if (isDispScn) {
		index = CMDQ_GROUP_DISP;
		if (pCallback[index].dumpInfo) {
			pCallback[index].dumpInfo((gCmdqEngineGroupBits[index] & printEngineFlag),
						  gCmdqContext.logLevel);
		}
	}
	CMDQ_ERR("=============== [CMDQ] GIC dump ===============\n");
	cmdq_core_dump_GIC();
	/* Begin is not first, save NG task but print pTask as well */
	if (NULL != pNGTask && pNGTask != pTask) {
		CMDQ_ERR("========== [CMDQ] Error Command Buffer (NG Task) ==========\n");
		cmdq_core_dump_error_buffer(pNGTask, hwNGPC);
	}
	if (NULL != pTask) {
		CMDQ_ERR("=============== [CMDQ] Error Command Buffer ===============\n");
		cmdq_core_dump_error_buffer(pTask, hwPC);
	}
}
/* Record and report an error for pTask on the given thread.
 * Bumps the fail counter of every engine the task uses, stores an error
 * record (up to CMDQ_MAX_ERROR_COUNT), captures first-error context when
 * CMDQ_DUMP_FIRSTERROR is enabled, then prints a summary. A full dump is
 * emitted only for the first few errors, every 16th error, or when
 * full-error mode is forced, to bound log volume.
 * On return, *pOutNGTask (if non-NULL) is the NG task found by the
 * summary scan, or pTask itself when none was found.
 */
static void cmdq_core_attach_error_task(const TaskStruct *pTask, int32_t thread,
					const TaskStruct **pOutNGTask)
{
	EngineStruct *pEngine = NULL;
	ThreadStruct *pThread = NULL;
	const TaskStruct *pNGTask = NULL;
	uint64_t engFlag = 0;
	int32_t index = 0;
	if (NULL == pTask) {
		CMDQ_ERR("attach error failed since pTask is NULL");
		return;
	}
	pThread = &(gCmdqContext.thread[thread]);
	pEngine = gCmdqContext.engine;
	CMDQ_PROF_MMP(cmdq_mmp_get_event()->warning, MMProfileFlagPulse, ((unsigned long)pTask),
		      thread);
	/* */
	/* Update engine fail count */
	/* */
	engFlag = pTask->engineFlag;
	for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++) {
		if (engFlag & (1LL << index))
			pEngine[index].failCount++;
	}
	/* */
	/* register error record */
	/* */
	if (gCmdqContext.errNum < CMDQ_MAX_ERROR_COUNT) {
		ErrorStruct *pError = &gCmdqContext.error[gCmdqContext.errNum];
		cmdq_core_fill_task_record(&pError->errorRec, pTask, thread);
		pError->ts_nsec = local_clock();
	}
#ifdef CMDQ_DUMP_FIRSTERROR
	/* only the very first error snapshot is kept */
	if (0 == gCmdqFirstError.cmdqCount) {
		gCmdqFirstError.flag = true;
		/* save kernel time, pid, and caller name */
		gCmdqFirstError.callerPid = pTask->callerPid;
		snprintf(gCmdqFirstError.callerName, TASK_COMM_LEN, "%s", pTask->callerName);
		gCmdqFirstError.savetime = sched_clock();
		do_gettimeofday(&gCmdqFirstError.savetv);
	}
#endif
	/* */
	/* Then we just print out info */
	/* */
	CMDQ_ERR("================= [CMDQ] Begin of Error %d================\n",
		 gCmdqContext.errNum);
	cmdq_core_dump_summary(pTask, thread, &pNGTask);
	/* throttle the expensive full dump on repeated errors */
	if (gCmdqContext.errNum <= 2 || gCmdqContext.errNum % 16 == 0 || cmdq_core_should_full_error()) {
		/* Dump error task */
		cmdq_core_dump_error_task(pTask, pNGTask, thread);
	}
	CMDQ_ERR("================= [CMDQ] End of Error %d ================\n",
		 gCmdqContext.errNum);
	gCmdqContext.errNum++;
	if (pOutNGTask != NULL) {
		if (NULL != pNGTask)
			*pOutNGTask = pNGTask;
		else
			*pOutNGTask = pTask;
	}
}
  4215. static int32_t cmdq_core_insert_task_from_thread_array_by_cookie(TaskStruct *pTask,
  4216. ThreadStruct *pThread,
  4217. const int32_t cookie,
  4218. const bool resetHWThread)
  4219. {
  4220. if (NULL == pTask || NULL == pThread) {
  4221. CMDQ_ERR("invalid param, pTask[0x%p], pThread[0x%p], cookie[%d], needReset[%d]\n",
  4222. pTask, pThread, cookie, resetHWThread);
  4223. return -EFAULT;
  4224. }
  4225. if (true == resetHWThread) {
  4226. pThread->waitCookie = cookie;
  4227. pThread->nextCookie = cookie + 1;
  4228. if (pThread->nextCookie > CMDQ_MAX_COOKIE_VALUE) {
  4229. /* Reach the maximum cookie */
  4230. pThread->nextCookie = 0;
  4231. }
  4232. /* taskCount must start from 0. */
  4233. /* and we are the first task, so set to 1. */
  4234. pThread->taskCount = 1;
  4235. } else {
  4236. pThread->nextCookie += 1;
  4237. if (pThread->nextCookie > CMDQ_MAX_COOKIE_VALUE) {
  4238. /* Reach the maximum cookie */
  4239. pThread->nextCookie = 0;
  4240. }
  4241. pThread->taskCount++;
  4242. }
  4243. /* genernal part */
  4244. pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(pTask->thread)] = pTask;
  4245. pThread->allowDispatching = 1;
  4246. /* secure path */
  4247. if (pTask->secData.isSecure) {
  4248. pTask->secData.waitCookie = cookie;
  4249. pTask->secData.resetExecCnt = resetHWThread;
  4250. }
  4251. return 0;
  4252. }
/* Detach the task in slot "index" of pThread and move it from
 * TASK_STATE_BUSY to newTaskState (_DONE, _ERROR, _KILLED, ...).
 * The state check guards against double removal, which would corrupt
 * taskCount. Returns 0 on success, -EINVAL on any invalid input or when
 * the slot's task is not BUSY.
 */
static int32_t cmdq_core_remove_task_from_thread_array_by_cookie(ThreadStruct *pThread,
								 int32_t index,
								 TASK_STATE_ENUM newTaskState)
{
	TaskStruct *pTask = NULL;
	if ((NULL == pThread) || (index < 0) || (index >= CMDQ_MAX_TASK_IN_THREAD)) {
		CMDQ_ERR
		    ("remove task from thread array, invalid param. THR[0x%p], task_slot[%d], newTaskState[%d]\n",
		     pThread, index, newTaskState);
		return -EINVAL;
	}
	pTask = pThread->pCurTask[index];
	if (NULL == pTask) {
		CMDQ_ERR("remove fail, task_slot[%d] on thread[%p] is NULL\n", index, pThread);
		return -EINVAL;
	}
	/* re-check against the per-thread capacity, which may be smaller
	 * than the global CMDQ_MAX_TASK_IN_THREAD bound checked above */
	if (cmdq_core_max_task_in_thread(pTask->thread) <= index) {
		CMDQ_ERR
		    ("remove task from thread array, invalid index. THR[0x%p], task_slot[%d], newTaskState[%d]\n",
		     pThread, index, newTaskState);
		return -EINVAL;
	}
	/* to switch a task to done_status(_ERROR, _KILLED, _DONE) is aligned with thread's taskcount change */
	/* check task status to prevent double clean-up thread's taskcount */
	if (TASK_STATE_BUSY != pTask->taskState) {
		CMDQ_ERR
		    ("remove task, taskStatus err[%d]. THR[0x%p], task_slot[%d], targetTaskStaus[%d]\n",
		     pTask->taskState, pThread, index, newTaskState);
		return -EINVAL;
	}
	CMDQ_VERBOSE("remove task, slot[%d], targetStatus: %d\n", index, newTaskState);
	pTask->taskState = newTaskState;
	pTask = NULL;
	pThread->pCurTask[index] = NULL;
	pThread->taskCount--;
	if (0 > pThread->taskCount) {
		/* Error status print */
		CMDQ_ERR("taskCount < 0 after cmdq_core_remove_task_from_thread_array_by_cookie\n");
	}
	return 0;
}
/* Roll back a secure task that failed to submit: clear its slot in
 * pThread and undo the taskCount / nextCookie increments made at insert
 * time. Unlike the _by_cookie variant, no task-state transition is done
 * here. Returns 0 on success, -EINVAL on invalid input.
 */
static int32_t cmdq_core_remove_task_from_thread_array_when_secure_submit_fail(ThreadStruct *pThread,
									       int32_t index)
{
	TaskStruct *pTask = NULL;
	if ((NULL == pThread) || (index < 0) || (index >= CMDQ_MAX_TASK_IN_THREAD)) {
		CMDQ_ERR
		    ("remove task from thread array, invalid param. THR[0x%p], task_slot[%d]\n",
		     pThread, index);
		return -EINVAL;
	}
	pTask = pThread->pCurTask[index];
	if (NULL == pTask) {
		CMDQ_ERR("remove fail, task_slot[%d] on thread[%p] is NULL\n", index, pThread);
		return -EINVAL;
	}
	/* re-check against the per-thread capacity */
	if (cmdq_core_max_task_in_thread(pTask->thread) <= index) {
		CMDQ_ERR
		    ("remove task from thread array, invalid index. THR[0x%p], task_slot[%d]\n",
		     pThread, index);
		return -EINVAL;
	}
	CMDQ_VERBOSE("remove task, slot[%d]\n", index);
	pTask = NULL;
	pThread->pCurTask[index] = NULL;
	pThread->taskCount--;
	/* undo the nextCookie advance done when the task was inserted */
	pThread->nextCookie--;
	if (0 > pThread->taskCount) {
		/* Error status print */
		CMDQ_ERR("taskCount < 0 after cmdq_core_remove_task_from_thread_array_when_secure_submit_fail\n");
	}
	return 0;
}
/* Forcibly remove pTask from a running HW thread.
 * Suspends the thread first. If pTask is the one currently executing,
 * jump the thread PC to the task's EOC so the HW skips it; otherwise
 * scan the thread's task list and patch the jump of the preceding task
 * (rewriting its trailing instruction into a fake EOC + jump over pTask).
 * Either way pTask ends in TASK_STATE_KILLED.
 * Returns the suspend status (or 0 after a successful bypass patch).
 */
static int32_t cmdq_core_force_remove_task_from_thread(TaskStruct *pTask, uint32_t thread)
{
	int32_t status = 0;
	int32_t cookie = 0;
	int index = 0;
	int loop = 0;
	struct TaskStruct *pExecTask = NULL;
	struct ThreadStruct *pThread = &(gCmdqContext.thread[thread]);
	status = cmdq_core_suspend_HW_thread(thread, __LINE__);
	CMDQ_REG_SET32(CMDQ_THR_INST_CYCLES(thread), cmdq_core_get_task_timeout_cycle(pThread));
	/* The cookie of the task currently being processed */
	cookie = CMDQ_GET_COOKIE_CNT(thread) + 1;
	pExecTask = pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(thread)];
	if (NULL != pExecTask && (pExecTask == pTask)) {
		/* The task is executed now, set the PC to EOC for bypass */
		CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread),
			       CMDQ_PHYS_TO_AREG(pTask->MVABase + pTask->commandSize - 16));
		cmdq_core_reset_hw_engine(pTask->engineFlag);
		pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(thread)] = NULL;
		pTask->taskState = TASK_STATE_KILLED;
	} else {
		/* walk every queued task starting at the current cookie slot */
		loop = pThread->taskCount;
		for (index = (cookie % cmdq_core_max_task_in_thread(thread)); loop > 0; loop--, index++) {
			if (index >= cmdq_core_max_task_in_thread(thread))
				index = 0;
			pExecTask = pThread->pCurTask[index];
			if (NULL == pExecTask)
				continue;
			/* NOTE(review): these magic words appear to match the end-of-list
			 * instruction pattern and a jump to pTask — confirm against the
			 * GCE instruction encoding */
			if ((0x10000000 == pExecTask->pCMDEnd[0]) &&
			    (0x00000008 == pExecTask->pCMDEnd[-1])) {
				/* We reached the last task */
				break;
			} else if (pExecTask->pCMDEnd[-1] == pTask->MVABase) {
				/* Fake EOC command */
				pExecTask->pCMDEnd[-1] = 0x00000001;
				pExecTask->pCMDEnd[0] = 0x40000000;
				/* Bypass the task */
				pExecTask->pCMDEnd[1] = pTask->pCMDEnd[-1];
				pExecTask->pCMDEnd[2] = pTask->pCMDEnd[0];
				index += 1;
				if (index >= cmdq_core_max_task_in_thread(thread))
					index = 0;
				pThread->pCurTask[index] = NULL;
				pTask->taskState = TASK_STATE_KILLED;
				status = 0;
				break;
			}
		}
	}
	return status;
}
/* Mark as DONE every task whose cookie is covered by the HW cookie just
 * read, advance pThread->waitCookie past it, and wake waiters on the
 * thread's wait queue. Handles cookie wrap-around and duplicated-cookie
 * IRQs. Caller must hold the exec lock (IRQ context).
 */
static void cmdq_core_handle_done_with_cookie_impl(int32_t thread,
						   int32_t value, CMDQ_TIME *pGotIRQ,
						   const uint32_t cookie)
{
#ifdef CMDQ_MDP_MET_STATUS
	struct TaskStruct *pTask;
#endif
	ThreadStruct *pThread;
	int32_t count;
	int32_t inner;
	int32_t maxTaskNUM = cmdq_core_max_task_in_thread(thread);
	pThread = &(gCmdqContext.thread[thread]);
	/* do not print excessive message for looping thread */
	if (NULL == pThread->loopCallback) {
#ifdef CONFIG_MTK_FPGA
		/* ASYNC: debug log, use printk_sched to prevent block IRQ handler */
		CMDQ_MSG("IRQ: Done, thread: %d, cookie:%d\n", thread, cookie);
#endif
	}
	/* compute how many cookies (tasks) this IRQ retires */
	if (pThread->waitCookie <= cookie) {
		count = cookie - pThread->waitCookie + 1;
	} else if ((cookie+1) % CMDQ_MAX_COOKIE_VALUE == pThread->waitCookie) {
		/* same cookie seen twice: nothing new to retire */
		count = 0;
		CMDQ_MSG("IRQ: duplicated cookie: waitCookie:%d, hwCookie:%d",
			 pThread->waitCookie, cookie);
	} else {
		/* Counter wrapped */
		count = (CMDQ_MAX_COOKIE_VALUE - pThread->waitCookie + 1) + (cookie + 1);
		CMDQ_ERR("IRQ: counter wrapped: waitCookie:%d, hwCookie:%d, count=%d",
			 pThread->waitCookie, cookie, count);
	}
	/* retire the covered tasks, walking slots as a ring of maxTaskNUM */
	for (inner = (pThread->waitCookie % maxTaskNUM); count > 0; count--, inner++) {
		if (inner >= maxTaskNUM)
			inner = 0;
		if (NULL != pThread->pCurTask[inner]) {
			struct TaskStruct *pTask = pThread->pCurTask[inner];
			pTask->gotIRQ = *pGotIRQ;
			pTask->irqFlag = value;
			cmdq_core_remove_task_from_thread_array_by_cookie(pThread,
									  inner, TASK_STATE_DONE);
#ifdef CMDQ_MDP_MET_STATUS
			/* MET MMSYS: Thread done */
			if (met_mmsys_event_gce_thread_end)
				met_mmsys_event_gce_thread_end(thread, (uintptr_t) pTask, pTask->engineFlag);
#endif
		}
	}
	CMDQ_PROF_MMP(cmdq_mmp_get_event()->CMDQ_IRQ, MMProfileFlagPulse, thread, cookie);
	pThread->waitCookie = cookie + 1;
	if (pThread->waitCookie > CMDQ_MAX_COOKIE_VALUE)
		pThread->waitCookie -= (CMDQ_MAX_COOKIE_VALUE + 1);	/* min cookie value is 0 */
#ifdef CMDQ_MDP_MET_STATUS
	/* MET MMSYS: GCE should trigger next waiting task */
	if ((0 < pThread->taskCount) && met_mmsys_event_gce_thread_begin) {
		count = pThread->nextCookie - pThread->waitCookie;
		for (inner = (pThread->waitCookie % maxTaskNUM); count > 0; count--, inner++) {
			if (inner >= maxTaskNUM)
				inner = 0;
			if (NULL != pThread->pCurTask[inner]) {
				pTask = pThread->pCurTask[inner];
				met_mmsys_event_gce_thread_begin(thread, (uintptr_t) pTask, pTask->engineFlag,
								 (void *)pTask->pVABase, pTask->commandSize);
				break;
			}
		}
	}
#endif
	wake_up(&gCmdWaitQueue[thread]);
}
  4446. static void cmdq_core_handle_secure_thread_done_impl(const int32_t thread,
  4447. const int32_t value, CMDQ_TIME *pGotIRQ)
  4448. {
  4449. const int32_t cookie = cmdq_core_get_secure_thread_exec_counter(thread);
  4450. /* get cookie value from shared memory */
  4451. if (0 > cookie)
  4452. return;
  4453. cmdq_core_handle_done_with_cookie_impl(thread, value, pGotIRQ, cookie);
  4454. }
  4455. static void cmdq_core_handle_secure_paths_exec_done_notify(const int32_t notifyThread,
  4456. const int32_t value, CMDQ_TIME *pGotIRQ)
  4457. {
  4458. uint32_t i;
  4459. int32_t thread;
  4460. const uint32_t startThread = CMDQ_MIN_SECURE_THREAD_ID;
  4461. const uint32_t endThread = CMDQ_MIN_SECURE_THREAD_ID + CMDQ_MAX_SECURE_THREAD_COUNT;
  4462. int32_t raisedIRQ;
  4463. raisedIRQ = 0x0;
  4464. /* HACK:
  4465. * IRQ of the notify thread,
  4466. * implies threre are some secure tasks execute done.
  4467. *
  4468. * when receive it, we should
  4469. * .suspend notify thread
  4470. * .scan shared memory to update secure path task status
  4471. * (and notify waiting process context to check result)
  4472. * .resume notify thread
  4473. */
  4474. /* it's okey that SWd update and NWd read shared memory, which used to
  4475. * store copy value of secure thread cookie, at the same time.
  4476. *
  4477. * The reason is NWd will receive a notify thread IRQ again after resume notify thread.
  4478. * The later IRQ let driver scan shared memory again.
  4479. * (note it's possible that same content in shared memory in such case)
  4480. */
  4481. /* confirm if it is notify thread */
  4482. if (false == cmdq_get_func()->isValidNotifyThread(notifyThread))
  4483. return;
  4484. raisedIRQ = cmdq_core_get_secure_IRQ_status();
  4485. CMDQ_LOG("%s, raisedIRQ:0x%08x, shared_cookie(%d, %d, %d)\n",
  4486. __func__,
  4487. raisedIRQ,
  4488. cmdq_core_get_secure_thread_exec_counter(12),
  4489. cmdq_core_get_secure_thread_exec_counter(13),
  4490. cmdq_core_get_secure_thread_exec_counter(14));
  4491. /* update tasks' status according cookie in shared memory */
  4492. for (i = startThread; i < endThread; i++) {
  4493. /* bit X = 1 means thread X raised IRQ */
  4494. if (0 == (raisedIRQ & (0x1 << i)))
  4495. continue;
  4496. thread = i;
  4497. cmdq_core_handle_secure_thread_done_impl(thread, value, pGotIRQ);
  4498. }
  4499. cmdq_core_set_secure_IRQ_status(0x0);
  4500. #ifdef CMDQ_SECURE_PATH_HW_LOCK
  4501. cmdqCoreSetEvent(CMDQ_SYNC_SECURE_WSM_LOCK);
  4502. #endif
  4503. }
/* IRQ-context error handler for a HW thread.
 * Suspends the thread, marks the task at the error cookie as
 * TASK_STATE_ERR_IRQ (after dumping diagnostics), retires all earlier
 * tasks as DONE, and wakes waiters. waitCookie is deliberately NOT
 * advanced here; the error cookie is handled later in
 * cmdq_core_handle_wait_task_result_impl. Caller holds gCmdqExecLock.
 */
static void cmdqCoreHandleError(int32_t thread, int32_t value, CMDQ_TIME *pGotIRQ)
{
	ThreadStruct *pThread = NULL;
	TaskStruct *pTask = NULL;
	int32_t cookie;
	int32_t count;
	int32_t inner;
	int32_t status = 0;
	cookie = cmdq_core_thread_exec_counter(thread);
	CMDQ_ERR("IRQ: error thread=%d, irq_flag=0x%x, cookie:%d\n", thread, value, cookie);
	CMDQ_ERR("IRQ: Thread PC: 0x%08x, End PC:0x%08x\n",
		 CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)),
		 CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread)));
	pThread = &(gCmdqContext.thread[thread]);
	/* we assume error happens BEFORE EOC */
	/* because it wouldn't be error if this interrupt is issue by EOC. */
	/* So we should inc by 1 to locate "current" task */
	cookie += 1;
	/* Set the issued task to error state */
#define CMDQ_TEST_PREFETCH_FOR_MULTIPLE_COMMAND
#ifdef CMDQ_TEST_PREFETCH_FOR_MULTIPLE_COMMAND
	cmdq_core_dump_task_in_thread(thread, true, true, true);
#endif
	/* suspend HW thread first, so that we work in a consistent state */
	/* outer function should acquire spinlock - gCmdqExecLock */
	status = cmdq_core_suspend_HW_thread(thread, __LINE__);
	if (0 > status) {
		/* suspend HW thread failed */
		CMDQ_ERR("IRQ: suspend HW thread failed!");
	}
	CMDQ_ERR("Error IRQ: always suspend thread (%d) to prevent contiuous error IRQ\n", thread);
	if (NULL != pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(thread)]) {
		/* the task at the error cookie gets the full error treatment */
		pTask = pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(thread)];
		pTask->gotIRQ = *pGotIRQ;
		pTask->irqFlag = value;
		cmdq_core_attach_error_task(pTask, thread, NULL);
		cmdq_core_remove_task_from_thread_array_by_cookie(pThread,
								  cookie % cmdq_core_max_task_in_thread(thread),
								  TASK_STATE_ERR_IRQ);
	} else {
		CMDQ_ERR
		    ("IRQ: can not find task in cmdqCoreHandleError, pc:0x%08x, end_pc:0x%08x\n",
		     CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)),
		     CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread)));
		if (0 >= pThread->taskCount) {
			/* nothing queued at all: shut the thread down */
			cmdq_core_disable_HW_thread(thread);
			CMDQ_ERR("IRQ: there is no task for thread (%d) cmdqCoreHandleError\n",
				 thread);
		}
	}
	/* Set the remain tasks to done state */
	if (pThread->waitCookie <= cookie) {
		count = cookie - pThread->waitCookie + 1;
	} else if ((cookie+1) % CMDQ_MAX_COOKIE_VALUE == pThread->waitCookie) {
		count = 0;
		CMDQ_MSG("IRQ: duplicated cookie: waitCookie:%d, hwCookie:%d",
			 pThread->waitCookie, cookie);
	} else {
		/* Counter wrapped */
		count = (CMDQ_MAX_COOKIE_VALUE - pThread->waitCookie + 1) + (cookie + 1);
		CMDQ_ERR("IRQ: counter wrapped: waitCookie:%d, hwCookie:%d, count=%d",
			 pThread->waitCookie, cookie, count);
	}
	/* retire every task up to (and including) the error cookie slot */
	for (inner = (pThread->waitCookie % cmdq_core_max_task_in_thread(thread)); count > 0; count--, inner++) {
		if (inner >= cmdq_core_max_task_in_thread(thread))
			inner = 0;
		if (NULL != pThread->pCurTask[inner]) {
			pTask = pThread->pCurTask[inner];
			pTask->gotIRQ = (*pGotIRQ);
			pTask->irqFlag = 0;	/* we don't know the exact irq flag. */
			cmdq_core_remove_task_from_thread_array_by_cookie(pThread,
									  inner, TASK_STATE_DONE);
		}
	}
	/* Error cookie will be handled in cmdq_core_handle_wait_task_result_impl API */
	/*pThread->waitCookie = cookie + 1;
	   if (pThread->waitCookie > CMDQ_MAX_COOKIE_VALUE) {
	   pThread->waitCookie -= (CMDQ_MAX_COOKIE_VALUE + 1);
	   } */
	wake_up(&gCmdWaitQueue[thread]);
}
/* IRQ-context "done" handler for a HW thread.
 * Looping threads invoke their loop callback and, on success, just clear
 * the IRQ and keep running. On loop failure the thread is disabled and
 * waitCookie is used in place of the HW exec counter (which keeps
 * incrementing per loop iteration and is therefore not a valid cookie).
 * Normal threads retire tasks via the common cookie handler.
 */
static void cmdqCoreHandleDone(int32_t thread, int32_t value, CMDQ_TIME *pGotIRQ)
{
	ThreadStruct *pThread;
	int32_t cookie;
	int32_t loopResult = 0;
	pThread = &(gCmdqContext.thread[thread]);
	/* */
	/* Loop execution never gets done; unless */
	/* user loop function returns error */
	/* */
	if (NULL != pThread->loopCallback) {
		loopResult = pThread->loopCallback(pThread->loopData);
		CMDQ_PROF_MMP(cmdq_mmp_get_event()->loopBeat,
			      MMProfileFlagPulse, thread, loopResult);
		/* HACK: there are some seucre task execue done */
		cmdq_core_handle_secure_paths_exec_done_notify(thread, value, pGotIRQ);
		if (loopResult >= 0) {
#ifdef CMDQ_PROFILE_COMMAND_TRIGGER_LOOP
			/* HACK */
			if (pThread->pCurTask[1])
				cmdq_core_track_task_record(pThread->pCurTask[1], thread);
#endif
			/* Success, contiue execution as if nothing happens */
			CMDQ_REG_SET32(CMDQ_THR_IRQ_STATUS(thread), ~value);
			return;
		}
	}
	if (loopResult < 0) {
		/* The loop CB failed, so stop HW thread now. */
		cmdq_core_disable_HW_thread(thread);
		/* loop CB failed. the EXECUTION count should not be used as cookie, */
		/* since it will increase by each loop iteration. */
		cookie = pThread->waitCookie;
	} else {
		/* task cookie */
		cookie = cmdq_core_thread_exec_counter(thread);
		CMDQ_MSG("Done: thread %d got cookie: %d\n", thread, cookie);
	}
	cmdq_core_handle_done_with_cookie_impl(thread, value, pGotIRQ, cookie);
}
/* Top-level CMDQ IRQ dispatcher for one HW thread.
 * Validates the IRQ flag and (for normal threads) the thread enable bit
 * under gCmdqExecLock, clears the IRQ status, then routes to the error
 * or done handler based on the flag bits (0x12 = error/timeout,
 * 0x01 = done).
 */
void cmdqCoreHandleIRQ(int32_t thread)
{
	unsigned long flags = 0L;
	CMDQ_TIME gotIRQ;
	int value;
	int enabled;
	int32_t cookie;
	/* note that do_gettimeofday may cause HWT in spin_lock_irqsave (ALPS01496779) */
	gotIRQ = sched_clock();
	/* */
	/* Normal execution, marks tasks done and remove from thread */
	/* Also, handle "loop CB fail" case */
	/* */
	spin_lock_irqsave(&gCmdqExecLock, flags);
	/* it is possible for another CPU core */
	/* to run "releaseTask" right before we acquire the spin lock */
	/* and thus reset / disable this HW thread */
	/* so we check both the IRQ flag and the enable bit of this thread */
	value = CMDQ_REG_GET32(CMDQ_THR_IRQ_STATUS(thread));
	if (0 == (value & 0x13)) {
		/* spurious IRQ: no recognized flag bit set */
		CMDQ_ERR("IRQ: thread %d got interrupt but IRQ flag is 0x%08x in NWd\n", thread,
			 value);
		spin_unlock_irqrestore(&gCmdqExecLock, flags);
		return;
	}
	if (false == cmdq_get_func()->isSecureThread(thread)) {
		enabled = CMDQ_REG_GET32(CMDQ_THR_ENABLE_TASK(thread));
		if (0 == (enabled & 0x01)) {
			/* thread was disabled by another CPU before we got the lock */
			CMDQ_ERR("IRQ: thread %d got interrupt already disabled 0x%08x\n", thread,
				 enabled);
			spin_unlock_irqrestore(&gCmdqExecLock, flags);
			return;
		}
	}
	CMDQ_PROF_START(0, gCmdqThreadLabel[thread]);
	/* Read HW cookie here to print message only */
	cookie = cmdq_core_thread_exec_counter(thread);
	/* Move the reset IRQ before read HW cookie to prevent race condition and save the cost of suspend */
	CMDQ_REG_SET32(CMDQ_THR_IRQ_STATUS(thread), ~value);
	CMDQ_MSG("IRQ: thread %d got interrupt, after reset, and IRQ flag is 0x%08x, cookie: %d\n",
		 thread, value, cookie);
	if (value & 0x12)
		cmdqCoreHandleError(thread, value, &gotIRQ);
	else if (value & 0x01)
		cmdqCoreHandleDone(thread, value, &gotIRQ);
	CMDQ_PROF_END(0, gCmdqThreadLabel[thread]);
	spin_unlock_irqrestore(&gCmdqExecLock, flags);
}
  4673. static TaskStruct *cmdq_core_search_task_by_pc(uint32_t threadPC, const ThreadStruct *pThread, int32_t thread)
  4674. {
  4675. TaskStruct *pTask = NULL;
  4676. int i = 0;
  4677. for (i = 0; i < cmdq_core_max_task_in_thread(thread); ++i) {
  4678. pTask = pThread->pCurTask[i];
  4679. if (pTask &&
  4680. threadPC >= pTask->MVABase &&
  4681. threadPC <= (pTask->MVABase + pTask->commandSize)) {
  4682. break;
  4683. }
  4684. }
  4685. return pTask;
  4686. }
/* Implementation of wait task done
 * Return:
 *     wait time of wait_event_timeout() kernel API
 *     . =0, for timeout elapsed,
 *     . >0, remain jiffies if condition passed
 *
 * Note process will go to sleep with state TASK_UNINTERRUPTIBLE until
 * the condition[task done] passed or timeout happened.
 *
 * On each CMDQ_PREDUMP_TIMEOUT_MS timeout (up to CMDQ_PREDUMP_RETRY_COUNT
 * times) a "pre-dump" of status / PC / display trigger loop is printed
 * under gCmdqExecLock before waiting again.
 */
static int32_t cmdq_core_wait_task_done_with_timeout_impl(TaskStruct *pTask, int32_t thread)
{
	int32_t waitQ;
	unsigned long flags;
	ThreadStruct *pThread = NULL;
	int32_t retryCount = 0;
	pThread = &(gCmdqContext.thread[thread]);
	/* timeout wait & make sure this task is finished. */
	/* pTask->taskState flag is updated in IRQ handlers like cmdqCoreHandleDone. */
	retryCount = 0;
	waitQ = wait_event_timeout(gCmdWaitQueue[thread],
				   (TASK_STATE_BUSY != pTask->taskState
				    && TASK_STATE_WAITING != pTask->taskState),
				   /* timeout_jiffies); */
				   msecs_to_jiffies(CMDQ_PREDUMP_TIMEOUT_MS));
	/* if SW-timeout, pre-dump hang instructions */
	while (0 == waitQ && retryCount < CMDQ_PREDUMP_RETRY_COUNT) {
		CMDQ_LOG("=============== [CMDQ] SW timeout Pre-dump(%d)===============\n",
			 retryCount);
		++retryCount;
		/* take exec lock so the dump sees a consistent HW state */
		spin_lock_irqsave(&gCmdqExecLock, flags);
		cmdq_core_dump_status("INFO");
		cmdq_core_dump_pc(pTask, thread, "INFO");
		/* HACK: check trigger thread status */
		cmdq_core_dump_disp_trigger_loop("INFO");
		/* end of HACK */
		spin_unlock_irqrestore(&gCmdqExecLock, flags);
		/* then we wait again */
		waitQ = wait_event_timeout(gCmdWaitQueue[thread],
					   (TASK_STATE_BUSY != pTask->taskState
					    && TASK_STATE_WAITING != pTask->taskState),
					   msecs_to_jiffies(CMDQ_PREDUMP_TIMEOUT_MS));
	}
	return waitQ;
}
/* Handle the wait result of a task submitted on the secure path.
 *
 * pTask:  the task we waited for (may already be DONE)
 * thread: HW thread index the task was submitted to
 * waitQ:  wait result from the caller; currently unused here because the
 *         secure world is always re-entered to confirm the real task state
 *
 * Return 0 when the task finished normally, -ETIMEDOUT otherwise.
 * On failure the secure task is cancelled, the HW engines are reset, and
 * every task queued on this thread is removed with state TASK_STATE_ERROR.
 * All of this runs under cmdqSecLock.
 */
static int32_t cmdq_core_handle_wait_task_result_secure_impl(TaskStruct *pTask,
							     int32_t thread, const int32_t waitQ)
{
	int32_t i;
	int32_t status;
	ThreadStruct *pThread = NULL;
	/* error report */
	bool throwAEE = false;
	const char *module = NULL;
	int32_t irqFlag = 0;
	cmdqSecCancelTaskResultStruct result;
	char parsedInstruction[128] = { 0 };

	/* Init default status */
	status = 0;
	pThread = &(gCmdqContext.thread[thread]);
	memset(&result, 0, sizeof(cmdqSecCancelTaskResultStruct));

	/* lock cmdqSecLock */
	cmdq_sec_lock_secure_path();

	do {
		/* check if this task has finished */
#if defined(CMDQ_SECURE_PATH_NORMAL_IRQ) || defined(CMDQ_SECURE_PATH_HW_LOCK)
		if (TASK_STATE_DONE == pTask->taskState)
			break;
#else
		if (TASK_STATE_BUSY != pTask->taskState)
			break;
#endif
		/* Oops, the task is not done. */
		/* We have several possible error scenarios: */
		/* 1. task still running (hang / timeout) */
		/* 2. IRQ pending (done or error/timeout IRQ) */
		/* 3. task's SW thread has been signaled (e.g. SIGKILL) */

		/* dump shared cookies (threads 12-14 — presumably the secure
		 * HW thread range; confirm against the secure thread config) */
		CMDQ_VERBOSE
		    ("WAIT: [1]secure path failed, pTask:%p, thread:%d, shared_cookie(%d, %d, %d)\n",
		     pTask, thread,
		     cmdq_core_get_secure_thread_exec_counter(12),
		     cmdq_core_get_secure_thread_exec_counter(13),
		     cmdq_core_get_secure_thread_exec_counter(14));

		/* suppose that task failed, enter secure world to confirm it */
		/* we enter the secure world to: */
		/* .check if pending IRQ and update cookie value to shared memory */
		/* .confirm if task execute done */
		/*  if not, do error handle */
		/*  .recover M4U & DAPC setting */
		/*  .dump secure HW thread */
		/*  .reset CMDQ secure HW thread */
		cmdq_sec_cancel_error_task_unlocked(pTask, thread, &result);

		/* dump shared cookie again, after the cancel attempt */
		CMDQ_VERBOSE
		    ("WAIT: [2]secure path failed, pTask:%p, thread:%d, shared_cookie(%d, %d, %d)\n",
		     pTask, thread,
		     cmdq_core_get_secure_thread_exec_counter(12),
		     cmdq_core_get_secure_thread_exec_counter(13),
		     cmdq_core_get_secure_thread_exec_counter(14));

		/* confirm pending IRQ first */
		cmdq_core_handle_secure_thread_done_impl(thread, 0x01, &pTask->wakedUp);

		/* check if this task has finished after handling pending IRQ */
		if (TASK_STATE_DONE == pTask->taskState)
			break;

		/* task really failed: report timeout and clean the thread up */
		status = -ETIMEDOUT;
		throwAEE = true;
		/* shall we pass the error instruction back from secure path?? */
		/* cmdq_core_parse_error(pTask, thread, &module, &irqFlag, &instA, &instB); */
		module = cmdq_get_func()->parseErrorModule(pTask);

		/* module dump */
		cmdq_core_attach_error_task(pTask, thread, NULL);

		/* module reset */
		/* TODO: get needReset info by secure thread PC */
		cmdq_core_reset_hw_engine(pTask->engineFlag);

		/* remove all tasks in thread since we have reset HW thread in SWd.
		 * NOTE: pTask is reused as the loop cursor here; the original
		 * task pointer is not needed after this point. */
		for (i = 0; i < cmdq_core_max_task_in_thread(thread); i++) {
			pTask = pThread->pCurTask[i];
			if (pTask) {
				cmdq_core_remove_task_from_thread_array_by_cookie(pThread, i,
										  TASK_STATE_ERROR);
			}
		}
		pThread->taskCount = 0;
		pThread->waitCookie = pThread->nextCookie;
	} while (0);

	/* unlock cmdqSecLock */
	cmdq_sec_unlock_secure_path();

	/* throw AEE if necessary, outside the secure lock */
	if (throwAEE) {
		/* errInstr[1]/[0] hold the high/low words of the failing
		 * instruction as reported by the secure world */
		const uint32_t instA = result.errInstr[1];
		const uint32_t instB = result.errInstr[0];
		const uint32_t op = (instA & 0xFF000000) >> 24;

		cmdq_core_interpret_instruction(parsedInstruction, sizeof(parsedInstruction), op,
						instA & (~0xFF000000), instB);

		CMDQ_AEE(module, "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:%s => %s\n",
			 module, irqFlag, instA, instB, cmdq_core_parse_op(op), parsedInstruction);
	}
	return status;
}
/* Handle the wait result of a task on the normal (non-secure) path.
 *
 * pTask:  the task we waited for
 * thread: HW thread index the task runs on
 * waitQ:  result of the wait: 0 means SW timeout, >0 means the task state
 *         changed in time, <0 means the waiting process was signaled
 *
 * If the task is not DONE, the HW thread is suspended, pending IRQs are
 * drained, the task is removed from the thread's slot array, and the HW
 * thread's PC/jump chain is patched so the remaining tasks keep running.
 * Finally the thread is either disabled (no tasks left) or reset/resumed.
 * Returns 0 on success, -ETIMEDOUT on SW timeout, a negative signal code
 * when killed, or -EFAULT if the HW thread could not be reset.
 */
static int32_t cmdq_core_handle_wait_task_result_impl(TaskStruct *pTask, int32_t thread,
						      const int32_t waitQ)
{
	int32_t status;
	int32_t index;
	unsigned long flags;
	ThreadStruct *pThread = NULL;
	const TaskStruct *pNGTask = NULL;
	bool markAsErrorTask = false;
	/* error report */
	bool throwAEE = false;
	const char *module = NULL;
	uint32_t instA = 0, instB = 0;
	int32_t irqFlag = 0;

	/* Init default status */
	status = 0;
	pThread = &(gCmdqContext.thread[thread]);

	/* Note that although we disable IRQ, HW continues to execute */
	/* so it's possible to have pending IRQ */
	spin_lock_irqsave(&gCmdqExecLock, flags);

	do {
		TaskStruct *pNextTask = NULL;
		TaskStruct *pPrevTask = NULL;
		int32_t cookie = 0;
		long threadPC = 0L;

		status = 0;
		throwAEE = false;
		markAsErrorTask = false;

		/* fast path: task completed normally, nothing to fix up */
		if (TASK_STATE_DONE == pTask->taskState)
			break;

		CMDQ_ERR("Task state of %p is not TASK_STATE_DONE, %d\n", pTask, pTask->taskState);

		/* Oops, the task is not done. */
		/* We have several possible error scenarios: */
		/* 1. task still running (hang / timeout) */
		/* 2. IRQ pending (done or error/timeout IRQ) */
		/* 3. task's SW thread has been signaled (e.g. SIGKILL) */

		/* suspend HW thread first, so that we work in a consistent state */
		status = cmdq_core_suspend_HW_thread(thread, __LINE__);
		if (0 > status)
			throwAEE = true;

		/* The cookie of the task currently being processed */
		cookie = CMDQ_GET_COOKIE_CNT(thread) + 1;
		threadPC = CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)));

		/* process any pending IRQ: dispatch to the error handler for
		 * error/timeout bits (0x12) or the done handler for the done
		 * bit (0x01), then acknowledge what we read */
		/* TODO: provide no spin lock version because we already locked. */
		irqFlag = CMDQ_REG_GET32(CMDQ_THR_IRQ_STATUS(thread));
		if (irqFlag & 0x12)
			cmdqCoreHandleError(thread, irqFlag, &pTask->wakedUp);
		else if (irqFlag & 0x01)
			cmdqCoreHandleDone(thread, irqFlag, &pTask->wakedUp);
		CMDQ_REG_SET32(CMDQ_THR_IRQ_STATUS(thread), ~irqFlag);

		/* check if this task has finished after handling pending IRQ */
		if (TASK_STATE_DONE == pTask->taskState)
			break;

		/* Then decide we are SW timeout or SIGNALed (not an error) */
		if (0 == waitQ) {
			/* SW timeout and no IRQ received */
			markAsErrorTask = true;

			/* if we reach here, we're in erroneous state. */
			/* print error log immediately. */
			cmdq_core_attach_error_task(pTask, thread, &pNGTask);
			CMDQ_ERR("SW timeout of task 0x%p on thread %d\n", pTask, thread);
			if (pTask != pNGTask) {
				CMDQ_ERR(" But pc stays in task 0x%p on thread %d\n", pNGTask,
					 thread);
			}
			throwAEE = true;
			cmdq_core_parse_error(pNGTask, thread, &module, &irqFlag, &instA, &instB);
			status = -ETIMEDOUT;
		} else if (0 > waitQ) {
			/* Task was killed. Not an error, but still needs removal.
			 * NOTE(review): wait_event_timeout() never returns
			 * negative, so this branch looks reachable only if the
			 * caller's wait primitive changes — kept for safety. */
			markAsErrorTask = false;
			if (-ERESTARTSYS == waitQ) {
				/* Error status print */
				CMDQ_ERR("Task %p KILLED by waitQ = -ERESTARTSYS\n", pTask);
			} else if (-EINTR == waitQ) {
				/* Error status print */
				CMDQ_ERR("Task %p KILLED by waitQ = -EINTR\n", pTask);
			} else {
				/* Error status print */
				CMDQ_ERR("Task %p KILLED by waitQ = %d\n", pTask, waitQ);
			}
			status = waitQ;
		}

		/* reset HW engine immediately if we already got error IRQ. */
		if ((TASK_STATE_ERROR == pTask->taskState) ||
		    (TASK_STATE_ERR_IRQ == pTask->taskState)) {
			cmdq_core_reset_hw_engine(pTask->engineFlag);
			CMDQ_MSG("WAIT: task state is error, reset engine\n");
		} else if (TASK_STATE_BUSY == pTask->taskState) {
			/* if taskState is BUSY, this means we did not reach EOC, did not have error IRQ. */
			/* - remove the task from thread.pCurTask[] */
			/* - and decrease thread.taskCount */
			/* NOTE: after this, the pCurTask will not contain link to pTask anymore. */
			/* and pTask should become TASK_STATE_ERROR */

			/* we find our place in pThread->pCurTask[]. */
			for (index = 0; index < cmdq_core_max_task_in_thread(thread); ++index) {
				if (pThread->pCurTask[index] == pTask) {
					/* update taskCount and pCurTask[] */
					cmdq_core_remove_task_from_thread_array_by_cookie(pThread,
											  index,
											  markAsErrorTask
											  ?
											  TASK_STATE_ERROR
											  :
											  TASK_STATE_KILLED);
					break;
				}
			}
		}

		/* no command buffer to patch — nothing more we can do */
		if (NULL == pTask->pCMDEnd)
			break;

		pNextTask = NULL;
		/* find pTask's jump destination (0x10000001 = absolute JUMP
		 * marker used throughout this file for task chaining) */
		if (0x10000001 == pTask->pCMDEnd[0]) {
			pNextTask = cmdq_core_search_task_by_pc(pTask->pCMDEnd[-1], pThread, thread);
		} else {
			CMDQ_MSG("No next task: LAST instruction : (0x%08x, 0x%08x)\n",
				 pTask->pCMDEnd[0], pTask->pCMDEnd[-1]);
		}

		/* Then, we try remove pTask from the chain of pThread->pCurTask. */
		/* . if HW PC falls in pTask range */
		/* . HW EXEC_CNT += 1 */
		/* . thread.waitCookie += 1 */
		/* . set HW PC to next task head */
		/* . if not, find previous task (whose jump address is pTask->MVABase) */
		/* . check if HW PC points is not at the EOC/JUMP end */
		/* . change jump to fake EOC(no IRQ) */
		/* . insert jump to next task head and increase cmd buffer size */
		/* . if there is no next task, set HW End Address */
		if (threadPC >= pTask->MVABase && threadPC <= (pTask->MVABase + pTask->commandSize)) {
			if (pNextTask) {
				/* cookie already +1 */
				CMDQ_REG_SET32(CMDQ_THR_EXEC_CNT(thread), cookie);
				pThread->waitCookie = cookie + 1;
				CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread),
					       CMDQ_PHYS_TO_AREG(pNextTask->MVABase));
				CMDQ_MSG("WAIT: resume task 0x%p from err\n", pNextTask);
			}
		} else if (TASK_STATE_ERR_IRQ == pTask->taskState) {
			/* Error IRQ might not stay in normal Task range (jump to a strange part) */
			/* We always execute next due to error IRQ must correct task */
			if (pNextTask) {
				/* cookie already +1 */
				CMDQ_REG_SET32(CMDQ_THR_EXEC_CNT(thread), cookie);
				pThread->waitCookie = cookie + 1;
				CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread),
					       CMDQ_PHYS_TO_AREG(pNextTask->MVABase));
				CMDQ_MSG("WAIT: resume task 0x%p from err IRQ\n", pNextTask);
			}
		} else {
			/* PC is outside pTask: unlink it by patching the task
			 * that jumps into it to jump past it instead */
			pPrevTask = NULL;
			for (index = 0; index < cmdq_core_max_task_in_thread(thread); ++index) {
				pPrevTask = pThread->pCurTask[index];
				/* find which task JUMPs into pTask */
				if (pPrevTask && pPrevTask->pCMDEnd
				    && pPrevTask->pCMDEnd[-1] == pTask->MVABase
				    && pPrevTask->pCMDEnd[0] == 0x10000001) {
					/* Copy Jump instruction */
					pPrevTask->pCMDEnd[-1] = pTask->pCMDEnd[-1];
					pPrevTask->pCMDEnd[0] = pTask->pCMDEnd[0];

					if (pNextTask)
						cmdq_core_reorder_task_array(pThread, thread, index);
					else
						pThread->nextCookie--;

					CMDQ_VERBOSE
					    ("WAIT: modify jump to 0x%08x (pPrev:0x%p, pTask:0x%p)\n",
					     pTask->pCMDEnd[-1], pPrevTask, pTask);

					/* Give up fetched command, invoke CMDQ HW to re-fetch command buffer again. */
					cmdq_core_invalidate_hw_fetched_buffer(thread);
					break;
				}
			}
		}
	} while (0);

	if (pThread->taskCount <= 0) {
		/* nothing left to run on this HW thread */
		cmdq_core_disable_HW_thread(thread);
	} else {
		do {
			/* Reset GCE thread when task state is ERROR or KILL */
			uint32_t backupCurrPC, backupEnd, backupCookieCnt;
			int threadPrio;

			if (TASK_STATE_DONE == pTask->taskState)
				break;

			/* Backup PC, End address, and GCE cookie count before reset GCE thread */
			backupCurrPC =
			    CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)));
			backupEnd = CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread)));
			backupCookieCnt = CMDQ_GET_COOKIE_CNT(thread);
			CMDQ_LOG
			    ("Reset Backup Thread PC: 0x%08x, End: 0x%08x, CookieCnt: 0x%08x\n",
			     backupCurrPC, backupEnd, backupCookieCnt);

			/* Reset GCE thread */
			if (cmdq_core_reset_HW_thread(thread) < 0) {
				status = -EFAULT;
				break;
			}
			CMDQ_REG_SET32(CMDQ_THR_INST_CYCLES(thread),
				       cmdq_core_get_task_timeout_cycle(pThread));

			/* Set PC & End address */
			CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread), CMDQ_PHYS_TO_AREG(backupCurrPC));
			CMDQ_REG_SET32(CMDQ_THR_END_ADDR(thread), CMDQ_PHYS_TO_AREG(backupEnd));

			/* bit 0-2 for priority level; */
			threadPrio = cmdq_get_func()->priority(pTask->scenario);
			CMDQ_MSG("RESET HW THREAD: set HW thread(%d), qos:%d\n", thread,
				 threadPrio);
			CMDQ_REG_SET32(CMDQ_THR_CFG(thread), threadPrio & 0x7);

			/* For loop thread, do not enable timeout */
			CMDQ_REG_SET32(CMDQ_THR_IRQ_ENABLE(thread),
				       pThread->loopCallback ? 0x011 : 0x013);
			if (pThread->loopCallback) {
				CMDQ_MSG("RESET HW THREAD: HW thread(%d) in loop func 0x%p\n",
					 thread, pThread->loopCallback);
			}

			/* Set GCE cookie count */
			CMDQ_REG_SET32(CMDQ_THR_EXEC_CNT(thread), backupCookieCnt);
			/* Enable HW thread */
			CMDQ_REG_SET32(CMDQ_THR_ENABLE_TASK(thread), 0x01);
		} while (0);
		cmdq_core_resume_HW_thread(thread);
	}
	spin_unlock_irqrestore(&gCmdqExecLock, flags);

	/* report the fatal error outside the spinlock */
	if (throwAEE) {
		const uint32_t op = (instA & 0xFF000000) >> 24;

		switch (op) {
		case CMDQ_CODE_WFE:
			/* wait-for-event: decode the event name for the report */
			CMDQ_AEE(module,
				 "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:WAIT EVENT:%s\n",
				 module, irqFlag, instA, instB,
				 cmdq_core_get_event_name(instA & (~0xFF000000)));
			break;
		default:
			CMDQ_AEE(module, "%s in CMDQ IRQ:0x%02x, INST:(0x%08x, 0x%08x), OP:%s\n",
				 module, irqFlag, instA, instB, cmdq_core_parse_op(op));
			break;
		}
	}
	return status;
}
  5066. static int32_t cmdq_core_wait_task_done(TaskStruct *pTask, long timeout_jiffies)
  5067. {
  5068. int32_t waitQ;
  5069. int32_t status;
  5070. uint32_t thread;
  5071. ThreadStruct *pThread = NULL;
  5072. status = 0; /* Default status */
  5073. thread = pTask->thread;
  5074. if (CMDQ_INVALID_THREAD == thread) {
  5075. CMDQ_PROF_MMP(cmdq_mmp_get_event()->wait_thread,
  5076. MMProfileFlagPulse, ((unsigned long)pTask), -1);
  5077. CMDQ_PROF_START(current->pid, "wait_for_thread");
  5078. CMDQ_LOG("pid:%d task:0x%p wait for valid thread first\n", current->pid, pTask);
  5079. /* wait for acquire thread (this is done by cmdq_core_consume_waiting_list); */
  5080. waitQ = wait_event_timeout(gCmdqThreadDispatchQueue,
  5081. (CMDQ_INVALID_THREAD != pTask->thread),
  5082. msecs_to_jiffies(CMDQ_ACQUIRE_THREAD_TIMEOUT_MS));
  5083. CMDQ_PROF_END(current->pid, "wait_for_thread");
  5084. if (0 == waitQ || CMDQ_INVALID_THREAD == pTask->thread) {
  5085. mutex_lock(&gCmdqTaskMutex);
  5086. /* it's possible that the task was just consumed now. */
  5087. /* so check again. */
  5088. if (CMDQ_INVALID_THREAD == pTask->thread) {
  5089. /* task may already released, or starved to death */
  5090. CMDQ_ERR("task 0x%p timeout with invalid thread\n", pTask);
  5091. cmdq_core_dump_task(pTask);
  5092. cmdq_core_dump_task_with_engine_flag(pTask->engineFlag);
  5093. /* remove from waiting list, */
  5094. /* so that it won't be consumed in the future */
  5095. list_del_init(&(pTask->listEntry));
  5096. mutex_unlock(&gCmdqTaskMutex);
  5097. return -EINVAL;
  5098. }
  5099. /* valid thread, so we keep going */
  5100. mutex_unlock(&gCmdqTaskMutex);
  5101. }
  5102. }
  5103. /* double confim if it get a valid thread */
  5104. thread = pTask->thread;
  5105. if ((0 > thread) || (CMDQ_MAX_THREAD_COUNT <= thread)) {
  5106. CMDQ_ERR("invalid thread %d in %s\n", thread, __func__);
  5107. return -EINVAL;
  5108. }
  5109. pThread = &(gCmdqContext.thread[thread]);
  5110. CMDQ_PROF_MMP(cmdq_mmp_get_event()->wait_task,
  5111. MMProfileFlagPulse, ((unsigned long)pTask), thread);
  5112. CMDQ_PROF_START(current->pid, "wait_for_task_done");
  5113. /* start to wait */
  5114. pTask->beginWait = sched_clock();
  5115. CMDQ_MSG("-->WAIT: task 0x%p on thread %d timeout: %d(ms) begin\n", pTask, thread,
  5116. jiffies_to_msecs(timeout_jiffies));
  5117. waitQ = cmdq_core_wait_task_done_with_timeout_impl(pTask, thread);
  5118. /* wake up! */
  5119. /* so the maximum total waiting time would be */
  5120. /* CMDQ_PREDUMP_TIMEOUT_MS * CMDQ_PREDUMP_RETRY_COUNT */
  5121. pTask->wakedUp = sched_clock();
  5122. CMDQ_MSG("WAIT: task 0x%p waitq=%d state=%d\n", pTask, waitQ, pTask->taskState);
  5123. CMDQ_PROF_END(current->pid, "wait_for_task_done");
  5124. status = (false == pTask->secData.isSecure) ?
  5125. cmdq_core_handle_wait_task_result_impl(pTask, thread, waitQ) :
  5126. cmdq_core_handle_wait_task_result_secure_impl(pTask, thread, waitQ);
  5127. CMDQ_MSG("<--WAIT: task 0x%p on thread %d end\n", pTask, thread);
  5128. return status;
  5129. }
  5130. static int32_t cmdq_core_exec_task_async_secure_impl(TaskStruct *pTask, int32_t thread)
  5131. {
  5132. int32_t status;
  5133. ThreadStruct *pThread;
  5134. int32_t cookie;
  5135. char longMsg[CMDQ_LONGSTRING_MAX];
  5136. uint32_t msgOffset;
  5137. int32_t msgMAXSize;
  5138. cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
  5139. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  5140. "-->EXEC: task 0x%p on thread %d begin, VABase: 0x%p,",
  5141. pTask, thread, pTask->pVABase);
  5142. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  5143. " MVABase: %pa, Size: %d, bufferSize: %d, scenario:%d, flag:0x%llx\n",
  5144. &(pTask->MVABase), pTask->commandSize, pTask->bufferSize,
  5145. pTask->scenario, pTask->engineFlag);
  5146. if (msgOffset > 0) {
  5147. /* print message */
  5148. CMDQ_MSG("%s", longMsg);
  5149. }
  5150. status = 0;
  5151. pThread = &(gCmdqContext.thread[thread]);
  5152. cmdq_sec_lock_secure_path();
  5153. do {
  5154. /* setup whole patah */
  5155. status = cmdq_sec_allocate_path_resource_unlocked(true);
  5156. if (0 > status)
  5157. break;
  5158. /* update task's thread info */
  5159. pTask->thread = thread;
  5160. pTask->irqFlag = 0;
  5161. pTask->taskState = TASK_STATE_BUSY;
  5162. /* insert task to pThread's task lsit, and */
  5163. /* delay HW config when entry SWd */
  5164. if (pThread->taskCount <= 0) {
  5165. cookie = 1;
  5166. cmdq_core_insert_task_from_thread_array_by_cookie(pTask, pThread, cookie,
  5167. true);
  5168. } else {
  5169. /* append directly */
  5170. cookie = pThread->nextCookie;
  5171. cmdq_core_insert_task_from_thread_array_by_cookie(pTask, pThread, cookie,
  5172. false);
  5173. }
  5174. pTask->trigger = sched_clock();
  5175. /* execute */
  5176. status = cmdq_sec_exec_task_async_unlocked(pTask, thread);
  5177. if (0 > status) {
  5178. /* config failed case, dump for more detail */
  5179. cmdq_core_attach_error_task(pTask, thread, NULL);
  5180. cmdq_core_turnoff_first_dump();
  5181. cmdq_core_remove_task_from_thread_array_when_secure_submit_fail(pThread, cookie);
  5182. }
  5183. } while (0);
  5184. cmdq_sec_unlock_secure_path();
  5185. return status;
  5186. }
/* Find the slot for a newly appended task in pThread->pCurTask[] and link
 * it into the HW jump chain, reordering by priority on the way.
 *
 * pLast:  in/out; on entry *pLast == pTask (the tail task); if pTask gets
 *         swapped behind a higher-age task during reorder, *pLast is
 *         updated to the new tail so the caller can program THR_END_ADDR
 * pTask:  the task being inserted
 * thread: HW thread index
 * loop:   number of queued tasks to walk backwards over (computed by the
 *         caller from the cookie distance)
 *
 * Walks backwards from the insertion cookie: tasks with lower priority
 * than pTask are shifted one slot toward the tail (their JUMP rewritten to
 * point at pTask); the walk stops at the first task with priority >= pTask
 * or at the head. Returns 0 on success, -EFAULT if the slot array is in an
 * inconsistent state.
 */
static inline int32_t cmdq_core_exec_find_task_slot(TaskStruct **pLast, TaskStruct *pTask,
						    int32_t thread, int32_t loop)
{
	int32_t status = 0;
	ThreadStruct *pThread;
	TaskStruct *pPrev;
	int32_t index;
	int32_t prev;
	int32_t cookie;

	pThread = &(gCmdqContext.thread[thread]);
	cookie = pThread->nextCookie;

	/* Traverse backwards to adjust tasks' order according to their priorities */
	for (prev = (cookie % cmdq_core_max_task_in_thread(thread)); loop > 0; loop--) {
		/* index = current candidate slot; prev = the slot before it
		 * (both wrap around the circular slot array) */
		index = prev;
		if (index < 0)
			index = cmdq_core_max_task_in_thread(thread) - 1;

		prev = index - 1;
		if (prev < 0)
			prev = cmdq_core_max_task_in_thread(thread) - 1;

		pPrev = pThread->pCurTask[prev];
		/* Maybe the job was killed, search for a new one; note loop is
		 * decremented here too, so each skipped empty slot consumes
		 * one remaining step */
		while ((NULL == pPrev) && (loop > 1)) {
			CMDQ_LOG("pPrev is NULL, prev:%d, loop:%d, index:%d\n", prev, loop, index);
			prev = prev - 1;
			if (prev < 0)
				prev = cmdq_core_max_task_in_thread(thread) - 1;
			pPrev = pThread->pCurTask[prev];
			loop--;
		}
		if (NULL == pPrev) {
			/* no live predecessor found at all: slot array is
			 * inconsistent, bail out */
			cmdq_core_attach_error_task(pTask, thread, NULL);
			CMDQ_ERR("Invalid task state for reorder %d %d\n", index, loop);
			status = -EFAULT;
			break;
		}
		if (loop <= 1) {
			/* reached the head of the walk: keep pTask at the
			 * current slot and chain pPrev's JUMP to it */
			CMDQ_MSG("Set current(%d) order for the new task, line:%d\n", index, __LINE__);
			CMDQ_MSG("Original PC: %pa, size: %d\n", &pPrev->MVABase, pTask->commandSize);
			CMDQ_MSG("Original instruction 0x%08x, 0x%08x\n", pPrev->pCMDEnd[0],
				 pPrev->pCMDEnd[-1]);
			pThread->pCurTask[index] = pTask;
			/* Jump: Absolute */
			pPrev->pCMDEnd[0] = 0x10000001;
			/* Jump to here */
			pPrev->pCMDEnd[-1] = pTask->MVABase;
			CMDQ_VERBOSE("EXEC: modify jump to %pa, line:%d\n", &(pTask->MVABase), __LINE__);
#ifndef CMDQ_APPEND_WITHOUT_SUSPEND
			/* re-fetch command buffer again. */
			cmdq_core_invalidate_hw_fetched_buffer(thread);
#endif
			break;
		}
		if (pPrev->priority < pTask->priority) {
			/* lower-priority predecessor: move it one slot toward
			 * the tail and let pTask continue walking forward */
			CMDQ_LOG("Switch prev(%d, 0x%p) and curr(%d, 0x%p) order\n",
				 prev, pPrev, index, pTask);
			pThread->pCurTask[index] = pPrev;
			pPrev->pCMDEnd[0] = pTask->pCMDEnd[0];
			pPrev->pCMDEnd[-1] = pTask->pCMDEnd[-1];

			/* Boost priority (aging) for the displaced task */
			pPrev->priority += CMDQ_MIN_AGE_VALUE;
			pPrev->reorder++;

			pThread->pCurTask[prev] = pTask;
			/* Jump: Absolute */
			pTask->pCMDEnd[0] = 0x10000001;
			/* Jump to here */
			pTask->pCMDEnd[-1] = pPrev->MVABase;
			CMDQ_VERBOSE("EXEC: modify jump to %pa, line:%d\n", &(pPrev->MVABase), __LINE__);
#ifndef CMDQ_APPEND_WITHOUT_SUSPEND
			/* re-fetch command buffer again. */
			cmdq_core_invalidate_hw_fetched_buffer(thread);
#endif
			if (*pLast == pTask) {
				/* pTask is no longer the tail: pPrev is */
				CMDQ_LOG("update pLast from 0x%p to 0x%p\n", pTask, pPrev);
				*pLast = pPrev;
			}
		} else {
			/* predecessor has equal/higher priority: insert pTask
			 * right after it and stop */
			CMDQ_MSG("Set current(%d) order for the new task, line:%d\n", index, __LINE__);
			CMDQ_MSG("Original PC: %pa, size: %d\n", &pPrev->MVABase, pTask->commandSize);
			CMDQ_MSG("Original instruction 0x%08x, 0x%08x\n", pPrev->pCMDEnd[0], pPrev->pCMDEnd[-1]);
			pThread->pCurTask[index] = pTask;
			/* Jump: Absolute */
			pPrev->pCMDEnd[0] = 0x10000001;
			/* Jump to here */
			pPrev->pCMDEnd[-1] = pTask->MVABase;
			CMDQ_VERBOSE("EXEC: modify jump to %pa, line:%d\n", &(pTask->MVABase), __LINE__);
#ifndef CMDQ_APPEND_WITHOUT_SUSPEND
			/* re-fetch command buffer again. */
			cmdq_core_invalidate_hw_fetched_buffer(thread);
#endif
			break;
		}
	}
	CMDQ_MSG("Reorder %d tasks for performance end, pLast:0x%p\n", loop, *pLast);
	return status;
}
/* Submit a task asynchronously on the normal path.
 *
 * pTask:  task to execute (command buffer already built, ends in EOC+JUMP)
 * thread: HW thread index to queue the task on
 *
 * Two cases under gCmdqExecLock:
 *  - empty thread: reset the HW thread, program PC/END/priority/IRQ
 *    registers and enable it with pTask as the first task;
 *  - busy thread: append pTask behind the queued tasks, either by setting
 *    PC directly (when HW already ran past EOC/JUMP of the last task) or
 *    by patching the previous task's JUMP via
 *    cmdq_core_exec_find_task_slot() with priority reorder.
 * Returns 0 on success or a negative error code.
 */
static int32_t cmdq_core_exec_task_async_impl(TaskStruct *pTask, int32_t thread)
{
	int32_t status;
	ThreadStruct *pThread;
	TaskStruct *pLast;
	unsigned long flags;
	int32_t loop;
	uint32_t minimum;
	uint32_t cookie;
	int threadPrio = 0;
	uint32_t EndAddr;
	char longMsg[CMDQ_LONGSTRING_MAX];
	uint32_t msgOffset;
	int32_t msgMAXSize;
	/* for no suspend thread, we shift END before JUMP */
	int32_t shiftEnd = 0;

	cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
	cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
			   "-->EXEC: task 0x%p on thread %d begin, VABase: 0x%p, MVABase: %pa,",
			   pTask, thread, pTask->pVABase, &(pTask->MVABase));
	cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
			   " Size: %d, bufferSize: %d, scenario:%d, flag:0x%llx\n",
			   pTask->commandSize, pTask->bufferSize, pTask->scenario,
			   pTask->engineFlag);
	if (msgOffset > 0) {
		/* print message */
		CMDQ_MSG("%s", longMsg);
	}

	status = 0;
	pThread = &(gCmdqContext.thread[thread]);

	pTask->trigger = sched_clock();

	spin_lock_irqsave(&gCmdqExecLock, flags);

	/* update task's thread info */
	pTask->thread = thread;
	pTask->irqFlag = 0;
	pTask->taskState = TASK_STATE_BUSY;

#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
	/* for loop command, we do not shift END before JUMP */
	if (pThread->loopCallback)
		shiftEnd = 0;
	else
		shiftEnd = CMDQ_INST_SIZE;
#endif

	if (pThread->taskCount <= 0) {
		bool enablePrefetch;

		/* thread is idle: full HW thread (re)initialization */
		CMDQ_MSG("EXEC: new HW thread(%d)\n", thread);

		if (cmdq_core_reset_HW_thread(thread) < 0) {
			spin_unlock_irqrestore(&gCmdqExecLock, flags);
			return -EFAULT;
		}

		CMDQ_REG_SET32(CMDQ_THR_INST_CYCLES(thread),
			       cmdq_core_get_task_timeout_cycle(pThread));
#ifdef _CMDQ_DISABLE_MARKER_
		enablePrefetch = cmdq_core_thread_prefetch_size(thread) > 0;
		if (enablePrefetch) {
			CMDQ_MSG("EXEC: set HW thread(%d) enable prefetch, size(%d)!\n",
				 thread, cmdq_core_thread_prefetch_size(thread));
			CMDQ_REG_SET32(CMDQ_THR_PREFETCH(thread), 0x1);
		}
#endif
		threadPrio = cmdq_get_func()->priority(pTask->scenario);
		CMDQ_MSG("EXEC: set HW thread(%d) pc:%pa, qos:%d\n",
			 thread, &pTask->MVABase, threadPrio);
		CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread), CMDQ_PHYS_TO_AREG(pTask->MVABase));
		EndAddr = CMDQ_PHYS_TO_AREG(pTask->MVABase + pTask->commandSize - shiftEnd);
		CMDQ_REG_SET32(CMDQ_THR_END_ADDR(thread), EndAddr);
		CMDQ_REG_SET32(CMDQ_THR_CFG(thread), threadPrio & 0x7);	/* bit 0-2 for priority level; */

		/* For loop thread, do not enable timeout */
		CMDQ_REG_SET32(CMDQ_THR_IRQ_ENABLE(thread), pThread->loopCallback ? 0x011 : 0x013);
		if (pThread->loopCallback) {
			CMDQ_MSG("EXEC: HW thread(%d) in loop func 0x%p\n", thread,
				 pThread->loopCallback);
		}

		/* attach task to thread */
		minimum = CMDQ_GET_COOKIE_CNT(thread);
		cmdq_core_insert_task_from_thread_array_by_cookie(pTask, pThread, (minimum + 1),
								  true);

		/* verify that we don't corrupt EOC + JUMP pattern */
		cmdq_core_verfiy_command_end(pTask);

		/* enable HW thread */
		CMDQ_MSG("enable HW thread(%d)\n", thread);
		CMDQ_PROF_MMP(cmdq_mmp_get_event()->thread_en,
			      MMProfileFlagPulse, thread, pThread->nextCookie - 1);
		CMDQ_REG_SET32(CMDQ_THR_ENABLE_TASK(thread), 0x01);
#ifdef CMDQ_MDP_MET_STATUS
		/* MET MMSYS : Primary Trigger start */
		if (met_mmsys_event_gce_thread_begin)
			met_mmsys_event_gce_thread_begin(thread, (uintptr_t) pTask, pTask->engineFlag,
							 (void *)pTask->pVABase, pTask->commandSize);
#endif
	} else {
		/* thread already running: append behind the queued tasks */
		CMDQ_MSG("EXEC: reuse HW thread(%d), taskCount:%d\n", thread, pThread->taskCount);
#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
		cmdqCoreClearEvent(CMDQ_SYNC_TOKEN_APPEND_THR(thread));
#else
		status = cmdq_core_suspend_HW_thread(thread, __LINE__);
		if (status < 0) {
			spin_unlock_irqrestore(&gCmdqExecLock, flags);
			return status;
		}
		CMDQ_REG_SET32(CMDQ_THR_INST_CYCLES(thread),
			       cmdq_core_get_task_timeout_cycle(pThread));
#endif
		cookie = pThread->nextCookie;

		/* Boundary case tested: EOC have been executed, but JUMP is not executed */
		/* Thread PC: 0x9edc0dd8, End: 0x9edc0de0, Curr Cookie: 1, Next Cookie: 2 */
		/* PC = END - 8, EOC is executed */
		/* PC = END - 0, All CMDs are executed */
		if ((CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread))) ==
		     (CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread))) - 8)) ||
		    (CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread))) ==
		     (CMDQ_AREG_TO_PHYS(CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread))) - 0))) {
			/* HW already finished the previous buffer: jump chain
			 * cannot help, so point PC at pTask directly */
			cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
			cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
					   "EXEC: Set HW thread(%d) pc from 0x%08x(end:0x%08x) to %pa,",
					   thread,
					   CMDQ_REG_GET32(CMDQ_THR_CURR_ADDR(thread)),
					   CMDQ_REG_GET32(CMDQ_THR_END_ADDR(thread)),
					   &pTask->MVABase);
			cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
					   " oriNextCookie:%d, oriTaskCount:%d\n",
					   cookie, pThread->taskCount);
			if (msgOffset > 0) {
				/* print message */
				CMDQ_LOG("%s", longMsg);
			}

			/* set to pTask directly */
			CMDQ_REG_SET32(CMDQ_THR_CURR_ADDR(thread),
				       CMDQ_PHYS_TO_AREG(pTask->MVABase));
			EndAddr = CMDQ_PHYS_TO_AREG(pTask->MVABase + pTask->commandSize - shiftEnd);
			CMDQ_REG_SET32(CMDQ_THR_END_ADDR(thread), EndAddr);

			pThread->pCurTask[cookie % cmdq_core_max_task_in_thread(thread)] = pTask;
			pThread->taskCount++;
			pThread->allowDispatching = 1;
		} else {
			CMDQ_MSG("Connect new task's MVA to previous one\n");

			/* Current task that should be processed */
			minimum = CMDQ_GET_COOKIE_CNT(thread) + 1;
			if (minimum > CMDQ_MAX_COOKIE_VALUE)
				minimum = 0;

			/* Calculate loop count to adjust the tasks' order */
			if (minimum <= cookie) {
				loop = cookie - minimum;
			} else {
				/* Counter wrapped */
				loop = (CMDQ_MAX_COOKIE_VALUE - minimum + 1) + cookie;
			}
			CMDQ_MSG("Reorder task in range [%d, %d] with count %d\n", minimum, cookie, loop);

			/* ALPS01672377 */
			/* .note pThread->taskCount-- when remove task from pThread in ISR */
			/* .In multiple SW clients or async case, */
			/*  clients may continue submit tasks with overlap engines */
			/*  it's okay 0 = abs(pThread->nextCookie, THR_CNT+1) when... */
			/*  .submit task_1, trigger GCE */
			/*  .submit task_2: */
			/*   .GCE exec task1 done */
			/*   .task_2 lock execLock when insert task to thread */
			/*   .task 1's IRQ */
			if (loop < 0) {
				cmdq_core_dump_task_in_thread(thread, true, true, false);
				cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
				cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
						   "Invalid task count(%d) in thread %d for reorder,",
						   loop, thread);
				cmdqCoreLongString(true, longMsg, &msgOffset, &msgMAXSize,
						   " nextCookie:%d, nextCookieHW:%d, pTask:%p\n",
						   pThread->nextCookie, minimum, pTask);
				if (msgOffset > 0) {
					/* print message */
					CMDQ_AEE("CMDQ", "%s", longMsg);
				}
#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
				cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_APPEND_THR(thread));
#endif
				spin_unlock_irqrestore(&gCmdqExecLock, flags);
				return -EFAULT;
			}

			if (loop > cmdq_core_max_task_in_thread(thread)) {
				CMDQ_LOG("loop = %d, execeed max task in thread", loop);
				loop = loop % cmdq_core_max_task_in_thread(thread);
			}
			CMDQ_MSG("Reorder %d tasks for performance begin\n", loop);

			/* By default, pTask is the last task, and insert [cookie % CMDQ_MAX_TASK_IN_THREAD] */
			pLast = pTask;

			status = cmdq_core_exec_find_task_slot(&pLast, pTask, thread, loop);
			if (status < 0) {
#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
				cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_APPEND_THR(thread));
#endif
				spin_unlock_irqrestore(&gCmdqExecLock, flags);
				CMDQ_AEE("CMDQ", "Invalid task state for reorder.\n");
				return status;
			}

			/* We must set memory barrier here to make sure we modify jump before enable thread */
			smp_mb();

			EndAddr = CMDQ_PHYS_TO_AREG(pLast->MVABase + pLast->commandSize - shiftEnd);
			CMDQ_REG_SET32(CMDQ_THR_END_ADDR(thread), EndAddr);
			pThread->taskCount++;
			pThread->allowDispatching = 1;
		}

		pThread->nextCookie += 1;
		if (pThread->nextCookie > CMDQ_MAX_COOKIE_VALUE) {
			/* Reach the maximum cookie */
			pThread->nextCookie = 0;
		}

		/* verify that we don't corrupt EOC + JUMP pattern */
		cmdq_core_verfiy_command_end(pTask);

		/* resume HW thread */
		CMDQ_PROF_MMP(cmdq_mmp_get_event()->thread_en,
			      MMProfileFlagPulse, thread, pThread->nextCookie - 1);
#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
		cmdqCoreSetEvent(CMDQ_SYNC_TOKEN_APPEND_THR(thread));
#else
		cmdq_core_resume_HW_thread(thread);
#endif
	}
	spin_unlock_irqrestore(&gCmdqExecLock, flags);
	CMDQ_MSG("<--EXEC: status: %d\n", status);

	return status;
}
#ifdef CMDQ_PROFILE
/* Human-readable labels for the CMDQ HW threads, indexed by thread ID;
 * used only when profiling is enabled.
 * NOTE(review): the initializer lists exactly 16 names -- if
 * CMDQ_MAX_THREAD_COUNT is larger on some platform, trailing entries are
 * NULL; confirm against the platform header.
 */
static const char *gCmdqThreadLabel[CMDQ_MAX_THREAD_COUNT] = {
	"CMDQ_IRQ_THR_0",
	"CMDQ_IRQ_THR_1",
	"CMDQ_IRQ_THR_2",
	"CMDQ_IRQ_THR_3",
	"CMDQ_IRQ_THR_4",
	"CMDQ_IRQ_THR_5",
	"CMDQ_IRQ_THR_6",
	"CMDQ_IRQ_THR_7",
	"CMDQ_IRQ_THR_8",
	"CMDQ_IRQ_THR_9",
	"CMDQ_IRQ_THR_10",
	"CMDQ_IRQ_THR_11",
	"CMDQ_IRQ_THR_12",
	"CMDQ_IRQ_THR_13",
	"CMDQ_IRQ_THR_14",
	"CMDQ_IRQ_THR_15",
};
#endif
/*
 * System suspend hook for the CMDQ core.
 *
 * If any HW engine or thread is still busy (module entry refuses suspend,
 * thread reference count non-zero, or the loaded-thread status register
 * shows activity), all active tasks are force-killed and every HW thread
 * is disabled so the system can suspend safely.
 *
 * Returns 0 always -- suspend is never refused.
 */
int32_t cmdqCoreSuspend(void)
{
	unsigned long flags = 0L;
	EngineStruct *pEngine = NULL;
	uint32_t execThreads = 0x0;
	int refCount = 0;
	bool killTasks = false;
	struct TaskStruct *pTask = NULL;
	struct list_head *p = NULL;
	int i = 0;

	/* destroy secure path notify thread */
	cmdq_core_stop_secure_path_notify_thread();

	pEngine = gCmdqContext.engine;
	execThreads = CMDQ_REG_GET32(CMDQ_CURR_LOADED_THR);
	refCount = atomic_read(&gCmdqThreadUsage);

	if (0 > cmdq_get_func()->moduleEntrySuspend(pEngine)) {
		CMDQ_ERR("[SUSPEND] MDP running, kill tasks. threads:0x%08x, ref:%d\n", execThreads,
			 refCount);
		killTasks = true;
	} else if ((refCount > 0) || (0x80000000 & execThreads)) {
		/* NOTE(review): bit31 of CMDQ_CURR_LOADED_THR seems to flag
		 * overall thread activity -- confirm against the HW spec.
		 */
		CMDQ_ERR("[SUSPEND] other running, kill tasks. threads:0x%08x, ref:%d\n",
			 execThreads, refCount);
		killTasks = true;
	}

	/*
	 * We need to ensure the system is ready to suspend,
	 * so kill all running CMDQ tasks
	 * and release HW engines.
	 */
	if (killTasks) {
		/* print active tasks */
		CMDQ_ERR("[SUSPEND] active tasks during suspend:\n");
		list_for_each(p, &gCmdqContext.taskActiveList) {
			pTask = list_entry(p, struct TaskStruct, listEntry);
			if (true == cmdq_core_is_valid_in_active_list(pTask))
				cmdq_core_dump_task(pTask);
		}

		/* remove all active task from thread */
		CMDQ_ERR("[SUSPEND] remove all active tasks\n");
		list_for_each(p, &gCmdqContext.taskActiveList) {
			pTask = list_entry(p, struct TaskStruct, listEntry);
			if (pTask->thread != CMDQ_INVALID_THREAD) {
				spin_lock_irqsave(&gCmdqExecLock, flags);
				cmdq_core_force_remove_task_from_thread(pTask, pTask->thread);
				/* mark KILLED so a later waiter won't release the thread again */
				pTask->taskState = TASK_STATE_KILLED;
				spin_unlock_irqrestore(&gCmdqExecLock, flags);

				/* release all thread and mark all active tasks as "KILLED" */
				/* (so that thread won't release again) */
				/* NOTE(review): this log is emitted once per task, not once */
				CMDQ_ERR("[SUSPEND] release all threads and HW clocks\n");
				cmdq_core_release_thread(pTask);
			}
		}

		/* TODO: skip secure path thread... */
		/* disable all HW thread */
		CMDQ_ERR("[SUSPEND] disable all HW threads\n");
		for (i = 0; i < CMDQ_MAX_THREAD_COUNT; ++i)
			cmdq_core_disable_HW_thread(i);

		/* reset all threadStruct */
		memset(&gCmdqContext.thread[0], 0, sizeof(gCmdqContext.thread));
		cmdq_core_reset_thread_struct();

		/* reset all engineStruct */
		memset(&gCmdqContext.engine[0], 0, sizeof(gCmdqContext.engine));
		cmdq_core_reset_engine_struct();
	}

	spin_lock_irqsave(&gCmdqThreadLock, flags);
	gCmdqSuspended = true;
	spin_unlock_irqrestore(&gCmdqThreadLock, flags);

	/* ALWAYS allow suspend */
	return 0;
}
/*
 * Shared resume implementation (name keeps the original "reume" spelling;
 * renaming would break external callers).
 *
 * Clears the suspended flag and re-queues the consume work item so tasks
 * that were queued while suspended get processed.
 *
 * @tag: label used only in log messages.
 * Returns 0 always.
 */
int32_t cmdq_core_reume_impl(const char *tag)
{
	unsigned long flags = 0L;
	int refCount = 0;

	spin_lock_irqsave(&gCmdqThreadLock, flags);

	refCount = atomic_read(&gCmdqThreadUsage);
	CMDQ_MSG("[%s] resume, refCount:%d\n", tag, refCount);

	gCmdqSuspended = false;

	/* during suspending, there may be queued tasks. */
	/* we should process them if any. */
	if (!work_pending(&gCmdqContext.taskConsumeWaitQueueItem)) {
		CMDQ_MSG("[%s] there are undone task, process them\n", tag);
		/* we use system global work queue (kernel thread kworker/n) */
		CMDQ_PROF_MMP(cmdq_mmp_get_event()->consume_add, MMProfileFlagPulse, 0, 0);
		queue_work(gCmdqContext.taskConsumeWQ, &gCmdqContext.taskConsumeWaitQueueItem);
	}

	spin_unlock_irqrestore(&gCmdqThreadLock, flags);
	return 0;
}
/*
 * Power-management resume callback.
 * Intentionally a no-op: real resume work is deferred to
 * cmdqCoreResumedNotifier() (see the TEE limitation notes there).
 */
int32_t cmdqCoreResume(void)
{
	CMDQ_VERBOSE("[RESUME] do nothing\n");
	/* do nothing */
	return 0;
}
/*
 * Late resume notifier: performs the actual resume work after user-space
 * processes have been unfrozen (see limitation notes below).
 */
int32_t cmdqCoreResumedNotifier(void)
{
	/* TEE project limitation:
	 * .t-base daemon process is available after process-unfreeze
	 * .need t-base daemon for communication to secure world
	 * .M4U port security setting backup/restore needs to enter secure world
	 * .M4U port security setting is access normal PA
	 *
	 * Delay resume timing until process-unfreeze done in order to
	 * ensure M4U driver had restored M4U port security setting
	 */
	CMDQ_VERBOSE("[RESUME] cmdqCoreResumedNotifier\n");
	return cmdq_core_reume_impl("RESUME_NOTIFIER");
}
/*
 * Execute a task on a HW thread, retrying on transient failures.
 *
 * For non-loop tasks the command buffer is first patched:
 *  - secure tasks get the secure-handle instructions inserted;
 *  - with CMDQ_APPEND_WITHOUT_SUSPEND, a wait-for-event instruction is
 *    inserted before the trailing EOC+JUMP pair so a later task can be
 *    appended without suspending the HW thread.
 *
 * Returns the execution status; negative on unrecoverable failure.
 */
static int32_t cmdq_core_exec_task_async_with_retry(TaskStruct *pTask, int32_t thread)
{
	int32_t retry = 0;
	int32_t status = 0;
	ThreadStruct *pThread;

	pThread = &(gCmdqContext.thread[thread]);
	if (pThread->loopCallback) {
		/* Do not insert Wait for loop due to loop no need append */
		CMDQ_MSG("Ignore insert wait for loop task\n");
	} else {
		if (true == pTask->secData.isSecure)
			status = cmdq_core_insert_secure_handle_instr(pTask, thread);

#ifdef CMDQ_APPEND_WITHOUT_SUSPEND
		/* Shift JUMP and EOC down by one instruction (two 32-bit words)
		 * to open a slot; copies run high-to-low so nothing is clobbered.
		 */
		pTask->pCMDEnd += 2;
		pTask->pCMDEnd[0] = pTask->pCMDEnd[-2];
		pTask->pCMDEnd[-1] = pTask->pCMDEnd[-3];
		pTask->pCMDEnd[-2] = pTask->pCMDEnd[-4];
		pTask->pCMDEnd[-3] = pTask->pCMDEnd[-5];

		/* Update original JUMP to wait event */
		/* Sync: Op and sync event */
		pTask->pCMDEnd[-4] = (CMDQ_CODE_WFE << 24) | CMDQ_SYNC_TOKEN_APPEND_THR(thread);
		/* Sync: Wait and no clear */
		pTask->pCMDEnd[-5] = ((0 << 31) | (1 << 15) | 1);
		pTask->commandSize += CMDQ_INST_SIZE;

		/* make sure instructions are synced in DRAM */
		smp_mb();
		CMDQ_MSG
		    ("After insert wait: pTask 0x%p last 3 instr (%08x:%08x, %08x:%08x, %08x:%08x)\n",
		     pTask,
		     pTask->pCMDEnd[-5], pTask->pCMDEnd[-4], pTask->pCMDEnd[-3],
		     pTask->pCMDEnd[-2], pTask->pCMDEnd[-1], pTask->pCMDEnd[0]);
#endif
	}

	if (status < 0)
		return status;

	do {
		/* the EOC + JUMP tail must be intact before handing to HW */
		if (false == cmdq_core_verfiy_command_end(pTask)) {
			status = -EFAULT;
			break;
		}

		/* Save command buffer dump (first attempt only) */
		if (0 == retry)
			cmdq_core_save_command_buffer_dump(pTask);

		status = (false == pTask->secData.isSecure) ?
		    (cmdq_core_exec_task_async_impl(pTask, thread)) :
		    (cmdq_core_exec_task_async_secure_impl(pTask, thread));
		if (status >= 0)
			break;

		/* do not retry tasks that already failed terminally */
		if ((TASK_STATE_KILLED == pTask->taskState) ||
		    (TASK_STATE_ERROR == pTask->taskState) ||
		    (TASK_STATE_ERR_IRQ == pTask->taskState)) {
			CMDQ_ERR("cmdq_core_exec_task_async_impl fail\n");
			status = -EFAULT;
			break;
		}

		++retry;
	} while (retry < CMDQ_MAX_RETRY_COUNT);

	return status;
}
/*
 * Consume the waiting-task list: for each queued task try to acquire a HW
 * thread matching its engine flags / priority, move it to the active list
 * and start execution. Tasks that cannot get a thread stay queued.
 *
 * Called from the consume work item and synchronously on submit; holds
 * gCmdqTaskMutex for the whole scan.
 *
 * Returns the status of the last attempted execution (0 if none failed).
 */
static int32_t cmdq_core_consume_waiting_list(struct work_struct *_ignore)
{
	struct list_head *p, *n = NULL;
	struct TaskStruct *pTask = NULL;
	struct ThreadStruct *pThread = NULL;
	int32_t thread = CMDQ_INVALID_THREAD;
	int32_t status = 0;
	bool threadAcquired = false;
	CMDQ_HW_THREAD_PRIORITY_ENUM thread_prio = CMDQ_THR_PRIO_NORMAL;
	CMDQ_TIME consumeTime;
	int32_t waitingTimeMS;
	bool needLog = false;
	bool dumpTriggerLoop = false;

	/* when we're suspending, do not execute any tasks. delay & hold them. */
	if (gCmdqSuspended)
		return status;

	CMDQ_PROF_START(current->pid, __func__);
	CMDQ_PROF_MMP(cmdq_mmp_get_event()->consume_done, MMProfileFlagStart, current->pid, 0);
	consumeTime = sched_clock();

	mutex_lock(&gCmdqTaskMutex);

	threadAcquired = false;

	/* scan and remove (if executed) waiting tasks */
	list_for_each_safe(p, n, &gCmdqContext.taskWaitList) {
		pTask = list_entry(p, struct TaskStruct, listEntry);
		thread_prio = cmdq_get_func()->priority(pTask->scenario);

		CMDQ_MSG("-->THREAD: try acquire thread for task: 0x%p, thread_prio: %d\n",
			 pTask, thread_prio);
		CMDQ_MSG("-->THREAD: task_prio: %d, flag: 0x%llx, scenario:%d begin\n",
			 pTask->priority, pTask->engineFlag, pTask->scenario);

		/* enable extra logging once a task waited past the pre-dump timeout */
		CMDQ_GET_TIME_IN_MS(pTask->submit, consumeTime, waitingTimeMS);
		needLog = waitingTimeMS >= CMDQ_PREDUMP_TIMEOUT_MS;

		/* Allocate hw thread */
		thread = cmdq_core_acquire_thread(pTask->engineFlag,
						  thread_prio, pTask->scenario, needLog,
						  pTask->secData.isSecure);
		if (CMDQ_INVALID_THREAD == thread) {
			/* have to wait, remain in wait list */
			CMDQ_MSG("<--THREAD: acquire thread fail, need to wait\n");
			if (true == needLog) {
				/* task wait too long */
				CMDQ_ERR("acquire thread fail, task(0x%p), thread_prio(%d), flag(0x%llx)\n",
					 pTask, thread_prio, pTask->engineFlag);
				dumpTriggerLoop =
				    (CMDQ_SCENARIO_PRIMARY_DISP == pTask->scenario) ?
				    (true) : (dumpTriggerLoop);
			}
			continue;
		}

		pThread = &gCmdqContext.thread[thread];

		/* some task is ready to run */
		threadAcquired = true;

		/* Assign loop function if the thread should be a loop thread */
		pThread->loopCallback = pTask->loopCallback;
		pThread->loopData = pTask->loopData;

		/* Start execution, */
		/* remove from wait list and put into active list */
		list_del_init(&(pTask->listEntry));
		list_add_tail(&(pTask->listEntry), &gCmdqContext.taskActiveList);

		CMDQ_MSG("<--THREAD: acquire thread w/flag: 0x%llx on thread(%d): 0x%p end\n",
			 pTask->engineFlag, thread, pThread);

		/* Run task on thread */
		status = cmdq_core_exec_task_async_with_retry(pTask, thread);
		if (status < 0) {
			CMDQ_ERR
			    ("<--THREAD: cmdq_core_exec_task_async_with_retry fail, release task 0x%p\n",
			     pTask);
			cmdq_core_track_task_record(pTask, thread);
			cmdq_core_release_thread(pTask);
			cmdq_core_release_task_unlocked(pTask);
			pTask = NULL;
		}
	}

	if (dumpTriggerLoop) {
		/* HACK: observe trigger loop status when acquire config thread failed. */
		int32_t dumpThread = cmdq_get_func()->dispThread(CMDQ_SCENARIO_PRIMARY_DISP);

		cmdq_core_dump_disp_trigger_loop_mini("ACQUIRE");
		cmdq_core_dump_thread_pc(dumpThread);
	}

	if (threadAcquired) {
		/* notify some task's SW thread to change their waiting state. */
		/* (if they already called cmdqCoreWaitResultAndReleaseTask()) */
		wake_up_all(&gCmdqThreadDispatchQueue);
	}

	mutex_unlock(&gCmdqTaskMutex);

	CMDQ_PROF_END(current->pid, __func__);
	CMDQ_PROF_MMP(cmdq_mmp_get_event()->consume_done, MMProfileFlagEnd, current->pid, 0);

	return status;
}
  5779. static void cmdqCoreConsumeWaitQueueItem(struct work_struct *_ignore)
  5780. {
  5781. int32_t status;
  5782. status = cmdq_core_consume_waiting_list(_ignore);
  5783. }
  5784. int32_t cmdqCoreSubmitTaskAsyncImpl(cmdqCommandStruct *pCommandDesc,
  5785. CmdqInterruptCB loopCB,
  5786. unsigned long loopData, TaskStruct **ppTaskOut)
  5787. {
  5788. struct TaskStruct *pTask = NULL;
  5789. int32_t status = 0;
  5790. if (CMDQ_SCENARIO_TRIGGER_LOOP != pCommandDesc->scenario)
  5791. cmdq_core_verfiy_command_desc_end(pCommandDesc);
  5792. CMDQ_MSG("-->SUBMIT_ASYNC: cmd 0x%p begin\n", CMDQ_U32_PTR(pCommandDesc->pVABase));
  5793. CMDQ_PROF_START(current->pid, __func__);
  5794. CMDQ_PROF_MMP(cmdq_mmp_get_event()->alloc_task, MMProfileFlagStart, current->pid, 0);
  5795. /* Allocate Task. This creates a new task */
  5796. /* and put into tail of waiting list */
  5797. pTask = cmdq_core_acquire_task(pCommandDesc, loopCB, loopData);
  5798. CMDQ_PROF_MMP(cmdq_mmp_get_event()->alloc_task, MMProfileFlagEnd, current->pid, 0);
  5799. if (NULL == pTask) {
  5800. CMDQ_PROF_END(current->pid, __func__);
  5801. return -EFAULT;
  5802. }
  5803. if (NULL != ppTaskOut)
  5804. *ppTaskOut = pTask;
  5805. /* Try to lock resource base on engine flag */
  5806. cmdqCoreLockResource(pTask->engineFlag, false);
  5807. /* consume the waiting list. */
  5808. /* this may or may not execute the task, */
  5809. /* depending on available threads. */
  5810. status = cmdq_core_consume_waiting_list(NULL);
  5811. CMDQ_MSG("<--SUBMIT_ASYNC: task: 0x%p end\n", CMDQ_U32_PTR(pCommandDesc->pVABase));
  5812. CMDQ_PROF_END(current->pid, __func__);
  5813. return status;
  5814. }
  5815. int32_t cmdqCoreSubmitTaskAsync(cmdqCommandStruct *pCommandDesc,
  5816. CmdqInterruptCB loopCB,
  5817. unsigned long loopData, TaskStruct **ppTaskOut)
  5818. {
  5819. int32_t status = 0;
  5820. TaskStruct *pTask = NULL;
  5821. if (true == pCommandDesc->secData.isSecure) {
  5822. status = cmdq_core_start_secure_path_notify_thread();
  5823. if (0 > status)
  5824. return status;
  5825. }
  5826. status = cmdqCoreSubmitTaskAsyncImpl(pCommandDesc, loopCB, loopData, &pTask);
  5827. if (NULL != ppTaskOut)
  5828. *ppTaskOut = pTask;
  5829. return status;
  5830. }
/*
 * Release a task that is (or was) queued on a HW thread.
 *
 * Loop tasks own their thread exclusively, so the thread is suspended and
 * disabled outright; normal tasks are force-removed and the thread is
 * resumed if other tasks remain on it.
 *
 * Returns 0 on success, -EFAULT if the task has no thread assigned.
 */
int32_t cmdqCoreReleaseTask(TaskStruct *pTask)
{
	unsigned long flags;
	int32_t status = 0;
	int32_t thread = pTask->thread;
	struct ThreadStruct *pThread = NULL;

	CMDQ_MSG("<--TASK: cmdqCoreReleaseTask 0x%p\n", pTask);

	if (CMDQ_INVALID_THREAD == thread) {
		CMDQ_ERR("cmdqCoreReleaseTask, thread is invalid (%d)\n", thread);
		return -EFAULT;
	}

	pThread = &(gCmdqContext.thread[thread]);

	/* NOTE(review): pThread points into a static array, so this NULL
	 * check can never fail; kept to preserve original structure.
	 */
	if (NULL != pThread) {
		/* this task is being executed (or queued) on a HW thread */
		/* get SW lock first to ensure atomic access HW */
		spin_lock_irqsave(&gCmdqExecLock, flags);
		/* make sure instructions are really in DRAM */
		smp_mb();
		if (pThread->loopCallback) {
			/* a loop thread has only 1 task involved */
			/* so we can release thread directly */
			/* otherwise we need to connect remaining tasks */
			BUG_ON(pThread->taskCount > 1);

			/* suspend and reset the thread */
			status = cmdq_core_suspend_HW_thread(thread, __LINE__);
			BUG_ON(status < 0);
			pThread->taskCount = 0;
			cmdq_core_disable_HW_thread(thread);
		} else {
			/* TODO: we should check thread enabled or not before resume it. */
			status = cmdq_core_force_remove_task_from_thread(pTask, thread);
			if (pThread->taskCount > 0)
				cmdq_core_resume_HW_thread(thread);
		}
		spin_unlock_irqrestore(&gCmdqExecLock, flags);
		wake_up(&gCmdWaitQueue[thread]);
	}

	cmdq_core_track_task_record(pTask, thread);
	cmdq_core_release_thread(pTask);
	cmdq_core_auto_release_task(pTask);
	CMDQ_MSG("-->TASK: cmdqCoreReleaseTask 0x%p end\n", pTask);
	return 0;
}
/*
 * Convenience wrapper: wait for task completion without reading back any
 * register results, then release the task.
 */
int32_t cmdqCoreWaitAndReleaseTask(TaskStruct *pTask, long timeout_jiffies)
{
	return cmdqCoreWaitResultAndReleaseTask(pTask, NULL, timeout_jiffies);
}
  5878. int32_t cmdqCoreWaitResultAndReleaseTask(TaskStruct *pTask, cmdqRegValueStruct *pResult,
  5879. long timeout_jiffies)
  5880. {
  5881. int32_t status;
  5882. int32_t thread;
  5883. int i;
  5884. if (NULL == pTask) {
  5885. CMDQ_ERR("cmdqCoreWaitAndReleaseTask err ptr=0x%p\n", pTask);
  5886. return -EFAULT;
  5887. }
  5888. if (pTask->taskState == TASK_STATE_IDLE) {
  5889. CMDQ_ERR("cmdqCoreWaitAndReleaseTask task=0x%p is IDLE\n", pTask);
  5890. return -EFAULT;
  5891. }
  5892. CMDQ_PROF_START(current->pid, __func__);
  5893. /* */
  5894. /* wait for task finish */
  5895. thread = pTask->thread;
  5896. status = cmdq_core_wait_task_done(pTask, timeout_jiffies);
  5897. /* */
  5898. /* retrieve result */
  5899. if (pResult && pResult->count) {
  5900. /* clear results */
  5901. memset(CMDQ_U32_PTR(pResult->regValues), 0,
  5902. pResult->count * sizeof(CMDQ_U32_PTR(pResult->regValues)[0]));
  5903. mutex_lock(&gCmdqTaskMutex);
  5904. for (i = 0; i < pResult->count && i < pTask->regCount; ++i) {
  5905. /* fill results */
  5906. CMDQ_U32_PTR(pResult->regValues)[i] = pTask->regResults[i];
  5907. }
  5908. mutex_unlock(&gCmdqTaskMutex);
  5909. }
  5910. cmdq_core_track_task_record(pTask, thread);
  5911. cmdq_core_release_thread(pTask);
  5912. cmdq_core_auto_release_task(pTask);
  5913. #ifdef CMDQ_SECURE_PATH_CONSUME_AGAIN
  5914. if (true == g_cmdq_consume_again) {
  5915. cmdq_core_add_consume_task();
  5916. g_cmdq_consume_again = false;
  5917. }
  5918. #endif
  5919. CMDQ_PROF_END(current->pid, __func__);
  5920. return status;
  5921. }
  5922. static void cmdq_core_auto_release_work(struct work_struct *workItem)
  5923. {
  5924. int32_t status = 0;
  5925. TaskStruct *pTask = NULL;
  5926. CmdqAsyncFlushCB finishCallback = NULL;
  5927. uint32_t userData = 0;
  5928. uint32_t *pCmd = NULL;
  5929. int32_t commandSize = 0;
  5930. pTask = container_of(workItem, struct TaskStruct, autoReleaseWork);
  5931. if (pTask) {
  5932. finishCallback = pTask->flushCallback;
  5933. userData = pTask->flushData;
  5934. commandSize = pTask->commandSize;
  5935. pCmd = kzalloc(commandSize, GFP_KERNEL);
  5936. memcpy(pCmd, pTask->pVABase, commandSize);
  5937. status = cmdqCoreWaitResultAndReleaseTask(pTask,
  5938. NULL,
  5939. msecs_to_jiffies
  5940. (CMDQ_DEFAULT_TIMEOUT_MS));
  5941. CMDQ_VERBOSE("[Auto Release] released pTask=%p, status=%d\n", pTask, status);
  5942. CMDQ_PROF_MMP(cmdq_mmp_get_event()->autoRelease_done,
  5943. MMProfileFlagPulse, ((unsigned long)pTask), current->pid);
  5944. /* Notify user */
  5945. if (finishCallback) {
  5946. CMDQ_VERBOSE("[Auto Release] call user callback %p with data 0x%08x\n",
  5947. finishCallback, userData);
  5948. if (0 > finishCallback(userData)) {
  5949. CMDQ_LOG
  5950. ("[DEBUG]user complains execution abnormal, dump command...\n");
  5951. CMDQ_LOG("======TASK 0x%p command (%d) START\n", pTask,
  5952. commandSize);
  5953. cmdqCoreDumpCommandMem(pCmd, commandSize);
  5954. CMDQ_LOG("======TASK 0x%p command END\n", pTask);
  5955. }
  5956. }
  5957. kfree(pCmd);
  5958. pCmd = NULL;
  5959. pTask = NULL;
  5960. }
  5961. }
/*
 * Schedule asynchronous release of a task on a work queue.
 *
 * The work item is embedded in the task itself and must only be
 * INIT_WORK'ed once; useWorkQueue guards against double initialization.
 * The work is queued on the per-thread auto-release queue when the task
 * has (or can be statically mapped to) a HW thread, otherwise on the
 * global auto-release queue.
 *
 * Returns 0 always (errors are only logged).
 */
int32_t cmdqCoreAutoReleaseTask(TaskStruct *pTask)
{
	int32_t threadNo = CMDQ_INVALID_THREAD;
	bool isSecure;

	if (NULL == pTask) {
		/* Error occurs when Double INIT_WORK */
		CMDQ_ERR("[Double INIT WORK] pTask is NULL");
		return 0;
	}

	if (NULL == pTask->pCMDEnd || NULL == pTask->pVABase) {
		/* Error occurs when Double INIT_WORK */
		CMDQ_ERR("[Double INIT WORK] pTask(%p) is already released", pTask);
		return 0;
	}

	/* the work item is embedded in pTask already */
	/* but we need to initialize it */
	if (false == pTask->useWorkQueue) {
		/* use work queue to release task */
		INIT_WORK(&pTask->autoReleaseWork, cmdq_core_auto_release_work);
	} else {
		/* Error occurs when Double INIT_WORK */
		CMDQ_ERR("[Double INIT WORK] useWorkQueue is already TRUE, pTask(%p)", pTask);
	}
	pTask->useWorkQueue = true;

	CMDQ_PROF_MMP(cmdq_mmp_get_event()->autoRelease_add,
		      MMProfileFlagPulse, ((unsigned long)pTask), pTask->thread);

	/* Put auto release task to corresponding thread's queue */
	if (CMDQ_INVALID_THREAD != pTask->thread) {
		queue_work(gCmdqContext.taskThreadAutoReleaseWQ[pTask->thread],
			   &pTask->autoReleaseWork);
	} else {
		/* if task does not belong thread, use static dispatch thread at first, */
		/* otherwise, use global context workqueue */
		isSecure = pTask->secData.isSecure;
		threadNo = cmdq_get_func()->getThreadID(pTask->scenario, isSecure);
		if (CMDQ_INVALID_THREAD != threadNo) {
			queue_work(gCmdqContext.taskThreadAutoReleaseWQ[threadNo],
				   &pTask->autoReleaseWork);
		} else {
			queue_work(gCmdqContext.taskAutoReleaseWQ, &pTask->autoReleaseWork);
		}
	}
	return 0;
}
  6006. int32_t cmdqCoreSubmitTask(cmdqCommandStruct *pCommandDesc)
  6007. {
  6008. int32_t status;
  6009. TaskStruct *pTask = NULL;
  6010. CMDQ_MSG("-->SUBMIT: SYNC cmd 0x%p begin\n", CMDQ_U32_PTR(pCommandDesc->pVABase));
  6011. status = cmdqCoreSubmitTaskAsync(pCommandDesc, NULL, 0, &pTask);
  6012. if (status >= 0) {
  6013. status = cmdqCoreWaitResultAndReleaseTask(pTask,
  6014. &pCommandDesc->regValue,
  6015. msecs_to_jiffies
  6016. (CMDQ_DEFAULT_TIMEOUT_MS));
  6017. if (status < 0) {
  6018. /* error status print */
  6019. CMDQ_ERR("Task 0x%p wait fails\n", pTask);
  6020. }
  6021. } else {
  6022. CMDQ_ERR("cmdqCoreSubmitTaskAsync failed=%d", status);
  6023. }
  6024. CMDQ_MSG("<--SUBMIT: SYNC cmd 0x%p end\n", CMDQ_U32_PTR(pCommandDesc->pVABase));
  6025. return status;
  6026. }
  6027. int32_t cmdqCoreQueryUsage(int32_t *pCount)
  6028. {
  6029. unsigned long flags;
  6030. EngineStruct *pEngine;
  6031. int32_t index;
  6032. pEngine = gCmdqContext.engine;
  6033. spin_lock_irqsave(&gCmdqThreadLock, flags);
  6034. for (index = 0; index < CMDQ_MAX_ENGINE_COUNT; index++)
  6035. pCount[index] = pEngine[index].userCount;
  6036. spin_unlock_irqrestore(&gCmdqThreadLock, flags);
  6037. return 0;
  6038. }
/*
 * Release every task owned by a closing file node.
 *
 * Once user space closes its handle, no further "wait_and_close" requests
 * can arrive, so running/waiting user-space tasks must be reaped here to
 * prevent resource leakage.
 *
 * @file_node: private-data pointer identifying the closing client.
 */
void cmdq_core_release_task_by_file_node(void *file_node)
{
	struct TaskStruct *pTask = NULL;
	struct list_head *p = NULL;

	/* walk through active and waiting lists and release them */
	mutex_lock(&gCmdqTaskMutex);

	list_for_each(p, &gCmdqContext.taskActiveList) {
		pTask = list_entry(p, struct TaskStruct, listEntry);
		if (TASK_STATE_IDLE != pTask->taskState &&
		    pTask->privateData == file_node &&
		    (cmdq_core_is_request_from_user_space(pTask->scenario))) {
			CMDQ_LOG
			    ("[WARNING] ACTIVE task 0x%p release because file node 0x%p closed\n",
			     pTask, file_node);
			cmdq_core_dump_task(pTask);

			/* since we already inside mutex, */
			/* do not cmdqReleaseTask directly, */
			/* instead we arrange an auto-release. */
			/* Note that these tasks may already be issued to HW, */
			/* so there is a chance that a following MPU/M4U violation */
			/* may occur, if the user space process has been destroyed. */
			/* The ideal solution is to stop / cancel HW operation */
			/* immediately, but we cannot do so due to SMI hang risk. */
			cmdqCoreAutoReleaseTask(pTask);
		}
	}

	list_for_each(p, &gCmdqContext.taskWaitList) {
		pTask = list_entry(p, struct TaskStruct, listEntry);
		if (TASK_STATE_WAITING == pTask->taskState &&
		    pTask->privateData == file_node &&
		    (cmdq_core_is_request_from_user_space(pTask->scenario))) {
			CMDQ_LOG
			    ("[WARNING] WAITING task 0x%p release because file node 0x%p closed\n",
			     pTask, file_node);
			cmdq_core_dump_task(pTask);

			/* since we already inside mutex, */
			/* and these WAITING tasks will not be consumed (acquire thread / exec) */
			/* we can release them directly. */
			/* note that we use unlocked version since we already hold gCmdqTaskMutex. */
			/* NOTE(review): if release removes the task from this list, */
			/* iterating with list_for_each (not _safe) looks unsafe -- verify. */
			cmdq_core_release_task_unlocked(pTask);
		}
	}

	mutex_unlock(&gCmdqTaskMutex);
}
/*
 * Read a CMDQ general-purpose register pair as a 64-bit value.
 *
 * regID bit4 set   -> address GPR Px: low word from R32(2x), high word
 *                     from R32(2x + 1).
 * regID bit4 clear -> data GPR Rx: 32-bit value from R32(x); high word 0.
 *
 * Returns 0 when the platform has no GPR support.
 */
unsigned long long cmdq_core_get_GPR64(const CMDQ_DATA_REGISTER_ENUM regID)
{
#ifdef CMDQ_GPR_SUPPORT
	unsigned long long value;
	unsigned long long value1;
	unsigned long long value2;
	const uint32_t x = regID & 0x0F;

	if (0 < (regID & 0x10)) {
		/* query address GPR(64bit), Px */
		value1 = 0LL | CMDQ_REG_GET32(CMDQ_GPR_R32((2 * x)));
		value2 = 0LL | CMDQ_REG_GET32(CMDQ_GPR_R32((2 * x + 1)));
	} else {
		/* query data GPR(32bit), Rx */
		value1 = 0LL | CMDQ_REG_GET32(CMDQ_GPR_R32(x));
		value2 = 0LL;
	}

	value = (0LL) | (value2 << 32) | (value1);
	CMDQ_VERBOSE("get_GPR64(%x): 0x%llx(0x%llx, 0x%llx)\n", regID, value, value2, value1);

	return value;
#else
	CMDQ_ERR("func:%s failed since CMDQ doesn't support GPR\n", __func__);
	return 0LL;
#endif
}
  6112. void cmdq_core_set_GPR64(const CMDQ_DATA_REGISTER_ENUM regID, const unsigned long long value)
  6113. {
  6114. #ifdef CMDQ_GPR_SUPPORT
  6115. const unsigned long long value1 = 0x00000000FFFFFFFF & value;
  6116. const unsigned long long value2 = 0LL | value >> 32;
  6117. const uint32_t x = regID & 0x0F;
  6118. unsigned long long result;
  6119. if (0 < (regID & 0x10)) {
  6120. /* set address GPR(64bit), Px */
  6121. CMDQ_REG_SET32(CMDQ_GPR_R32((2 * x)), value1);
  6122. CMDQ_REG_SET32(CMDQ_GPR_R32((2 * x + 1)), value2);
  6123. } else {
  6124. /* set data GPR(32bit), Rx */
  6125. CMDQ_REG_SET32(CMDQ_GPR_R32((2 * x)), value1);
  6126. }
  6127. result = 0LL | cmdq_core_get_GPR64(regID);
  6128. if (value != result) {
  6129. CMDQ_ERR("set_GPR64(%x) failed, value is 0x%llx, not value 0x%llx\n", regID, result,
  6130. value);
  6131. }
  6132. #else
  6133. CMDQ_ERR("func:%s failed since CMDQ doesn't support GPR\n", __func__);
  6134. #endif
  6135. }
/*
 * Read the 32-bit GPR selected directly by regID.
 * Returns 0 when the platform has no GPR support.
 */
uint32_t cmdqCoreReadDataRegister(CMDQ_DATA_REGISTER_ENUM regID)
{
#ifdef CMDQ_GPR_SUPPORT
	return CMDQ_REG_GET32(CMDQ_GPR_R32(regID));
#else
	CMDQ_ERR("func:%s failed since CMDQ doesn't support GPR\n", __func__);
	return 0;
#endif
}
  6145. uint32_t cmdq_core_thread_prefetch_size(const int32_t thread)
  6146. {
  6147. if (thread >= 0 && thread < CMDQ_MAX_THREAD_COUNT)
  6148. return g_dts_setting.prefetch_size[thread];
  6149. else
  6150. return 0;
  6151. }
  6152. void cmdq_core_dump_dts_setting(void)
  6153. {
  6154. uint32_t index;
  6155. struct ResourceUnitStruct *pResource = NULL;
  6156. struct list_head *p = NULL;
  6157. CMDQ_LOG("[DTS] Prefetch Thread Count:%d\n", g_dts_setting.prefetch_thread_count);
  6158. CMDQ_LOG("[DTS] Prefetch Size of Thread:\n");
  6159. for (index = 0; index < g_dts_setting.prefetch_thread_count && index < CMDQ_MAX_THREAD_COUNT; index++)
  6160. CMDQ_LOG(" Thread[%d]=%d\n", index, g_dts_setting.prefetch_size[index]);
  6161. CMDQ_LOG("[DTS] SRAM Sharing Config:\n");
  6162. list_for_each(p, &gCmdqContext.resourceList) {
  6163. pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
  6164. CMDQ_LOG(" Engine=0x%016llx,, event=%d\n", pResource->engine, pResource->lockEvent);
  6165. }
  6166. }
/*
 * One-time initialization of the CMDQ core: wait queues, task cache and
 * lists, work queues, dump/profiling state, secure path context and
 * DTS-derived settings.
 *
 * Returns 0 always.
 */
int32_t cmdqCoreInitialize(void)
{
	struct TaskStruct *pTask;
	int32_t index;

	atomic_set(&gCmdqThreadUsage, 0);
	atomic_set(&gSMIThreadUsage, 0);
	BUG_ON(0 != atomic_read(&gCmdqThreadUsage));
	BUG_ON(0 != atomic_read(&gSMIThreadUsage));

	for (index = 0; index < CMDQ_MAX_THREAD_COUNT; index++)
		init_waitqueue_head(&gCmdWaitQueue[index]);
	init_waitqueue_head(&gCmdqThreadDispatchQueue);

	/* Reset overall context */
	memset(&gCmdqContext, 0x0, sizeof(ContextStruct));
	/* some fields have non-zero initial value */
	cmdq_core_reset_engine_struct();
	cmdq_core_reset_thread_struct();

	/* Create task pool */
	/* NOTE(review): kmem_cache_create() result is not checked here; a
	 * failure would surface later as task allocation failures.
	 */
	gCmdqContext.taskCache = kmem_cache_create(CMDQ_DRIVER_DEVICE_NAME "_task",
						   sizeof(struct TaskStruct),
						   __alignof__(struct TaskStruct),
						   SLAB_POISON | SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE,
						   &cmdq_core_task_ctor);

	/* Initialize task lists */
	INIT_LIST_HEAD(&gCmdqContext.taskFreeList);
	INIT_LIST_HEAD(&gCmdqContext.taskActiveList);
	INIT_LIST_HEAD(&gCmdqContext.taskWaitList);
	INIT_LIST_HEAD(&gCmdqContext.resourceList);
	INIT_WORK(&gCmdqContext.taskConsumeWaitQueueItem, cmdqCoreConsumeWaitQueueItem);

	/* Initialize writable address */
	INIT_LIST_HEAD(&gCmdqContext.writeAddrList);

	/* Initialize emergency buffer */
	cmdq_core_init_emergency_buffer();

	/* Initialize work queue */
	gCmdqContext.taskAutoReleaseWQ = create_singlethread_workqueue("cmdq_auto_release");
	gCmdqContext.taskConsumeWQ = create_singlethread_workqueue("cmdq_task");
	gCmdqContext.resourceCheckWQ = create_singlethread_workqueue("cmdq_resource");
	cmdq_core_init_thread_work_queue();

	/* Initialize command buffer dump */
	memset(&gCmdqBufferDump, 0x0, sizeof(DumpCommandBufferStruct));

#ifdef CMDQ_DUMP_FIRSTERROR
	/* Reset overall first error dump */
	memset(&gCmdqFirstError, 0x0, sizeof(DumpFirstErrorStruct));
	gCmdqFirstError.cmdqMaxSize = CMDQ_MAX_FIRSTERROR;
#endif

#ifdef CMDQ_EVENT_NEED_BACKUP
	/* Initialize backup event */
	cmdq_get_func()->initialBackupEvent();
#endif

	/* pre-allocate free tasks */
	for (index = 0; index < CMDQ_INIT_FREE_TASK_COUNT; index++) {
		pTask = cmdq_core_task_create();
		if (pTask) {
			mutex_lock(&gCmdqTaskMutex);
			list_add_tail(&(pTask->listEntry), &gCmdqContext.taskFreeList);
			mutex_unlock(&gCmdqTaskMutex);
		}
	}

	/* allocate shared memory */
	gCmdqContext.hSecSharedMem = NULL;
#ifdef CMDQ_SECURE_PATH_SUPPORT
	cmdq_sec_create_shared_memory(&(gCmdqContext.hSecSharedMem), PAGE_SIZE);
#endif

#if 0
	/* cmdqCoreRegisterDebugRegDumpCB(testcase_regdump_begin, testcase_regdump_end); */
#endif

	/* Initialize MET for statistics */
	/* note that we don't need to uninit it. */
	CMDQ_PROF_INIT();
#ifdef CMDQ_PROFILE_MMP
	cmdq_mmp_init();
#endif

	/* Initialize secure path context */
	cmdqSecInitialize();
	/* Initialize test case structure */
	cmdq_test_init_setting();
	/* Initialize DTS Setting structure */
	memset(&g_dts_setting, 0x0, sizeof(cmdq_dts_setting));
	/* Initialize setting for legacy chip (overridden by DTS below if present) */
	g_dts_setting.prefetch_thread_count = 3;
	g_dts_setting.prefetch_size[0] = 240;
	g_dts_setting.prefetch_size[2] = 32;
	cmdq_dev_get_dts_setting(&g_dts_setting);
	/* Initialize Resource via device tree */
	cmdq_dev_init_resource(cmdq_core_init_resource);
	/* Initialize Features */
	gCmdqContext.features[CMDQ_FEATURE_SRAM_SHARE] = 1;

#ifdef CMDQ_SECURE_PATH_CONSUME_AGAIN
	g_cmdq_consume_again = false;
#endif

	return 0;
}
  6258. #ifdef CMDQ_SECURE_PATH_SUPPORT
  6259. int32_t cmdqCoreLateInitialize(void)
  6260. {
  6261. int32_t status = 0;
  6262. struct task_struct *open_th =
  6263. kthread_run(cmdq_sec_init_allocate_resource_thread, NULL, "cmdq_WSM_init");
  6264. if (IS_ERR(open_th)) {
  6265. CMDQ_LOG("%s, init kthread_run failed!\n", __func__);
  6266. status = -EFAULT;
  6267. }
  6268. return status;
  6269. }
  6270. #endif
  6271. void cmdqCoreDeInitialize(void)
  6272. {
  6273. struct TaskStruct *pTask = NULL;
  6274. struct list_head *p;
  6275. int index;
  6276. struct list_head *lists[] = {
  6277. &gCmdqContext.taskFreeList,
  6278. &gCmdqContext.taskActiveList,
  6279. &gCmdqContext.taskWaitList
  6280. };
  6281. /* directly destroy the auto release WQ since we're going to release tasks anyway. */
  6282. destroy_workqueue(gCmdqContext.taskAutoReleaseWQ);
  6283. gCmdqContext.taskAutoReleaseWQ = NULL;
  6284. destroy_workqueue(gCmdqContext.taskConsumeWQ);
  6285. gCmdqContext.taskConsumeWQ = NULL;
  6286. destroy_workqueue(gCmdqContext.resourceCheckWQ);
  6287. gCmdqContext.resourceCheckWQ = NULL;
  6288. cmdq_core_destroy_thread_work_queue();
  6289. /* release all tasks in both list */
  6290. for (index = 0; index < (sizeof(lists) / sizeof(lists[0])); ++index) {
  6291. list_for_each(p, lists[index]) {
  6292. mutex_lock(&gCmdqTaskMutex);
  6293. pTask = list_entry(p, struct TaskStruct, listEntry);
  6294. /* free allocated DMA buffer */
  6295. cmdq_task_free_task_command_buffer(pTask);
  6296. kmem_cache_free(gCmdqContext.taskCache, pTask);
  6297. list_del(p);
  6298. mutex_unlock(&gCmdqTaskMutex);
  6299. }
  6300. }
  6301. /* check if there are dangling write addresses. */
  6302. if (!list_empty(&gCmdqContext.writeAddrList)) {
  6303. /* there are unreleased write buffer, raise AEE */
  6304. CMDQ_AEE("CMDQ", "there are unreleased write buffer");
  6305. }
  6306. kmem_cache_destroy(gCmdqContext.taskCache);
  6307. gCmdqContext.taskCache = NULL;
  6308. /* release emergency buffer */
  6309. cmdq_core_uninit_emergency_buffer();
  6310. /* Deinitialize secure path context */
  6311. cmdqSecDeInitialize();
  6312. }
  6313. int cmdqCoreAllocWriteAddress(uint32_t count, dma_addr_t *paStart)
  6314. {
  6315. unsigned long flagsWriteAddr = 0L;
  6316. WriteAddrStruct *pWriteAddr = NULL;
  6317. int status = 0;
  6318. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6319. do {
  6320. if (NULL == paStart) {
  6321. CMDQ_ERR("invalid output argument\n");
  6322. status = -EINVAL;
  6323. break;
  6324. }
  6325. *paStart = 0;
  6326. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6327. pWriteAddr = kzalloc(sizeof(WriteAddrStruct), GFP_KERNEL);
  6328. if (NULL == pWriteAddr) {
  6329. CMDQ_ERR("failed to alloc WriteAddrStruct\n");
  6330. status = -ENOMEM;
  6331. break;
  6332. }
  6333. memset(pWriteAddr, 0, sizeof(WriteAddrStruct));
  6334. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6335. pWriteAddr->count = count;
  6336. pWriteAddr->va =
  6337. cmdq_core_alloc_hw_buffer(cmdq_dev_get(),
  6338. count * sizeof(uint32_t), &(pWriteAddr->pa),
  6339. GFP_KERNEL);
  6340. if (current)
  6341. pWriteAddr->user = current->pid;
  6342. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6343. if (NULL == pWriteAddr->va) {
  6344. CMDQ_ERR("failed to alloc write buffer\n");
  6345. status = -ENOMEM;
  6346. break;
  6347. }
  6348. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6349. /* clear buffer content */
  6350. do {
  6351. volatile uint32_t *pInt = (uint32_t *) pWriteAddr->va;
  6352. int i = 0;
  6353. for (i = 0; i < count; ++i) {
  6354. *(pInt + i) = 0xcdcdabab;
  6355. mb();
  6356. /* make sure instructions are really in DRAM */
  6357. smp_mb();
  6358. }
  6359. } while (0);
  6360. /* assign output pa */
  6361. *paStart = pWriteAddr->pa;
  6362. spin_lock_irqsave(&gCmdqWriteAddrLock, flagsWriteAddr);
  6363. list_add_tail(&(pWriteAddr->list_node), &gCmdqContext.writeAddrList);
  6364. spin_unlock_irqrestore(&gCmdqWriteAddrLock, flagsWriteAddr);
  6365. status = 0;
  6366. } while (0);
  6367. if (0 != status) {
  6368. /* release resources */
  6369. if (pWriteAddr && pWriteAddr->va) {
  6370. cmdq_core_free_hw_buffer(cmdq_dev_get(),
  6371. sizeof(uint32_t) * pWriteAddr->count,
  6372. pWriteAddr->va, pWriteAddr->pa);
  6373. memset(pWriteAddr, 0, sizeof(WriteAddrStruct));
  6374. }
  6375. kfree(pWriteAddr);
  6376. pWriteAddr = NULL;
  6377. }
  6378. CMDQ_VERBOSE("ALLOC: line %d\n", __LINE__);
  6379. return status;
  6380. }
  6381. uint32_t cmdqCoreReadWriteAddress(dma_addr_t pa)
  6382. {
  6383. struct list_head *p = NULL;
  6384. WriteAddrStruct *pWriteAddr = NULL;
  6385. int32_t offset = 0;
  6386. uint32_t value = 0;
  6387. unsigned long flagsWriteAddr = 0L;
  6388. /* search for the entry */
  6389. spin_lock_irqsave(&gCmdqWriteAddrLock, flagsWriteAddr);
  6390. list_for_each(p, &gCmdqContext.writeAddrList) {
  6391. pWriteAddr = list_entry(p, struct WriteAddrStruct, list_node);
  6392. if (NULL == pWriteAddr)
  6393. continue;
  6394. offset = pa - pWriteAddr->pa;
  6395. if (offset >= 0 && (offset / sizeof(uint32_t)) < pWriteAddr->count) {
  6396. CMDQ_VERBOSE
  6397. ("cmdqCoreReadWriteAddress() input:%pa, got offset=%d va=%p pa_start=%pa\n",
  6398. &pa, offset, (pWriteAddr->va + offset), &(pWriteAddr->pa));
  6399. value = *((volatile uint32_t *)(pWriteAddr->va + offset));
  6400. CMDQ_VERBOSE
  6401. ("cmdqCoreReadWriteAddress() found offset=%d va=%p value=0x%08x\n",
  6402. offset, (pWriteAddr->va + offset), value);
  6403. break;
  6404. }
  6405. }
  6406. spin_unlock_irqrestore(&gCmdqWriteAddrLock, flagsWriteAddr);
  6407. return value;
  6408. }
  6409. uint32_t cmdqCoreWriteWriteAddress(dma_addr_t pa, uint32_t value)
  6410. {
  6411. struct list_head *p = NULL;
  6412. WriteAddrStruct *pWriteAddr = NULL;
  6413. int32_t offset = 0;
  6414. unsigned long flagsWriteAddr = 0L;
  6415. char longMsg[CMDQ_LONGSTRING_MAX];
  6416. uint32_t msgOffset;
  6417. int32_t msgMAXSize;
  6418. /* search for the entry */
  6419. spin_lock_irqsave(&gCmdqWriteAddrLock, flagsWriteAddr);
  6420. list_for_each(p, &gCmdqContext.writeAddrList) {
  6421. pWriteAddr = list_entry(p, struct WriteAddrStruct, list_node);
  6422. if (NULL == pWriteAddr)
  6423. continue;
  6424. offset = pa - pWriteAddr->pa;
  6425. /* note it is 64 bit length for uint32_t variable in 64 bit kernel */
  6426. /* use sizeof(u_log) to check valid offset range */
  6427. if (offset >= 0 && (offset / sizeof(unsigned long)) < pWriteAddr->count) {
  6428. cmdq_core_longstring_init(longMsg, &msgOffset, &msgMAXSize);
  6429. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  6430. "cmdqCoreWriteWriteAddress() input:0x%pa,", &pa);
  6431. cmdqCoreLongString(false, longMsg, &msgOffset, &msgMAXSize,
  6432. " got offset=%d va=%p pa_start=0x%pa, value=0x%08x\n",
  6433. offset, (pWriteAddr->va + offset),
  6434. &pWriteAddr->pa, value);
  6435. if (msgOffset > 0) {
  6436. /* print message */
  6437. CMDQ_VERBOSE("%s", longMsg);
  6438. }
  6439. *((volatile uint32_t *)(pWriteAddr->va + offset)) = value;
  6440. break;
  6441. }
  6442. }
  6443. spin_unlock_irqrestore(&gCmdqWriteAddrLock, flagsWriteAddr);
  6444. return value;
  6445. }
  6446. int cmdqCoreFreeWriteAddress(dma_addr_t paStart)
  6447. {
  6448. struct list_head *p, *n = NULL;
  6449. WriteAddrStruct *pWriteAddr = NULL;
  6450. bool foundEntry;
  6451. unsigned long flagsWriteAddr = 0L;
  6452. foundEntry = false;
  6453. /* search for the entry */
  6454. spin_lock_irqsave(&gCmdqWriteAddrLock, flagsWriteAddr);
  6455. list_for_each_safe(p, n, &gCmdqContext.writeAddrList) {
  6456. pWriteAddr = list_entry(p, struct WriteAddrStruct, list_node);
  6457. if (pWriteAddr && pWriteAddr->pa == paStart) {
  6458. list_del(&(pWriteAddr->list_node));
  6459. foundEntry = true;
  6460. break;
  6461. }
  6462. }
  6463. spin_unlock_irqrestore(&gCmdqWriteAddrLock, flagsWriteAddr);
  6464. /* when list is not empty, we always get a entry even we don't found a valid entry */
  6465. /* use foundEntry to confirm search result */
  6466. if (false == foundEntry) {
  6467. CMDQ_ERR("cmdqCoreFreeWriteAddress() no matching entry, paStart:%pa\n", &paStart);
  6468. return -EINVAL;
  6469. }
  6470. /* release resources */
  6471. if (pWriteAddr->va) {
  6472. cmdq_core_free_hw_buffer(cmdq_dev_get(),
  6473. sizeof(uint32_t) * pWriteAddr->count,
  6474. pWriteAddr->va, pWriteAddr->pa);
  6475. memset(pWriteAddr, 0xda, sizeof(WriteAddrStruct));
  6476. }
  6477. kfree(pWriteAddr);
  6478. pWriteAddr = NULL;
  6479. return 0;
  6480. }
  6481. int32_t cmdqCoreDebugRegDumpBegin(uint32_t taskID, uint32_t *regCount, uint32_t **regAddress)
  6482. {
  6483. if (NULL == gCmdqDebugCallback.beginDebugRegDump) {
  6484. CMDQ_ERR("beginDebugRegDump not registered\n");
  6485. return -EFAULT;
  6486. }
  6487. return gCmdqDebugCallback.beginDebugRegDump(taskID, regCount, regAddress);
  6488. }
  6489. int32_t cmdqCoreDebugRegDumpEnd(uint32_t taskID, uint32_t regCount, uint32_t *regValues)
  6490. {
  6491. if (NULL == gCmdqDebugCallback.endDebugRegDump) {
  6492. CMDQ_ERR("endDebugRegDump not registered\n");
  6493. return -EFAULT;
  6494. }
  6495. return gCmdqDebugCallback.endDebugRegDump(taskID, regCount, regValues);
  6496. }
  6497. void cmdq_core_set_log_level(const int32_t value)
  6498. {
  6499. if (value == CMDQ_LOG_LEVEL_NORMAL) {
  6500. /* Only print CMDQ ERR and CMDQ LOG */
  6501. gCmdqContext.logLevel = CMDQ_LOG_LEVEL_NORMAL;
  6502. } else if (value < CMDQ_LOG_LEVEL_MAX) {
  6503. /* Modify log level */
  6504. gCmdqContext.logLevel = (1 << value);
  6505. }
  6506. }
  6507. bool cmdq_core_should_print_msg(void)
  6508. {
  6509. bool logLevel = (gCmdqContext.logLevel & (1 << CMDQ_LOG_LEVEL_MSG)) ? (1) : (0);
  6510. return logLevel;
  6511. }
  6512. bool cmdq_core_should_full_error(void)
  6513. {
  6514. bool logLevel = (gCmdqContext.logLevel & (1 << CMDQ_LOG_LEVEL_FULL_ERROR)) ? (1) : (0);
  6515. return logLevel;
  6516. }
/* Returns the global profiling flag (non-zero when profiling is enabled). */
int32_t cmdq_core_profile_enabled(void)
{
	return gCmdqContext.enableProfile;
}
  6521. int32_t cmdq_core_enable_emergency_buffer_test(const bool enable)
  6522. {
  6523. #ifdef CMDQ_TEST_EMERGENCY_BUFFER
  6524. const uint32_t value = (enable) ? (1) : (0);
  6525. atomic_set(&gCmdqDebugForceUseEmergencyBuffer, value);
  6526. return 0;
  6527. #else
  6528. CMDQ_ERR("CMDQ_TEST_EMERGENCY_BUFFER not support\n");
  6529. return -EFAULT;
  6530. #endif
  6531. }
  6532. void cmdq_core_longstring_init(char *buf, uint32_t *offset, int32_t *maxSize)
  6533. {
  6534. buf[0] = '\0';
  6535. *offset = 0;
  6536. *maxSize = CMDQ_LONGSTRING_MAX - 1;
  6537. }
  6538. void cmdqCoreLongString(bool forceLog, char *buf, uint32_t *offset, int32_t *maxSize,
  6539. const char *string, ...)
  6540. {
  6541. int msgLen;
  6542. va_list argptr;
  6543. char *pBuffer;
  6544. if ((false == forceLog) && (false == cmdq_core_should_print_msg()) && (*maxSize <= 0))
  6545. return;
  6546. va_start(argptr, string);
  6547. pBuffer = buf + (*offset);
  6548. msgLen = vsnprintf(pBuffer, *maxSize, string, argptr);
  6549. *maxSize -= msgLen;
  6550. if (*maxSize < 0)
  6551. *maxSize = 0;
  6552. *offset += msgLen;
  6553. va_end(argptr);
  6554. }
/* Disable any further writes into the first-error dump buffer. */
void cmdq_core_turnoff_first_dump(void)
{
#ifdef CMDQ_DUMP_FIRSTERROR
	gCmdqFirstError.flag = false;
#endif
}
  6561. int32_t cmdq_core_save_first_dump(const char *string, ...)
  6562. {
  6563. #ifdef CMDQ_DUMP_FIRSTERROR
  6564. int logLen;
  6565. va_list argptr;
  6566. char *pBuffer;
  6567. if (false == gCmdqFirstError.flag)
  6568. return -EFAULT;
  6569. va_start(argptr, string);
  6570. pBuffer = gCmdqFirstError.cmdqString + gCmdqFirstError.cmdqCount;
  6571. logLen = vsnprintf(pBuffer, gCmdqFirstError.cmdqMaxSize, string, argptr);
  6572. gCmdqFirstError.cmdqMaxSize -= logLen;
  6573. gCmdqFirstError.cmdqCount += logLen;
  6574. if (gCmdqFirstError.cmdqMaxSize <= 0) {
  6575. gCmdqFirstError.flag = false;
  6576. pr_err("[CMDQ][ERR] Error0 dump saving buffer is full\n");
  6577. }
  6578. va_end(argptr);
  6579. return 0;
  6580. #else
  6581. return -EFAULT;
  6582. #endif
  6583. }
  6584. #ifdef CMDQ_DUMP_FIRSTERROR
  6585. void cmdq_core_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
  6586. int groupsize, char *linebuf, size_t linebuflen)
  6587. {
  6588. const u8 *ptr = buf;
  6589. u8 ch;
  6590. int j, lx = 0;
  6591. if (rowsize != 16 && rowsize != 32)
  6592. rowsize = 16;
  6593. if (!len)
  6594. goto nil;
  6595. if (len > rowsize) /* limit to one line at a time */
  6596. len = rowsize;
  6597. if ((len % groupsize) != 0) /* no mixed size output */
  6598. groupsize = 1;
  6599. switch (groupsize) {
  6600. case 8:{
  6601. const u64 *ptr8 = buf;
  6602. int ngroups = len / groupsize;
  6603. for (j = 0; j < ngroups; j++)
  6604. lx += scnprintf(linebuf + lx, linebuflen - lx,
  6605. "%s%16.16llx", j ? " " : "",
  6606. (unsigned long long)*(ptr8 + j));
  6607. break;
  6608. }
  6609. case 4:{
  6610. const u32 *ptr4 = buf;
  6611. int ngroups = len / groupsize;
  6612. for (j = 0; j < ngroups; j++)
  6613. lx += scnprintf(linebuf + lx, linebuflen - lx,
  6614. "%s%8.8x", j ? " " : "", *(ptr4 + j));
  6615. break;
  6616. }
  6617. case 2:{
  6618. const u16 *ptr2 = buf;
  6619. int ngroups = len / groupsize;
  6620. for (j = 0; j < ngroups; j++)
  6621. lx += scnprintf(linebuf + lx, linebuflen - lx,
  6622. "%s%4.4x", j ? " " : "", *(ptr2 + j));
  6623. break;
  6624. }
  6625. default:
  6626. for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
  6627. ch = ptr[j];
  6628. linebuf[lx++] = hex_asc_hi(ch);
  6629. linebuf[lx++] = hex_asc_lo(ch);
  6630. linebuf[lx++] = ' ';
  6631. }
  6632. if (j)
  6633. lx--;
  6634. break;
  6635. }
  6636. nil:
  6637. linebuf[lx++] = '\0';
  6638. }
  6639. #endif
  6640. void cmdq_core_save_hex_first_dump(const char *prefix_str,
  6641. int rowsize, int groupsize, const void *buf, size_t len)
  6642. {
  6643. #ifdef CMDQ_DUMP_FIRSTERROR
  6644. const u8 *ptr = buf;
  6645. int i, linelen, remaining = len;
  6646. unsigned char linebuf[32 * 3 + 2 + 32 + 1];
  6647. int logLen;
  6648. char *pBuffer;
  6649. if (false == gCmdqFirstError.flag)
  6650. return;
  6651. if (rowsize != 16 && rowsize != 32)
  6652. rowsize = 16;
  6653. for (i = 0; i < len; i += rowsize) {
  6654. linelen = min(remaining, rowsize);
  6655. remaining -= rowsize;
  6656. cmdq_core_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
  6657. linebuf, sizeof(linebuf));
  6658. pBuffer = gCmdqFirstError.cmdqString + gCmdqFirstError.cmdqCount;
  6659. logLen = snprintf(pBuffer, gCmdqFirstError.cmdqMaxSize, "%s%p: %s\n", prefix_str, ptr + i, linebuf);
  6660. gCmdqFirstError.cmdqMaxSize -= logLen;
  6661. gCmdqFirstError.cmdqCount += logLen;
  6662. if (gCmdqFirstError.cmdqMaxSize <= 0) {
  6663. gCmdqFirstError.flag = false;
  6664. pr_err("[CMDQ][ERR] Error0 dump saving buffer is full\n");
  6665. }
  6666. }
  6667. #endif
  6668. }
/* Use CMDQ as Resource Manager */
/*
 * Mark every resource whose engine bits intersect engineFlag as "used"
 * (locked by CMDQ). On first use, the resource's releaseCB is invoked to
 * ask the current owner to give it up; on repeat use, any pending
 * delayed availability check is cancelled.
 *
 * fromNotify selects which timestamp field is updated (notify vs lock).
 * No-op when the SRAM_SHARE feature is off.
 *
 * Locking: gCmdqResourceMutex is taken per matching entry and is
 * deliberately dropped around the releaseCB call (the callback may need
 * the mutex itself), then re-acquired — do not reorder these steps.
 */
void cmdqCoreLockResource(uint64_t engineFlag, bool fromNotify)
{
	struct ResourceUnitStruct *pResource = NULL;
	struct list_head *p = NULL;

	if (cmdq_core_is_feature_off(CMDQ_FEATURE_SRAM_SHARE))
		return;

	/* NOTE(review): resourceList is walked without the mutex; entries
	 * appear to be registered at init and never removed — confirm. */
	list_for_each(p, &gCmdqContext.resourceList) {
		pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
		if (engineFlag & pResource->engine) {
			mutex_lock(&gCmdqResourceMutex);
			/* find matched engine */
			if (fromNotify)
				pResource->notify = sched_clock();
			else
				pResource->lock = sched_clock();

			if (!pResource->used) {
				/* First time used */
				int32_t status;

				CMDQ_MSG("[Res] Lock resource with engine: 0x%016llx, fromNotify:%d\n",
					 engineFlag, fromNotify);
				pResource->used = true;
				CMDQ_MSG("[Res] Callback to release\n");
				if (NULL == pResource->releaseCB) {
					CMDQ_LOG("[Res]: release CB func is NULL, event:%d\n",
						 pResource->lockEvent);
				} else {
					/* release mutex before callback */
					mutex_unlock(&gCmdqResourceMutex);
					status =
					    pResource->releaseCB(pResource->lockEvent);
					mutex_lock(&gCmdqResourceMutex);
					if (status < 0) {
						/* Error status print */
						CMDQ_ERR("[Res]: release CB (%d) return fail:%d\n",
							 pResource->lockEvent, status);
					}
				}
			} else {
				/* Cancel previous delay task if existed */
				if (pResource->delaying) {
					pResource->delaying = false;
					cancel_delayed_work(&pResource->delayCheckWork);
				}
			}
			mutex_unlock(&gCmdqResourceMutex);
		}
	}
}
  6718. bool cmdqCoreAcquireResource(CMDQ_EVENT_ENUM resourceEvent)
  6719. {
  6720. struct ResourceUnitStruct *pResource = NULL;
  6721. struct list_head *p = NULL;
  6722. bool result = false;
  6723. if (cmdq_core_is_feature_off(CMDQ_FEATURE_SRAM_SHARE))
  6724. return result;
  6725. CMDQ_MSG("[Res] Acquire resource with event: %d\n", resourceEvent);
  6726. list_for_each(p, &gCmdqContext.resourceList) {
  6727. pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
  6728. if (resourceEvent == pResource->lockEvent) {
  6729. mutex_lock(&gCmdqResourceMutex);
  6730. /* find matched resource */
  6731. result = !pResource->used;
  6732. if (result) {
  6733. CMDQ_MSG("[Res] Acquire successfully, event: %d\n", resourceEvent);
  6734. cmdqCoreClearEvent(resourceEvent);
  6735. pResource->acquire = sched_clock();
  6736. }
  6737. mutex_unlock(&gCmdqResourceMutex);
  6738. break;
  6739. }
  6740. }
  6741. return result;
  6742. }
  6743. void cmdqCoreReleaseResource(CMDQ_EVENT_ENUM resourceEvent)
  6744. {
  6745. struct ResourceUnitStruct *pResource = NULL;
  6746. struct list_head *p = NULL;
  6747. if (cmdq_core_is_feature_off(CMDQ_FEATURE_SRAM_SHARE))
  6748. return;
  6749. CMDQ_MSG("[Res] Release resource with event: %d\n", resourceEvent);
  6750. list_for_each(p, &gCmdqContext.resourceList) {
  6751. pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
  6752. if (resourceEvent == pResource->lockEvent) {
  6753. mutex_lock(&gCmdqResourceMutex);
  6754. /* find matched resource */
  6755. pResource->release = sched_clock();
  6756. mutex_unlock(&gCmdqResourceMutex);
  6757. break;
  6758. }
  6759. }
  6760. }
  6761. void cmdqCoreSetResourceCallback(CMDQ_EVENT_ENUM resourceEvent,
  6762. CmdqResourceAvailableCB resourceAvailable,
  6763. CmdqResourceReleaseCB resourceRelease)
  6764. {
  6765. struct ResourceUnitStruct *pResource = NULL;
  6766. struct list_head *p = NULL;
  6767. CMDQ_MSG("[Res] Set resource callback with event: %d\n", resourceEvent);
  6768. list_for_each(p, &gCmdqContext.resourceList) {
  6769. pResource = list_entry(p, struct ResourceUnitStruct, listEntry);
  6770. if (resourceEvent == pResource->lockEvent) {
  6771. CMDQ_MSG("[Res] Set resource callback ok!\n");
  6772. mutex_lock(&gCmdqResourceMutex);
  6773. /* find matched resource */
  6774. pResource->availableCB = resourceAvailable;
  6775. pResource->releaseCB = resourceRelease;
  6776. mutex_unlock(&gCmdqResourceMutex);
  6777. break;
  6778. }
  6779. }
  6780. }
  6781. /* Implement dynamic feature configuration */
  6782. void cmdq_core_dump_feature(void)
  6783. {
  6784. int index;
  6785. static const char *const FEATURE_STRING[] = {
  6786. FOREACH_FEATURE(GENERATE_STRING)
  6787. };
  6788. /* dump all feature status */
  6789. for (index = 0; index < CMDQ_FEATURE_TYPE_MAX; index++) {
  6790. CMDQ_LOG("[Feature] %02d %s\t\t%d\n", index, FEATURE_STRING[index],
  6791. cmdq_core_get_feature(index));
  6792. }
  6793. }
  6794. void cmdq_core_set_feature(CMDQ_FEATURE_TYPE_ENUM featureOption, uint32_t value)
  6795. {
  6796. if (0 == atomic_read(&gCmdqThreadUsage))
  6797. CMDQ_ERR("[FO] Try to set feature (%d) while running!\n", featureOption);
  6798. if (featureOption >= CMDQ_FEATURE_TYPE_MAX) {
  6799. CMDQ_ERR("[FO] Set feature invalid: %d\n", featureOption);
  6800. } else {
  6801. CMDQ_LOG("[FO] Set feature: %d, with value:%d\n", featureOption, value);
  6802. gCmdqContext.features[featureOption] = value;
  6803. }
  6804. }
  6805. uint32_t cmdq_core_get_feature(CMDQ_FEATURE_TYPE_ENUM featureOption)
  6806. {
  6807. if (featureOption >= CMDQ_FEATURE_TYPE_MAX) {
  6808. CMDQ_ERR("[FO] Set feature invalid: %d\n", featureOption);
  6809. return CMDQ_FEATURE_OFF_VALUE;
  6810. }
  6811. return gCmdqContext.features[featureOption];
  6812. }
/* True when the feature's configured value equals the "off" sentinel
 * (also the value returned for out-of-range feature indices). */
bool cmdq_core_is_feature_off(CMDQ_FEATURE_TYPE_ENUM featureOption)
{
	return CMDQ_FEATURE_OFF_VALUE == cmdq_core_get_feature(featureOption);
}