/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define DEBUG 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/rtc.h>
#include <mt-plat/aee.h>
#ifdef CONFIG_MT_PRIO_TRACER
#include <linux/prio_tracer.h>
#endif

#include "binder.h"
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)

#define RT_PRIO_INHERIT "v1.7"
#ifdef RT_PRIO_INHERIT
#include <linux/sched/rt.h>
#endif
#define MTK_BINDER_DEBUG "v0.1" /* defined for MTK internally added debug code */

/*****************************************************************************/
/* MTK Death Notify |                                                        */
/* Debug Log Prefix | Description                                            */
/* ------------------------------------------------------------------------- */
/* [DN #1]          | Someone requests Death Notify from the upper layer.    */
/* [DN #2]          | Someone cancels Death Notify from the upper layer.     */
/* [DN #3]          | The Binder driver sends Death Notify to all requesters' binder threads. */
/* [DN #4]          | Some requester's binder_thread_read() handles the Death Notify work. */
/* [DN #5]          | Some requester sends a confirmation to the Binder driver. (In IPCThreadState.cpp) */
/* [DN #6]          | Finally the requester's confirmation is received from the upper layer. */
/*****************************************************************************/
#define MTK_DEATH_NOTIFY_MONITOR "v0.1"

/**
 * Revision history of binder monitor
 *
 * v0.1   - enhance debug log
 * v0.2   - transaction timeout log
 * v0.2.1 - buffer allocation debug
 */
#ifdef CONFIG_MT_ENG_BUILD
#define BINDER_MONITOR "v0.2.1" /* BINDER_MONITOR is only turned on for eng builds */
#endif
#ifdef BINDER_MONITOR
#define MAX_SERVICE_NAME_LEN 32

/*****************************************************************************/
/* Payload layout of addService():                                           */
/* | Parcel header | IServiceManager.descriptor | Parcel header | Service name | ... */
/* (Please refer to ServiceManagerNative.java: addService())                  */
/* IServiceManager.descriptor is 'android.os.IServiceManager' interleaved    */
/* with the character '\0', that is, 'a', '\0', 'n', '\0', 'd', '\0', ...    */
/* so the offset of the service name                                         */
/*   = Parcel header x2 + strlen("android.os.IServiceManager") x2            */
/*   = 8x2 + 26x2 = 68                                                       */
/*****************************************************************************/
#define MAGIC_SERVICE_NAME_OFFSET 68
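/*
 * A minimal compile-time sanity check (an illustrative sketch, not part of
 * the original driver): the hard-coded 68 can be derived from the layout
 * documented above. PARCEL_HDR_LEN is an assumed name for the 8-byte Parcel
 * header:
 *
 *	#define PARCEL_HDR_LEN 8
 *	#define IFACE_DESC_CHARS (sizeof("android.os.IServiceManager") - 1)
 *	BUILD_BUG_ON(2 * PARCEL_HDR_LEN + 2 * IFACE_DESC_CHARS !=
 *		     MAGIC_SERVICE_NAME_OFFSET);
 */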
#define MAX_ENG_TRANS_LOG_BUFF_LEN 10240

static pid_t system_server_pid;
static int binder_check_buf_pid;
static int binder_check_buf_tid;
static unsigned long binder_log_level;
char aee_msg[512];
char aee_word[100];

#define TRANS_LOG_LEN 210
char large_msg[TRANS_LOG_LEN];

#define BINDER_PERF_EVAL "V0.1"
#endif
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

#ifdef BINDER_MONITOR
#define BINDER_DEBUG_SETTING_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.write = binder_##name##_write, \
	.llseek = seq_lseek, \
	.release = single_release, \
}
#endif
/* LCH add: for binder page leakage debug */
#ifdef CONFIG_MT_ENG_BUILD
#define MTK_BINDER_PAGE_USED_RECORD
#endif

#ifdef MTK_BINDER_PAGE_USED_RECORD
static unsigned int binder_page_used;
static unsigned int binder_page_used_peak;
#endif
static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val, struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#ifdef BINDER_MONITOR
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_err(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#else
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#endif
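/*
 * Usage sketch (illustrative; the call sites live elsewhere in this
 * driver): the mask argument selects which BINDER_DEBUG_* class a message
 * belongs to, so whole classes can be toggled at runtime through the
 * debug_mask module parameter, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
 *		     current->group_leader->pid, current->pid);
 */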
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
#ifdef BINDER_MONITOR
	unsigned int code;
	struct timespec timestamp;
	char service[MAX_SERVICE_NAME_LEN];
	int fd;
	struct timeval tv;
	struct timespec readstamp;
	struct timespec endstamp;
#endif
};

struct binder_transaction_log {
	int next;
	int full;
#ifdef BINDER_MONITOR
	unsigned size;
	struct binder_transaction_log_entry *entry;
#else
	struct binder_transaction_log_entry entry[32];
#endif
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
#ifdef BINDER_MONITOR
	if (log->next == log->size) {
		log->next = 0;
		log->full = 1;
	}
#else
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
#endif
	return e;
}
#ifdef BINDER_MONITOR
static struct binder_transaction_log_entry entry_failed[32];

/* log_disable bitmap
 * bit: 31 ... 4 3 2 1 0
 *              | | | |_ 0: log enable / 1: log disable
 *              | | |___ 1: self resume
 *              | |_____ 2: manually trigger kernel warning for buffer allocation
 *              |_______ 3: 1: rt_inherit log enable / 0: rt_inherit log disable
 */
static int log_disable;
#define BINDER_LOG_RESUME 0x2
#define BINDER_BUF_WARN 0x4
#ifdef RT_PRIO_INHERIT
#define BINDER_RT_LOG_ENABLE 0x8
#endif
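/*
 * Worked example (illustrative, derived from the bitmap above): a
 * log_disable value of 0x5 sets bit 0 (transaction logging disabled) and
 * BINDER_BUF_WARN (0x4, manually trigger a kernel warning for buffer
 * allocation), while BINDER_LOG_RESUME (0x2) and BINDER_RT_LOG_ENABLE (0x8)
 * remain clear.
 */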
#ifdef CONFIG_MTK_EXTMEM
#include <linux/exm_driver.h>
#else
static struct binder_transaction_log_entry entry_t[MAX_ENG_TRANS_LOG_BUFF_LEN];
#endif
#endif
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
#ifdef BINDER_MONITOR
	char name[MAX_SERVICE_NAME_LEN];
#endif
#ifdef MTK_BINDER_DEBUG
	int async_pid;
#endif
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;
#ifdef BINDER_MONITOR
	struct binder_transaction_log_entry *log_entry;
#endif
	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

#ifdef BINDER_MONITOR
enum wait_on_reason {
	WAIT_ON_NONE = 0U,
	WAIT_ON_READ = 1U,
	WAIT_ON_EXEC = 2U,
	WAIT_ON_REPLY_READ = 3U
};
#endif
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
#ifdef RT_PRIO_INHERIT
	unsigned long default_rt_prio:16;
	unsigned long default_policy:16;
#endif
#ifdef BINDER_MONITOR
	struct binder_buffer *large_buffer;
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
	unsigned int page_used;
	unsigned int page_used_peak;
#endif
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error;	/* Write failed, return error code in read buf */
	uint32_t return_error2;	/* Write failed, return error code in read */
				/* buffer. Used when sending a reply to a dead */
				/* process that we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
#ifdef RT_PRIO_INHERIT
	unsigned long rt_prio:16;
	unsigned long policy:16;
	unsigned long saved_rt_prio:16;
	unsigned long saved_policy:16;
#endif
#ifdef BINDER_MONITOR
	struct timespec timestamp;

	enum wait_on_reason wait_on;
	enum wait_on_reason bark_on;
	struct rb_node rb_node; /* by bark_time */
	struct timespec bark_time;
	struct timespec exe_timestamp;
	struct timeval tv;
	char service[MAX_SERVICE_NAME_LEN];
	pid_t fproc;
	pid_t fthrd;
	pid_t tproc;
	pid_t tthrd;
	unsigned int log_idx;
#endif
};
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static inline void binder_lock(const char *tag);
static inline void binder_unlock(const char *tag);

#ifdef BINDER_MONITOR
/* work should be done within how many secs */
#define WAIT_BUDGET_READ 2
#define WAIT_BUDGET_EXEC 4
#define WAIT_BUDGET_MIN min(WAIT_BUDGET_READ, WAIT_BUDGET_EXEC)
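/*
 * Budget semantics (descriptive note, inferred from the watchdog code
 * below): WAIT_ON_READ work is expected to finish within WAIT_BUDGET_READ
 * seconds and WAIT_ON_EXEC work within WAIT_BUDGET_EXEC seconds.
 * binder_queue_bwdog() stamps each transaction's bark_time with the chosen
 * budget, and binder_bwdog_thread() wakes up when the earliest bark_time is
 * due, or every WAIT_BUDGET_MIN seconds when the queue is empty.
 */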
static struct rb_root bwdog_transacts;

static const char *const binder_wait_on_str[] = {
	"none",
	"read",
	"exec",
	"rply"
};

struct binder_timeout_log_entry {
	enum wait_on_reason r;
	pid_t from_proc;
	pid_t from_thrd;
	pid_t to_proc;
	pid_t to_thrd;
	unsigned over_sec;
	struct timespec ts;
	struct timeval tv;
	unsigned int code;
	char service[MAX_SERVICE_NAME_LEN];
	int debug_id;
};

struct binder_timeout_log {
	int next;
	int full;
#ifdef BINDER_PERF_EVAL
	struct binder_timeout_log_entry entry[256];
#else
	struct binder_timeout_log_entry entry[64];
#endif
};
static struct binder_timeout_log binder_timeout_log_t;

/**
 * binder_timeout_log_add - Insert a timeout log
 */
static struct binder_timeout_log_entry *binder_timeout_log_add(void)
{
	struct binder_timeout_log *log = &binder_timeout_log_t;
	struct binder_timeout_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
/**
 * binder_print_bwdog - Output info of a timed-out transaction
 * @t:      pointer to the timed-out transaction
 * @cur_in: current timespec at the time of printing
 * @e:      timeout log entry to record into
 * @r:      reason to output, either while barking or after having barked
 */
static void binder_print_bwdog(struct binder_transaction *t,
			       struct timespec *cur_in,
			       struct binder_timeout_log_entry *e,
			       enum wait_on_reason r)
{
	struct rtc_time tm;
	struct timespec *startime;
	struct timespec cur, sub_t;

	if (cur_in && e) {
		memcpy(&cur, cur_in, sizeof(struct timespec));
	} else {
		do_posix_clock_monotonic_gettime(&cur);
		/* monotonic_to_bootbased(&cur); */
	}
	startime = (r == WAIT_ON_EXEC) ? &t->exe_timestamp : &t->timestamp;
	sub_t = timespec_sub(cur, *startime);

	rtc_time_to_tm(t->tv.tv_sec, &tm);
	pr_debug("%d %s %d:%d to %d:%d %s %u.%03ld sec (%s) dex_code %u",
		 t->debug_id, binder_wait_on_str[r],
		 t->fproc, t->fthrd, t->tproc, t->tthrd,
		 (cur_in && e) ? "over" : "total",
		 (unsigned)sub_t.tv_sec, (sub_t.tv_nsec / NSEC_PER_MSEC),
		 t->service, t->code);
	pr_debug(" start_at %lu.%03ld android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
		 (unsigned long)startime->tv_sec,
		 (startime->tv_nsec / NSEC_PER_MSEC),
		 (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		 tm.tm_hour, tm.tm_min, tm.tm_sec,
		 (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));

	if (e) {
		e->over_sec = sub_t.tv_sec;
		memcpy(&e->ts, startime, sizeof(struct timespec));
	}
}
/**
 * binder_bwdog_safe - Check whether a transaction is free from the monitor
 * @t: pointer to the transaction to check
 *
 * Returns 1 if it is safe (not being watched).
 */
static inline int binder_bwdog_safe(struct binder_transaction *t)
{
	return (t->wait_on == WAIT_ON_NONE) ? 1 : 0;
}
/**
 * binder_query_bwdog - Check whether a transaction is queued
 * @t: pointer to the transaction to check
 *
 * Returns the rb-tree link slot for @t; *slot is NULL if @t is not queued.
 */
static struct rb_node **binder_query_bwdog(struct binder_transaction *t)
{
	struct rb_node **p = &bwdog_transacts.rb_node;
	struct rb_node *parent = NULL;
	struct binder_transaction *transact = NULL;
	int comp;

	while (*p) {
		parent = *p;
		transact = rb_entry(parent, struct binder_transaction, rb_node);

		comp = timespec_compare(&t->bark_time, &transact->bark_time);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			break;
	}
	return p;
}
/**
 * binder_queue_bwdog - Queue a transaction to keep tracking it
 * @t:      pointer to the transaction being tracked
 * @budget: seconds this transaction can afford
 */
static void binder_queue_bwdog(struct binder_transaction *t, time_t budget)
{
	struct rb_node **p = &bwdog_transacts.rb_node;
	struct rb_node *parent = NULL;
	struct binder_transaction *transact = NULL;
	int ret;

	do_posix_clock_monotonic_gettime(&t->bark_time);
	/* monotonic_to_bootbased(&t->bark_time); */
	t->bark_time.tv_sec += budget;

	while (*p) {
		parent = *p;
		transact = rb_entry(parent, struct binder_transaction, rb_node);

		ret = timespec_compare(&t->bark_time, &transact->bark_time);
		if (ret < 0)
			p = &(*p)->rb_left;
		else if (ret > 0)
			p = &(*p)->rb_right;
		else {
			/* nudge the key by 1 ns so tree keys stay unique */
			pr_debug("%d found same key\n", t->debug_id);
			t->bark_time.tv_nsec += 1;
			p = &(*p)->rb_right;
		}
	}
	rb_link_node(&t->rb_node, parent, p);
	rb_insert_color(&t->rb_node, &bwdog_transacts);
}
/**
 * binder_cancel_bwdog - Remove a transaction from the tracking list
 * @t: pointer to the transaction being cancelled
 */
static void binder_cancel_bwdog(struct binder_transaction *t)
{
	struct rb_node **p = NULL;

	if (binder_bwdog_safe(t)) {
		if (t->bark_on) {
			binder_print_bwdog(t, NULL, NULL, t->bark_on);
			t->bark_on = WAIT_ON_NONE;
		}
		return;
	}

	p = binder_query_bwdog(t);
	if (*p == NULL) {
		pr_err("%d waits %s, but not queued...\n",
		       t->debug_id, binder_wait_on_str[t->wait_on]);
		return;
	}

	rb_erase(&t->rb_node, &bwdog_transacts);
	t->wait_on = WAIT_ON_NONE;
}
/**
 * binder_bwdog_bark - Bark on a timed-out transaction
 * @t:   pointer to the transaction that triggered the watchdog
 * @cur: current kernel timespec
 *
 * Records the target process or thread that cannot handle the transaction
 * in time (including its todo list), and adds a timeout log entry for AMS
 * reference.
 */
static void binder_bwdog_bark(struct binder_transaction *t, struct timespec *cur)
{
	struct binder_timeout_log_entry *e;

	if (binder_bwdog_safe(t)) {
		pr_debug("%d watched, but waits on nothing\n", t->debug_id);
		return;
	}

	e = binder_timeout_log_add();
	binder_print_bwdog(t, cur, e, t->wait_on);

	e->r = t->wait_on;
	e->from_proc = t->fproc;
	e->from_thrd = t->fthrd;
	e->debug_id = t->debug_id;
	memcpy(&e->tv, &t->tv, sizeof(struct timeval));

	switch (t->wait_on) {
	case WAIT_ON_READ: {
		if (!t->to_proc) {
			pr_err("%d has NULL target\n", t->debug_id);
			return;
		}
		e->to_proc = t->tproc;
		e->to_thrd = t->tthrd;
		e->code = t->code;
		strcpy(e->service, t->service);
		break;
	}
	case WAIT_ON_EXEC: {
		if (!t->to_thread) {
			pr_err("%d has NULL target for execution\n", t->debug_id);
			return;
		}
		e->to_proc = t->tproc;
		e->to_thrd = t->tthrd;
		e->code = t->code;
		strcpy(e->service, t->service);
		goto dumpBackTrace;
	}
	case WAIT_ON_REPLY_READ: {
		if (!t->to_thread) {
			pr_err("%d has NULL target thread\n", t->debug_id);
			return;
		}
		e->to_proc = t->tproc;
		e->to_thrd = t->tthrd;
		strcpy(e->service, "");
		break;
	}
	default:
		return;
	}

dumpBackTrace:
	return;
}
/**
 * binder_bwdog_thread - Main thread to check the timeout list periodically
 */
static int binder_bwdog_thread(void *__unused)
{
	unsigned long sleep_sec;
	struct rb_node *n = NULL;
	struct timespec cur_time;
	struct binder_transaction *t = NULL;

	for (;;) {
		binder_lock(__func__);
		do_posix_clock_monotonic_gettime(&cur_time);
		/* monotonic_to_bootbased(&cur_time); */

		/* restart from the front after each erase: rb_next() must
		 * not be called on a node already removed from the tree
		 */
		while ((n = rb_first(&bwdog_transacts)) != NULL) {
			t = rb_entry(n, struct binder_transaction, rb_node);
			if (timespec_compare(&cur_time, &t->bark_time) < 0)
				break;

			binder_bwdog_bark(t, &cur_time);
			rb_erase(&t->rb_node, &bwdog_transacts);
			t->bark_on = t->wait_on;
			t->wait_on = WAIT_ON_NONE;
		}

		if (!n)
			sleep_sec = WAIT_BUDGET_MIN;
		else
			sleep_sec = timespec_sub(t->bark_time, cur_time).tv_sec;
		binder_unlock(__func__);

		msleep(sleep_sec * MSEC_PER_SEC);
	}

	pr_debug("%s exit...\n", __func__);
	return 0;
}
/**
 * find_process_by_pid - Convert a pid to a task_struct
 * @pid: pid of the task to look up
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : NULL;
}
/**
 * binder_find_buffer_sender - Find the sender task_struct of this buffer
 * @buf: binder buffer
 *
 * Returns the task_struct of the buffer's sender, or NULL if unknown.
 */
static struct task_struct *binder_find_buffer_sender(struct binder_buffer *buf)
{
	struct binder_transaction *t;
	struct binder_transaction_log_entry *e;
	struct task_struct *tsk;

	t = buf->transaction;
	if (t && t->fproc) {
		tsk = find_process_by_pid(t->fproc);
	} else {
		e = buf->log_entry;
		if ((buf->debug_id == e->debug_id) && e->from_proc)
			tsk = find_process_by_pid(e->from_proc);
		else
			tsk = NULL;
	}
	return tsk;
}
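/*
 * Note (descriptive): if the buffer's transaction pointer is gone or has no
 * sender pid recorded, the sender is recovered from the buffer's log_entry
 * instead; comparing buf->debug_id against log_entry->debug_id guards
 * against the ring-buffer slot having been reused by a newer transaction.
 */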
/**
 * Copied from /kernel/fs/proc/base.c and modified to get the task's full name.
 */
static int binder_proc_pid_cmdline(struct task_struct *task, char *buf)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm;
	/*============ add begin =============================*/
	char c = ' ';
	char *str;
	unsigned int size;
	char *buffer;

	if (NULL == task)
		goto out;
	/*============ add end ===============================*/

	mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	/*============ add begin =============================*/
	buffer = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (NULL == buffer)
		goto out_mm;
	/*============ add end ===============================*/

	len = mm->arg_end - mm->arg_start;
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/* If the nul at the end of args has been overwritten, then */
	/* assume application is using setproctitle(3). */
	if (res > 0 && buffer[res - 1] != '\0' && len < PAGE_SIZE) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > PAGE_SIZE - res)
				len = PAGE_SIZE - res;
			res += access_process_vm(task, mm->env_start,
						 buffer + res, len, 0);
			res = strnlen(buffer, res);
		}
	}

	/*============ add begin =============================*/
	str = strchr(buffer, c);
	if (NULL != str)
		size = (unsigned int)(str - buffer);
	else
		size = res;
	if (size > 256)
		size = 256;
	/* use "%s" so the copied cmdline cannot be misread as a format string */
	snprintf(buf, size, "%s", buffer);
	kfree(buffer);
	/*============ add end ===============================*/
out_mm:
	mmput(mm);
out:
	return res;
}
/**
 * binder_print_buf - Print buffer info
 * @buffer:  target buffer
 * @dest:    destination string pointer (may be NULL)
 * @success: whether this buffer was allocated successfully
 * @check:   set when this log is printed for owner finding
 */
static void binder_print_buf(struct binder_buffer *buffer, char *dest,
			     int success, int check)
{
	struct rtc_time tm;
	struct binder_transaction *t = buffer->transaction;
	char str[TRANS_LOG_LEN];
	struct task_struct *sender_tsk;
	struct task_struct *rec_tsk;
	char sender_name[256], rec_name[256];
	int len_s, len_r;
	int ptr = 0;

	if (NULL == t) {
		struct binder_transaction_log_entry *log_entry = buffer->log_entry;

		if ((log_entry != NULL)
		    && (buffer->debug_id == log_entry->debug_id)) {
			rtc_time_to_tm(log_entry->tv.tv_sec, &tm);
			sender_tsk = find_process_by_pid(log_entry->from_proc);
			rec_tsk = find_process_by_pid(log_entry->to_proc);
			len_s = binder_proc_pid_cmdline(sender_tsk, sender_name);
			len_r = binder_proc_pid_cmdline(rec_tsk, rec_name);
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					"binder:check=%d,success=%d,id=%d,call=%s,type=%s,",
					check, success, buffer->debug_id,
					buffer->async_transaction ? "async" : "sync",
					(2 == log_entry->call_type) ? "reply" :
					((1 == log_entry->call_type) ? "async" : "call"));
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					"from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,",
					log_entry->from_proc, log_entry->from_thread,
					len_s ? sender_name : ((sender_tsk != NULL) ?
					sender_tsk->comm : ""),
					log_entry->to_proc,
					len_r ? rec_name : ((rec_tsk != NULL) ? rec_tsk->comm : ""),
					log_entry->to_thread, log_entry->service);
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					"size=%zd,node=%d,handle=%d,dex=%u,auf=%d,start=%lu.%03ld,",
					(buffer->data_size + buffer->offsets_size),
					log_entry->to_node, log_entry->target_handle,
					log_entry->code, buffer->allow_user_free,
					(unsigned long)log_entry->timestamp.tv_sec,
					(log_entry->timestamp.tv_nsec / NSEC_PER_MSEC));
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					"android=%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
					(tm.tm_year + 1900), (tm.tm_mon + 1),
					tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
					(unsigned long)(log_entry->tv.tv_usec / USEC_PER_MSEC));
		} else {
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					"binder:check=%d,success=%d,id=%d,call=%s, ,",
					check, success, buffer->debug_id,
					buffer->async_transaction ? "async" : "sync");
			ptr += snprintf(str + ptr, sizeof(str) - ptr,
					",,,,,,,size=%zd,,,,auf=%d,,\n",
					(buffer->data_size + buffer->offsets_size),
					buffer->allow_user_free);
		}
	} else {
		rtc_time_to_tm(t->tv.tv_sec, &tm);
		sender_tsk = find_process_by_pid(t->fproc);
		rec_tsk = find_process_by_pid(t->tproc);
		len_s = binder_proc_pid_cmdline(sender_tsk, sender_name);
		len_r = binder_proc_pid_cmdline(rec_tsk, rec_name);
		ptr += snprintf(str + ptr, sizeof(str) - ptr,
				"binder:check=%d,success=%d,id=%d,call=%s,type=%s,",
				check, success, t->debug_id,
				buffer->async_transaction ? "async" : "sync ",
				binder_wait_on_str[t->wait_on]);
		ptr += snprintf(str + ptr, sizeof(str) - ptr,
				"from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,",
				t->fproc, t->fthrd,
				len_s ? sender_name : ((sender_tsk != NULL) ?
				sender_tsk->comm : ""),
				t->tproc,
				len_r ? rec_name : ((rec_tsk != NULL) ? rec_tsk->comm : ""),
				t->tthrd, t->service);
		ptr += snprintf(str + ptr, sizeof(str) - ptr,
				"size=%zd,,,dex=%u,auf=%d,start=%lu.%03ld,android=",
				(buffer->data_size + buffer->offsets_size), t->code,
				buffer->allow_user_free, (unsigned long)t->timestamp.tv_sec,
				(t->timestamp.tv_nsec / NSEC_PER_MSEC));
		ptr += snprintf(str + ptr, sizeof(str) - ptr,
				"%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
				(tm.tm_year + 1900),
				(tm.tm_mon + 1), tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
				(unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
	}
	pr_debug("%s", str);
	if (dest != NULL)
		strncat(dest, str, sizeof(str) - strlen(dest) - 1);
}
/**
 * binder_check_buf_checked -
 * A buffer-related issue usually causes a series of failures, so only the
 * first occurrence is examined, to minimize debugging overhead.
 */
static int binder_check_buf_checked(void)
{
	return (binder_check_buf_pid == -1);
}
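/*
 * Assumed behavior (not shown in this excerpt): binder_check_buf_pid is
 * presumably initialized to -1 and reset to -1 after the first failure has
 * been dumped, so binder_check_buf() below bails out early on the repeated
 * failures that typically follow the first one.
 */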
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer);
/**
 * binder_check_buf - Dump necessary info for buffer usage analysis
 * @target_proc:	receiver
 * @size:		requested size
 * @is_async:		1 if an async call
 */
static void binder_check_buf(struct binder_proc *target_proc, size_t size, int is_async)
{
	struct rb_node *n;
	struct binder_buffer *buffer;
	int i;
	int large_buffer_count = 0;
	size_t tmp_size, threshold;
	struct task_struct *sender;
	struct task_struct *larger;
	char sender_name[256], rec_name[256];
	struct timespec exp_timestamp;
	struct timeval tv;
	struct rtc_time tm;
#if defined(CONFIG_MTK_AEE_FEATURE)
	int db_flag = DB_OPT_BINDER_INFO;
#endif
	int len_s, len_r;
	int ptr = 0;

	pr_debug("buffer allocation failed on %d:0 %s from %d:%d size %zd\n",
		 target_proc->pid,
		 is_async ? "async" : "call ",
		 binder_check_buf_pid, binder_check_buf_tid, size);
	if (binder_check_buf_checked())
		return;
	/* check blocked service for async call */
	if (is_async) {
		pr_debug("buffer allocation failed on %d:0 (%s) async service blocked\n",
			 target_proc->pid,
			 target_proc->tsk ? target_proc->tsk->comm : "");
	}
	pr_debug("%d:0 pending transactions:\n", target_proc->pid);
	threshold = target_proc->buffer_size / 16;
	for (n = rb_last(&target_proc->allocated_buffers), i = 0;
	     n; n = rb_prev(n), i++) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		tmp_size = binder_buffer_size(target_proc, buffer);
		BUG_ON(buffer->free);
		if (tmp_size > threshold) {
			if ((NULL == target_proc->large_buffer) ||
			    (target_proc->large_buffer &&
			     (tmp_size >
			      binder_buffer_size(target_proc, target_proc->large_buffer))))
				target_proc->large_buffer = buffer;
			large_buffer_count++;
			binder_print_buf(buffer, NULL, 1, 0);
		} else {
			if (i < 20)
				binder_print_buf(buffer, NULL, 1, 0);
		}
	}
  1039. pr_debug("%d:0 total pending trans: %d(%d large isze)\n",
  1040. target_proc->pid, i, large_buffer_count);
	do_posix_clock_monotonic_gettime(&exp_timestamp);
	/* monotonic_to_bootbased(&exp_timestamp); */
	do_gettimeofday(&tv);
	/* consider time zone. translate to android time */
	tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
	rtc_time_to_tm(tv.tv_sec, &tm);
	sender = find_process_by_pid(binder_check_buf_pid);
	len_s = binder_proc_pid_cmdline(sender, sender_name);
	len_r = binder_proc_pid_cmdline(target_proc->tsk, rec_name);
	if (size > threshold) {
		if (target_proc->large_buffer) {
			pr_debug("on %d:0 the largest pending trans is:\n",
				 target_proc->pid);
			binder_print_buf(target_proc->large_buffer, large_msg, 1, 0);
		}
		snprintf(aee_word, sizeof(aee_word),
			 "check %s: large binder trans fail on %d:0 size %zd",
			 len_s ? sender_name : ((sender != NULL) ? sender->comm : ""),
			 target_proc->pid, size);
		ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
			"BINDER_BUF_DEBUG\n%s", large_msg);
		ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
			"binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,",
			1, 0, is_async ? "async" : "sync",
			binder_check_buf_pid, binder_check_buf_tid);
		ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
			"name=%s,to=%d,name=%s,,,size=%zd,,,," ",start=%lu.%03ld,android=",
			len_s ? sender_name : ((sender != NULL) ? sender->comm : ""),
			target_proc->pid,
			len_r ? rec_name : ((target_proc->tsk != NULL) ?
					target_proc->tsk->comm : ""),
			size, (unsigned long)exp_timestamp.tv_sec,
			(exp_timestamp.tv_nsec / NSEC_PER_MSEC));
		ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
			"%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
			(tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday, tm.tm_hour,
			tm.tm_min, tm.tm_sec, (unsigned long)(tv.tv_usec / USEC_PER_MSEC));
		ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
			"large data size,check sender %d(%s)! check kernel log\n",
			binder_check_buf_pid, sender ? sender->comm : "");
	} else {
		if (target_proc->large_buffer) {
			pr_debug("on %d:0 the largest pending trans is:\n",
				 target_proc->pid);
			binder_print_buf(target_proc->large_buffer, large_msg, 1, 1);
			larger = binder_find_buffer_sender(target_proc->large_buffer);
			snprintf(aee_word, sizeof(aee_word),
				 "check %s: large binder trans",
				 (larger != NULL) ? larger->comm : "");
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"BINDER_BUF_DEBUG:\n%s", large_msg);
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,name=%s,",
				0, 0, is_async ? "async" : "sync",
				binder_check_buf_pid, binder_check_buf_tid,
				len_s ? sender_name : ((sender != NULL) ?
						sender->comm : ""));
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"to=%d,name=%s,,,size=%zd,,,,",
				target_proc->pid,
				len_r ? rec_name : ((target_proc->tsk != NULL) ?
						target_proc->tsk->comm : ""),
				size);
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				",start=%lu.%03ld,android=",
				(unsigned long)exp_timestamp.tv_sec,
				(exp_timestamp.tv_nsec / NSEC_PER_MSEC));
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
				(tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
				tm.tm_hour, tm.tm_min, tm.tm_sec,
				(unsigned long)(tv.tv_usec / USEC_PER_MSEC));
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"large data size,check sender %d(%s)! check kernel log\n",
				(larger != NULL) ? larger->pid : 0,
				(larger != NULL) ? larger->comm : "");
		} else {
			snprintf(aee_word, sizeof(aee_word),
				 "check %s: binder buffer exhaust ",
				 len_r ? rec_name : ((target_proc->tsk != NULL) ?
						target_proc->tsk->comm : ""));
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"BINDER_BUF_DEBUG\n binder:check=%d,success=%d,",
				1, 0);
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"call=%s,from=%d,tid=%d,name=%s,to=%d,name=%s,,,size=%zd,,,,",
				is_async ? "async" : "sync",
				binder_check_buf_pid, binder_check_buf_tid,
				len_s ? sender_name : ((sender != NULL) ?
						sender->comm : ""),
				target_proc->pid,
				len_r ? rec_name : ((target_proc->tsk != NULL) ?
						target_proc->tsk->comm : ""),
				size);
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				",start=%lu.%03ld,android=%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
				(unsigned long)exp_timestamp.tv_sec,
				(exp_timestamp.tv_nsec / NSEC_PER_MSEC), (tm.tm_year + 1900),
				(tm.tm_mon + 1), tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
				(unsigned long)(tv.tv_usec / USEC_PER_MSEC));
			ptr += snprintf(aee_msg + ptr, sizeof(aee_msg) - ptr,
				"%d small trans pending, check receiver %d(%s)! check kernel log\n",
				i, target_proc->pid,
				target_proc->tsk ? target_proc->tsk->comm : "");
		}
	}
	binder_check_buf_pid = -1;
	binder_check_buf_tid = -1;
#if defined(CONFIG_MTK_AEE_FEATURE)
	aee_kernel_warning_api(__FILE__, __LINE__, db_flag, &aee_word[0], &aee_msg[0]);
#endif
}
#endif
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;
	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);
	return __alloc_fd(files, 0, rlim_cur, flags);
}
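/*
 * Usage sketch (illustrative, mirroring the BINDER_TYPE_FD handling later
 * in this file): unlike get_unused_fd_flags(), which works on "current",
 * this allocates a slot in the *target* proc's file table, capped by that
 * task's RLIMIT_NOFILE:
 *
 *	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;	// -ESRCH (no files) or -EMFILE
 *	task_fd_install(target_proc, fd, file);
 */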
/*
 * copied from fd_install
 */
static void task_fd_install(struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
	return retval;
}
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
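/*
 * Worked example (assuming the usual rlimit_to_nice() mapping, where an
 * RLIMIT_NICE rlim_cur of 40..1 corresponds to nice -20..19): with the
 * default rlim_cur of 0, min_nice computes to 20, which exceeds MAX_NICE
 * (19), so the capped set_user_nice() still takes effect but the
 * "RLIMIT_NICE not set" warning above is also emitted.
 */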
static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
				  struct binder_buffer, entry) - (size_t)buffer->data;
}
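/*
 * Layout sketch (derived from the arithmetic above): buffers sit
 * back-to-back in the proc's mmap area, each header immediately followed
 * by its data. A buffer's usable size is the distance from its data[] to
 * the next header, or to the end of the area for the last buffer:
 *
 *	| hdr A | data A ...... | hdr B | data B ... | ... area end |
 *	         ^- binder_buffer_size(proc, A) -----^
 */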
static void binder_insert_free_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);
	new_buffer_size = binder_buffer_size(proc, new_buffer);
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		     proc->pid, new_buffer_size, new_buffer);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);
		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);
		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
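/*
 * Address translation sketch: user_ptr is the userspace address of a
 * buffer's data[]. Subtracting proc->user_buffer_offset yields the kernel
 * address of the same bytes, and stepping back by
 * offsetof(struct binder_buffer, data) lands on the header that the
 * allocated_buffers tree indexes. With made-up numbers: user_ptr
 * 0x40001040, user_buffer_offset 0x10000000, offsetof(..., data) 0x40
 * give kern_ptr 0x30001000.
 */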
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end, struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n",
		     proc->pid, allocate ? "allocate" : "free", start, end);
	if (end <= start)
		return 0;
	trace_binder_update_page_range(proc, allocate, start, end);
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);
	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n", proc->pid);
			vma = NULL;
		}
	}
	if (allocate == 0)
		goto free_range;
	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       proc->pid);
		goto err_no_vma;
	}
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
#ifdef MTK_BINDER_PAGE_USED_RECORD
		binder_page_used++;
		proc->page_used++;
		if (binder_page_used > binder_page_used_peak)
			binder_page_used_peak = binder_page_used;
		if (proc->page_used > proc->page_used_peak)
			proc->page_used_peak = proc->page_used;
#endif
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr = (uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
#ifdef MTK_BINDER_PAGE_USED_RECORD
		if (binder_page_used > 0)
			binder_page_used--;
		if (proc->page_used > 0)
			proc->page_used--;
#endif
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
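/*
 * Recap of the dual mapping maintained above: every allocated page is
 * mapped twice -- into the kernel (map_vm_area) so the driver can
 * copy_from_user() transaction data into it, and into the receiver's vma
 * (vm_insert_page) at the fixed user_buffer_offset so the receiver reads
 * the same physical page without a second copy. The free path tears down
 * both mappings (zap_page_range, unmap_kernel_range) before
 * __free_page(), and the MTK_BINDER_PAGE_USED_RECORD counters keep
 * global and per-proc page-usage watermarks.
 */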
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size, size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;
#ifdef MTK_BINDER_DEBUG
	size_t proc_max_size;
#endif

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n", proc->pid);
		return NULL;
	}
	size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *));
	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				  proc->pid, data_size, offsets_size);
		return NULL;
	}
#ifdef MTK_BINDER_DEBUG
	proc_max_size = (is_async ? (proc->buffer_size / 2) : proc->buffer_size);
	if (proc_max_size < size + sizeof(struct binder_buffer)) {
		binder_user_error("%d: got transaction with too large size %s alloc size %zd-%zd allowed size %zd\n",
				  proc->pid, is_async ? "async" : "sync",
				  data_size, offsets_size,
				  (proc_max_size - sizeof(struct binder_buffer)));
		return NULL;
	}
#endif
	if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) {
#ifdef MTK_BINDER_DEBUG
		pr_err("%d: binder_alloc_buf size %zd failed, no async space left (%zd)\n",
		       proc->pid, size, proc->free_async_space);
#else
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			     proc->pid, size);
#endif
#ifdef BINDER_MONITOR
		binder_check_buf(proc, size, 1);
#endif
		return NULL;
	}
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);
		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
#ifdef BINDER_MONITOR
	if (log_disable & BINDER_BUF_WARN) {
		if (size > 64) {
			pr_err("%d: binder_alloc_buf size %zd failed, UT auto triggered!\n",
			       proc->pid, size);
			binder_check_buf(proc, size, 0);
		}
	}
#endif
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       proc->pid, size);
#ifdef BINDER_MONITOR
		binder_check_buf(proc, size, 0);
#endif
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		     proc->pid, size, buffer, buffer_size);
	has_page_addr = (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr = (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
				     (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				     end_page_addr, NULL))
		return NULL;
	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}
	return buffer;
}
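/*
 * Allocation sketch: the request is padded so both regions stay
 * pointer-aligned; on a 64-bit kernel, data_size 20 + offsets_size 8
 * becomes ALIGN(20, 8) + ALIGN(8, 8) = 32 bytes. free_buffers is keyed
 * by size, so the descent above is a best-fit search; if the winner has
 * room for another header plus at least 4 bytes, its tail is split off
 * and re-inserted as a new free buffer. Async requests are additionally
 * charged against free_async_space (half the mmap area, as the MTK size
 * check above also assumes) so one-way floods cannot exhaust the space
 * needed by synchronous calls.
 */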
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_proc *proc, struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK share page with %pK\n",
			     proc->pid, buffer, prev);
	}
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next, struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) == buffer_start_page(buffer))
				free_page_start = 0;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %pK share page with %pK\n",
				     proc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
					 buffer_start_page(buffer) :
					 buffer_end_page(buffer),
					 (free_page_end ?
					  buffer_end_page(buffer) :
					  buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}
static void binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		     proc->pid, buffer, size, buffer_size);
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
#ifdef BINDER_MONITOR
	buffer->log_entry = NULL;
#endif
	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}
	binder_update_page_range(proc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
				 NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
							struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
							struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
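/*
 * Coalescing note: the pages wholly inside the freed buffer are released
 * first, then a free successor and/or predecessor is merged by deleting
 * the redundant header and re-inserting the enlarged span, so
 * free_buffers never holds two adjacent free buffers.
 */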
static struct binder_node *binder_get_node(struct binder_proc *proc, binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);
		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr, binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);
		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node && node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n", node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n", node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}
	return 0;
}
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (desc < ref->desc) {
			n = n->rb_left;
		} else if (desc > ref->desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);
		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
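/*
 * Descriptor assignment example: desc 0 is reserved for the context
 * manager; otherwise the scan above yields the smallest unused value.
 * With existing descs {1, 2, 4} the walk advances 1 -> 2 -> 3, stops at
 * 4 (> 3), and the new ref receives desc 3.
 */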
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc, ref->node->debug_id);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
static int binder_inc_ref(struct binder_ref *ref, int strong, struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
#ifdef BINDER_MONITOR
	binder_cancel_bwdog(t);
#endif
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					     t->debug_id,
					     target_thread->proc->pid, target_thread->pid);
				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
				       target_thread->proc->pid,
				       target_thread->pid, target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;
		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);
		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n", t->debug_id);
	}
}
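/*
 * Error-propagation sketch: for a failed synchronous call the loop walks
 * the from_parent chain back toward the originator. The first hop whose
 * sender thread is still alive receives error_code via return_error and
 * is woken; hops whose sender died are popped silently until the root is
 * reached, where there is nobody left to notify.
 */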
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);
	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);
	offp = (binder_size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		}
		break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle,
						fp->type == BINDER_TYPE_HANDLE);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		}
		break;
		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d\n", fp->handle);
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, fp->type);
			break;
		}
	}
}
#ifdef RT_PRIO_INHERIT
static void mt_sched_setscheduler_nocheck(struct task_struct *p, int policy,
					  struct sched_param *param)
{
	int ret;

	ret = sched_setscheduler_nocheck(p, policy, param);
	if (ret)
		pr_err("set scheduler fail, error code: %d\n", ret);
}
#endif
#ifdef BINDER_MONITOR
/* binder_update_transaction_time - update read/exec done time for transaction
** step:
** 0: start // not used
** 1: read
** 2: reply
*/
static void binder_update_transaction_time(struct binder_transaction_log *t_log,
					   struct binder_transaction *bt, int step)
{
	if (NULL == bt)
		return;
	if (step < 1 || step > 2) {
		pr_err("update trans time fail, wrong step value for id %d\n",
		       bt->debug_id);
		return;
	}
	if ((bt->log_idx == -1) || (bt->log_idx > (t_log->size - 1)))
		return;
	if (t_log->entry[bt->log_idx].debug_id == bt->debug_id) {
		if (step == 1)
			do_posix_clock_monotonic_gettime(&t_log->entry[bt->log_idx].readstamp);
		else if (step == 2)
			do_posix_clock_monotonic_gettime(&t_log->entry[bt->log_idx].endstamp);
	}
}
/* binder_update_transaction_ttid - record the to-thread pid of a logged
** transaction once it is known
*/
static void binder_update_transaction_ttid(struct binder_transaction_log *t_log,
					   struct binder_transaction *bt)
{
	if ((NULL == bt) || (NULL == t_log))
		return;
	if ((bt->log_idx == -1) || (bt->log_idx > (t_log->size - 1)))
		return;
	if (bt->tthrd < 0)
		return;
	if ((t_log->entry[bt->log_idx].debug_id == bt->debug_id) &&
	    (t_log->entry[bt->log_idx].to_thread == 0)) {
		t_log->entry[bt->log_idx].to_thread = bt->tthrd;
	}
}
/* this is an addService() transaction identified by:
 * fp->type == BINDER_TYPE_BINDER && tr->target.handle == 0
 */
static void parse_service_name(struct binder_transaction_data *tr,
			       struct binder_proc *proc, char *name)
{
	unsigned int i, len = 0;
	char *tmp;

	if (tr->target.handle == 0) {
		for (i = 0; (2 * i) < tr->data_size; i++) {
			/* hack into addService() payload:
			 * service name string is located at MAGIC_SERVICE_NAME_OFFSET,
			 * and interleaved with character '\0'.
			 * for example, 'p', '\0', 'h', '\0', 'o', '\0', 'n', '\0', 'e'
			 */
			if ((2 * i) < MAGIC_SERVICE_NAME_OFFSET)
				continue;
			/* prevent array index overflow */
			if (len >= (MAX_SERVICE_NAME_LEN - 1))
				break;
			tmp = (char *)(uintptr_t)(tr->data.ptr.buffer + (2 * i));
			len += sprintf(name + len, "%c", *tmp);
		}
		name[len] = '\0';
	} else {
		name[0] = '\0';
	}
	/* via addService of activity service, identify
	 * system_server's process id.
	 */
	if (!strcmp(name, "activity")) {
		system_server_pid = proc->pid;
		pr_debug("system_server %d\n", system_server_pid);
	}
}
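/*
 * Payload example (illustrative; assumes the service name is ASCII
 * encoded as UTF-16LE by the Parcel): an addService("phone") buffer
 * carries 'p','\0','h','\0','o','\0','n','\0','e','\0' starting at
 * MAGIC_SERVICE_NAME_OFFSET, so copying every second byte reconstructs
 * "phone"; names longer than MAX_SERVICE_NAME_LEN - 1 are truncated by
 * the bounds check above.
 */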
#endif
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
#ifdef BINDER_MONITOR
	struct binder_transaction_log_entry log_entry;
	unsigned int log_idx = -1;

	if ((reply && (tr->data_size < (proc->buffer_size / 16))) || log_disable)
		e = &log_entry;
	else {
		e = binder_transaction_log_add(&binder_transaction_log);
		if (binder_transaction_log.next)
			log_idx = binder_transaction_log.next - 1;
		else
			log_idx = binder_transaction_log.size - 1;
	}
#else
	e = binder_transaction_log_add(&binder_transaction_log);
#endif
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
#ifdef BINDER_MONITOR
	e->code = tr->code;
	/* fd 0 is also valid... set initial value to -1 */
	e->fd = -1;
	do_posix_clock_monotonic_gettime(&e->timestamp);
	/* monotonic_to_bootbased(&e->timestamp); */
	do_gettimeofday(&e->tv);
	/* consider time zone. translate to android time */
	e->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
#endif
	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
#ifdef BINDER_MONITOR
		binder_cancel_bwdog(in_reply_to);
#endif
		binder_set_nice(in_reply_to->saved_priority);
#ifdef RT_PRIO_INHERIT
		if (rt_task(current)
		    && (MAX_RT_PRIO != in_reply_to->saved_rt_prio)
		    && !(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					   BINDER_LOOPER_STATE_ENTERED))) {
			struct sched_param param = {
				.sched_priority = in_reply_to->saved_rt_prio,
			};

			mt_sched_setscheduler_nocheck(current,
						      in_reply_to->saved_policy, &param);
#ifdef BINDER_MONITOR
			if (log_disable & BINDER_RT_LOG_ENABLE) {
				pr_debug("reply reset %d sched_policy from %d to %d rt_prio from %d to %d\n",
					 proc->pid, in_reply_to->policy,
					 in_reply_to->saved_policy,
					 in_reply_to->rt_prio, in_reply_to->saved_rt_prio);
			}
#endif
		}
#endif
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
					  proc->pid, thread->pid, in_reply_to->debug_id,
					  in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
#ifdef MTK_BINDER_DEBUG
			binder_user_error("%d:%d got reply transaction with bad transaction reply_from, ",
					  proc->pid, thread->pid);
			binder_user_error("transaction %d has target %d:%d\n",
					  in_reply_to->debug_id,
					  in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0,
					  in_reply_to->to_thread ?
					  in_reply_to->to_thread->pid : 0);
#endif
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
					  proc->pid, thread->pid,
					  target_thread->transaction_stack ?
					  target_thread->transaction_stack->debug_id : 0,
					  in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
#ifdef BINDER_MONITOR
		e->service[0] = '\0';
#endif
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
#ifdef MTK_BINDER_DEBUG
				binder_user_error("%d:%d binder_context_mgr_node is NULL\n",
						  proc->pid, thread->pid);
#endif
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
#ifdef BINDER_MONITOR
		strcpy(e->service, target_node->name);
#endif
		target_proc = target_node->proc;
		if (target_proc == NULL) {
#ifdef MTK_BINDER_DEBUG
			binder_user_error("%d:%d target_proc is NULL\n",
					  proc->pid, thread->pid);
#endif
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
						  proc->pid, thread->pid, tmp->debug_id,
						  tmp->to_proc ? tmp->to_proc->pid : 0,
						  tmp->to_thread ?
						  tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
#ifdef MTK_BINDER_DEBUG
		binder_user_error("%d:%d transaction allocation failed\n",
				  proc->pid, thread->pid);
#endif
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
#ifdef BINDER_MONITOR
	memcpy(&t->timestamp, &e->timestamp, sizeof(struct timespec));
	/* do_gettimeofday(&t->tv); */
	/* consider time zone. translate to android time */
	/* t->tv.tv_sec -= (sys_tz.tz_minuteswest * 60); */
	memcpy(&t->tv, &e->tv, sizeof(struct timeval));
	if (!reply)
		strcpy(t->service, target_node->name);
#endif
	binder_stats_created(BINDER_STAT_TRANSACTION);
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
#ifdef MTK_BINDER_DEBUG
		binder_user_error("%d:%d tcomplete allocation failed\n",
				  proc->pid, thread->pid);
#endif
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;
	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
#ifdef BINDER_MONITOR
	t->fproc = proc->pid;
	t->fthrd = thread->pid;
	t->tproc = target_proc->pid;
	t->tthrd = target_thread ? target_thread->pid : 0;
	t->log_idx = log_idx;
	if (!binder_check_buf_checked()) {
		binder_check_buf_pid = proc->pid;
		binder_check_buf_tid = thread->pid;
	}
#endif
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
#ifdef RT_PRIO_INHERIT
	t->rt_prio = current->rt_priority;
	t->policy = current->policy;
	t->saved_rt_prio = MAX_RT_PRIO;
#endif
	trace_binder_transaction(reply, t, target_node);
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
				     tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
#ifdef MTK_BINDER_DEBUG
		binder_user_error("%d:%d buffer allocation failed on %d:0\n",
				  proc->pid, thread->pid, target_proc->pid);
#endif
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
#ifdef BINDER_MONITOR
	t->buffer->log_entry = e;
#endif
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);
	offp = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				  proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				  proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    *offp < off_min ||
		    t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)(t->buffer->data_size - sizeof(*fp)));
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
#ifdef MTK_BINDER_DEBUG
					binder_user_error("%d:%d create new node failed\n",
							  proc->pid, thread->pid);
#endif
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority =
					fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds =
					!!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
#ifdef BINDER_MONITOR
				parse_service_name(tr, proc, node->name);
#endif
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  (u64)fp->binder, node->debug_id,
						  (u64)fp->cookie, (u64)node->cookie);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
#ifdef MTK_BINDER_DEBUG
				binder_user_error("%d:%d get binder ref failed\n",
						  proc->pid, thread->pid);
#endif
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->binder = 0;
			fp->handle = ref->desc;
			fp->cookie = 0;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     " node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		}
		break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle,
						fp->type == BINDER_TYPE_HANDLE);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						  proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node,
						fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc,
					     ref->node->debug_id, (u64)ref->node->ptr);
			} else {
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
#ifdef MTK_BINDER_DEBUG
					binder_user_error("%d:%d get new binder ref failed\n",
							  proc->pid, thread->pid);
#endif
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->binder = 0;
				fp->handle = new_ref->desc;
				fp->cookie = 0;
				binder_inc_ref(new_ref,
					       fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref, new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     " ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc,
					     new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		}
		break;
  2526. case BINDER_TYPE_FD:{
  2527. int target_fd;
  2528. struct file *file;
  2529. if (reply) {
  2530. if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
  2531. binder_user_error
  2532. ("%d:%d got reply with fd, %d, but target does not allow fds\n",
  2533. proc->pid, thread->pid, fp->handle);
  2534. return_error = BR_FAILED_REPLY;
  2535. goto err_fd_not_allowed;
  2536. }
  2537. } else if (!target_node->accept_fds) {
  2538. binder_user_error
  2539. ("%d:%d got transaction with fd, %d, but target does not allow fds\n",
  2540. proc->pid, thread->pid, fp->handle);
  2541. return_error = BR_FAILED_REPLY;
  2542. goto err_fd_not_allowed;
  2543. }
  2544. file = fget(fp->handle);
  2545. if (file == NULL) {
  2546. binder_user_error
  2547. ("%d:%d got transaction with invalid fd, %d\n",
  2548. proc->pid, thread->pid, fp->handle);
  2549. return_error = BR_FAILED_REPLY;
  2550. goto err_fget_failed;
  2551. }
  2552. if (security_binder_transfer_file
  2553. (proc->tsk, target_proc->tsk, file) < 0) {
  2554. fput(file);
  2555. return_error = BR_FAILED_REPLY;
  2556. goto err_get_unused_fd_failed;
  2557. }
  2558. target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
  2559. if (target_fd < 0) {
  2560. fput(file);
  2561. #ifdef MTK_BINDER_DEBUG
  2562. binder_user_error
  2563. ("%d:%d to %d failed, %d no unused fd available(%d:%s fd leak?), %d\n",
  2564. proc->pid, thread->pid,
  2565. target_proc->pid, target_proc->pid,
  2566. target_proc->pid,
  2567. target_proc->tsk ? target_proc->tsk->comm : "",
  2568. target_fd);
  2569. #endif
  2570. return_error = BR_FAILED_REPLY;
  2571. goto err_get_unused_fd_failed;
  2572. }
  2573. task_fd_install(target_proc, target_fd, file);
  2574. trace_binder_transaction_fd(t, fp->handle, target_fd);
  2575. binder_debug(BINDER_DEBUG_TRANSACTION,
  2576. " fd %d -> %d\n", fp->handle, target_fd);
  2577. /* TODO: fput? */
  2578. fp->binder = 0;
  2579. fp->handle = target_fd;
  2580. #ifdef BINDER_MONITOR
  2581. e->fd = target_fd;
  2582. #endif
  2583. }
  2584. break;
  2585. default:
  2586. binder_user_error
  2587. ("%d:%d got transaction with invalid object type, %x\n",
  2588. proc->pid, thread->pid, fp->type);
  2589. return_error = BR_FAILED_REPLY;
  2590. goto err_bad_object_type;
  2591. }
  2592. }
  2593. if (reply) {
  2594. BUG_ON(t->buffer->async_transaction != 0);
  2595. #ifdef BINDER_MONITOR
  2596. binder_update_transaction_time(&binder_transaction_log, in_reply_to, 2);
  2597. #endif
  2598. binder_pop_transaction(target_thread, in_reply_to);
  2599. } else if (!(t->flags & TF_ONE_WAY)) {
  2600. BUG_ON(t->buffer->async_transaction != 0);
  2601. t->need_reply = 1;
  2602. t->from_parent = thread->transaction_stack;
  2603. thread->transaction_stack = t;
  2604. } else {
  2605. BUG_ON(target_node == NULL);
  2606. BUG_ON(t->buffer->async_transaction != 1);
  2607. if (target_node->has_async_transaction) {
  2608. target_list = &target_node->async_todo;
  2609. target_wait = NULL;
  2610. } else
  2611. target_node->has_async_transaction = 1;
  2612. }
  2613. t->work.type = BINDER_WORK_TRANSACTION;
  2614. list_add_tail(&t->work.entry, target_list);
  2615. tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
  2616. list_add_tail(&tcomplete->entry, &thread->todo);
  2617. #ifdef RT_PRIO_INHERIT
  2618. if (target_wait) {
  2619. unsigned long flag;
  2620. wait_queue_t *curr, *next;
  2621. bool is_lock = false;
  2622. spin_lock_irqsave(&target_wait->lock, flag);
  2623. is_lock = true;
  2624. list_for_each_entry_safe(curr, next, &target_wait->task_list, task_list) {
  2625. unsigned flags = curr->flags;
  2626. struct task_struct *tsk = curr->private;
  2627. if (tsk == NULL) {
  2628. spin_unlock_irqrestore(&target_wait->lock, flag);
  2629. is_lock = false;
  2630. wake_up_interruptible(target_wait);
  2631. break;
  2632. }
  2633. #ifdef MTK_BINDER_DEBUG
  2634. if (tsk->state == TASK_UNINTERRUPTIBLE) {
  2635. pr_err("from %d:%d to %d:%d target thread state: %ld\n",
  2636. proc->pid, thread->pid, tsk->tgid, tsk->pid, tsk->state);
  2637. show_stack(tsk, NULL);
  2638. }
  2639. #endif
  2640. if (!reply && (t->policy == SCHED_RR || t->policy == SCHED_FIFO)
  2641. && t->rt_prio > tsk->rt_priority && !(t->flags & TF_ONE_WAY)) {
  2642. struct sched_param param = {
  2643. .sched_priority = t->rt_prio,
  2644. };
  2645. t->saved_rt_prio = tsk->rt_priority;
  2646. t->saved_policy = tsk->policy;
  2647. mt_sched_setscheduler_nocheck(tsk, t->policy, &param);
  2648. #ifdef BINDER_MONITOR
  2649. if (log_disable & BINDER_RT_LOG_ENABLE) {
  2650. pr_debug
  2651. ("write set %d sched_policy from %d to %d rt_prio from %d to %d\n",
  2652. tsk->pid, t->saved_policy,
  2653. t->policy, t->saved_rt_prio, t->rt_prio);
  2654. }
  2655. #endif
  2656. }
  2657. if (curr->func(curr, TASK_INTERRUPTIBLE, 0, NULL) &&
  2658. (flags & WQ_FLAG_EXCLUSIVE))
  2659. break;
  2660. }
  2661. if (is_lock)
  2662. spin_unlock_irqrestore(&target_wait->lock, flag);
  2663. }
  2664. #else
  2665. if (target_wait)
  2666. wake_up_interruptible(target_wait);
  2667. #endif
  2668. #ifdef BINDER_MONITOR
  2669. t->wait_on = reply ? WAIT_ON_REPLY_READ : WAIT_ON_READ;
  2670. binder_queue_bwdog(t, (time_t) WAIT_BUDGET_READ);
  2671. #endif
  2672. return;
  2673. err_get_unused_fd_failed:
  2674. err_fget_failed:
  2675. err_fd_not_allowed:
  2676. err_binder_get_ref_for_node_failed:
  2677. err_binder_get_ref_failed:
  2678. err_binder_new_node_failed:
  2679. err_bad_object_type:
  2680. err_bad_offset:
  2681. err_copy_data_failed:
  2682. trace_binder_transaction_failed_buffer_release(t->buffer);
  2683. binder_transaction_buffer_release(target_proc, t->buffer, offp);
  2684. t->buffer->transaction = NULL;
  2685. binder_free_buf(target_proc, t->buffer);
  2686. err_binder_alloc_buf_failed:
  2687. kfree(tcomplete);
  2688. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  2689. err_alloc_tcomplete_failed:
  2690. #ifdef BINDER_MONITOR
  2691. binder_cancel_bwdog(t);
  2692. #endif
  2693. kfree(t);
  2694. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  2695. err_alloc_t_failed:
  2696. err_bad_call_stack:
  2697. err_empty_call_stack:
  2698. err_dead_binder:
  2699. err_invalid_target_handle:
  2700. err_no_context_mgr_node:
  2701. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  2702. "%d:%d transaction failed %d, size %lld-%lld\n",
  2703. proc->pid, thread->pid, return_error,
  2704. (u64) tr->data_size, (u64) tr->offsets_size);
  2705. {
  2706. struct binder_transaction_log_entry *fe;
  2707. fe = binder_transaction_log_add(&binder_transaction_log_failed);
  2708. *fe = *e;
  2709. }
  2710. BUG_ON(thread->return_error != BR_OK);
  2711. if (in_reply_to) {
  2712. thread->return_error = BR_TRANSACTION_COMPLETE;
  2713. binder_send_failed_reply(in_reply_to, return_error);
  2714. } else
  2715. thread->return_error = return_error;
  2716. }
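
/*
 * binder_thread_write - consume BC_* commands from the userspace write
 * buffer.  Commands are dispatched one at a time until the buffer is
 * exhausted or the thread has a pending return error; *consumed is
 * advanced after each command so a restarted BINDER_WRITE_READ does not
 * replay work that was already done.
 */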
static int binder_thread_write(struct binder_proc *proc,
			       struct binder_thread *thread,
			       binder_uintptr_t binder_buffer, size_t size,
			       binder_size_t *consumed)
{
	uint32_t cmd;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					      binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
							  proc->pid, thread->pid,
							  ref->desc);
				}
			} else
				ref = binder_get_ref(proc, target,
						     cmd == BC_ACQUIRE ||
						     cmd == BC_RELEASE);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
						  proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;
		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
						  proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
						  proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid,
				     (u64)data_ptr, buffer->debug_id,
				     buffer->transaction ? "active" : "finished");
			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
#ifdef MTK_BINDER_DEBUG
				if (list_empty(&buffer->target_node->async_todo)) {
					buffer->target_node->has_async_transaction = 0;
					buffer->target_node->async_pid = 0;
				} else {
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
					buffer->target_node->async_pid = thread->pid;
				}
#else
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
#endif
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}
		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
						  proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
						  proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						  proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;
		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION", target);
				break;
			}
#ifdef MTK_DEATH_NOTIFY_MONITOR
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "[DN #%s]binder: %d:%d %s %d(%s) cookie 0x%016llx\n",
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ? "1" : "2",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     ref->node->proc ? ref->node->proc->pid : 0,
#ifdef BINDER_MONITOR
				     ref->node ? ref->node->name : "",
#else
				     "",
#endif
				     (u64)cookie);
#else
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->debug_id,
				     ref->desc, ref->strong, ref->weak,
				     ref->node->debug_id);
#endif
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie, (u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		}
		break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
#ifdef MTK_DEATH_NOTIFY_MONITOR
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "[DN #6]binder: %d:%d cookie 0x%016llx\n",
				     proc->pid, thread->pid, (u64)cookie);
#endif
			/* Advance by the size actually read: the previous
			 * sizeof(void *) under-advanced on 32-bit kernels,
			 * where binder_uintptr_t is 64 bits wide.
			 */
			ptr += sizeof(cookie);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death =
					container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie, death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				break;
			}
			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		}
		break;
		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
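
/* Account a BR_* return code in the global, per-process and per-thread stats. */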
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}
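
/*
 * Wakeup predicates for the two wait queues: a looper waiting on the
 * process-wide todo list and a thread waiting on its own.  NEED_RETURN
 * forces a wakeup so the thread can drop back to userspace (e.g. on flush).
 */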
static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
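
/*
 * binder_thread_read - fill the userspace read buffer with BR_* work.
 * Blocks (unless non_block) until thread or process work arrives, then
 * translates queued binder_work entries into return commands followed,
 * for transactions, by a binder_transaction_data payload.
 */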
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	wait_for_proc_work = thread->transaction_stack == NULL &&
			     list_empty(&thread->todo);
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			pr_err("read put err2 %u to user %p, thread error %u:%u\n",
			       thread->return_error2, ptr, thread->return_error,
			       thread->return_error2);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		pr_err("read put err %u to user %p, thread error %u:%u\n",
		       thread->return_error, ptr, thread->return_error,
		       thread->return_error2);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	binder_unlock(__func__);
	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
#ifdef RT_PRIO_INHERIT
		/* disable preemption to prevent from schedule-out immediately */
		preempt_disable();
#endif
		binder_set_nice(proc->default_priority);
#ifdef RT_PRIO_INHERIT
		if (rt_task(current) && !binder_has_proc_work(proc, thread)) {
			/* make sure binder has no work before setting priority back */
			struct sched_param param = {
				.sched_priority = proc->default_rt_prio,
			};
#ifdef BINDER_MONITOR
			if (log_disable & BINDER_RT_LOG_ENABLE) {
				pr_debug("enter threadpool reset %d sched_policy from %u to %d rt_prio from %u to %d\n",
					 current->pid, current->policy,
					 proc->default_policy, current->rt_priority,
					 proc->default_rt_prio);
			}
#endif
			mt_sched_setscheduler_nocheck(current, proc->default_policy, &param);
		}
		preempt_enable_no_resched();
#endif
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait,
					binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait,
					binder_has_thread_work(thread));
	}
	binder_lock(__func__);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		} else {
			/* no data added */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
#ifdef BINDER_MONITOR
			binder_cancel_bwdog(t);
#endif
		}
		break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		}
		break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) ||
				   node->local_weak_refs || strong;

			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie, (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid,
					     cmd_name, node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr, (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr, (u64)node->cookie);
				}
			}
		}
		break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
#ifdef MTK_DEATH_NOTIFY_MONITOR
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "[DN #4]binder: %d:%d ",
				     proc->pid, thread->pid);
			switch (w->type) {
			case BINDER_WORK_DEAD_BINDER:
				binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
					     "BINDER_WORK_DEAD_BINDER cookie 0x%016llx\n",
					     (u64)death->cookie);
				break;
			case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
				binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
					     "BINDER_WORK_DEAD_BINDER_AND_CLEAR cookie 0x%016llx\n",
					     (u64)death->cookie);
				break;
			case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
				binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
					     "BINDER_WORK_CLEAR_DEATH_NOTIFICATION cookie 0x%016llx\n",
					     (u64)death->cookie);
				break;
			default:
				binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
					     "UNKNOWN-%d cookie 0x%016llx\n",
					     w->type, (u64)death->cookie);
				break;
			}
#endif
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		}
		break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
#ifdef RT_PRIO_INHERIT
			/* since we may fail the rt inherit due to target
			 * wait queue task_list is empty, check again here.
			 */
			if ((SCHED_RR == t->policy || SCHED_FIFO == t->policy) &&
			    t->rt_prio > current->rt_priority &&
			    !(t->flags & TF_ONE_WAY)) {
				struct sched_param param = {
					.sched_priority = t->rt_prio,
				};

				t->saved_rt_prio = current->rt_priority;
				t->saved_policy = current->policy;
				mt_sched_setscheduler_nocheck(current, t->policy, &param);
#ifdef BINDER_MONITOR
				if (log_disable & BINDER_RT_LOG_ENABLE) {
					pr_debug("read set %d sched_policy from %d to %d rt_prio from %d to %d\n",
						 proc->pid, t->saved_policy,
						 t->policy, t->saved_rt_prio, t->rt_prio);
				}
#endif
			}
#endif
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)((uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size, sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
#ifdef BINDER_MONITOR
			do_posix_clock_monotonic_gettime(&t->exe_timestamp);
			/* monotonic_to_bootbased(&t->exe_timestamp); */
			do_gettimeofday(&t->tv);
			/* consider time zone. translate to android time */
			t->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
			t->wait_on = WAIT_ON_EXEC;
			t->tthrd = thread->pid;
			binder_queue_bwdog(t, (time_t)WAIT_BUDGET_EXEC);
			binder_update_transaction_time(&binder_transaction_log, t, 1);
			binder_update_transaction_ttid(&binder_transaction_log, t);
#endif
		} else {
			t->buffer->transaction = NULL;
#ifdef BINDER_MONITOR
			binder_cancel_bwdog(t);
			if (cmd == BR_TRANSACTION && (t->flags & TF_ONE_WAY)) {
				binder_update_transaction_time(&binder_transaction_log, t, 1);
				t->tthrd = thread->pid;
				binder_update_transaction_ttid(&binder_transaction_log, t);
			}
#endif
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:
	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new
	     * thread if we leave this out
	     */
	    ) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
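
/*
 * Discard work items queued on a defunct thread or process list: pending
 * transactions get a BR_DEAD_REPLY (or are freed outright when no reply
 * is expected), while completion and death-notification records are
 * simply released.
 */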
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					     "undelivered transaction %d\n", t->debug_id);
				t->buffer->transaction = NULL;
#ifdef BINDER_MONITOR
				binder_cancel_bwdog(t);
#endif
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		}
		break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		}
		break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		}
		break;
		default:
			pr_err("unexpected work type, %d, not freed\n", w->type);
			break;
		}
	}
}
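
/*
 * Look up the binder_thread for current in the proc's rb-tree, creating
 * and linking a new one on first use.  Returns NULL only when allocating
 * a new thread fails.
 */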
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}
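
/*
 * Tear down a thread: unhook it from the rb-tree, unwind its transaction
 * stack (sending BR_DEAD_REPLY for a transaction awaiting this thread's
 * reply), release queued work and free the struct.
 */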
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id, (t->to_thread == thread) ? "in" : "out");
#ifdef MTK_BINDER_DEBUG
		pr_err("%d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d "
#ifdef BINDER_MONITOR
		       "start %lu.%06lu"
#endif
		       ,
		       t->debug_id, t,
		       t->from ? t->from->proc->pid : 0,
		       t->from ? t->from->pid : 0,
		       t->to_proc ? t->to_proc->pid : 0,
		       t->to_thread ? t->to_thread->pid : 0,
		       t->code, t->flags, t->priority, t->need_reply
#ifdef BINDER_MONITOR
		       , (unsigned long)t->timestamp.tv_sec,
		       (t->timestamp.tv_nsec / NSEC_PER_USEC)
#endif
		       );
#endif
		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
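
/* poll() support: report POLLIN when process- or thread-local work is pending. */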
static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		/* binder_get_thread() can fail under memory pressure;
		 * bail out instead of dereferencing a NULL thread.
		 */
		binder_unlock(__func__);
		return POLLERR;
	}
	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;
	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}
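
/*
 * BINDER_WRITE_READ handler: copy in the binder_write_read block, run the
 * write side and then the read side, and copy the consumed counts back to
 * userspace even on error so the caller can tell how far processing got.
 */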
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer, bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size, &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!list_empty(&proc->todo)) {
			if (thread->proc != proc) {
				int i;
				unsigned int *p;

				pr_debug("binder: thread->proc != proc\n");
				pr_debug("binder: thread %p\n", thread);
				p = (unsigned int *)thread - 32;
				for (i = -4; i <= 3; i++, p += 8) {
					pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
						 p, *(p), *(p + 1), *(p + 2), *(p + 3),
						 *(p + 4), *(p + 5), *(p + 6), *(p + 7));
				}
				pr_debug("binder: thread->proc %p\n", thread->proc);
				p = (unsigned int *)thread->proc - 32;
				for (i = -4; i <= 5; i++, p += 8) {
					pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
						 p, *(p), *(p + 1), *(p + 2), *(p + 3),
						 *(p + 4), *(p + 5), *(p + 6), *(p + 7));
				}
				pr_debug("binder: proc %p\n", proc);
				p = (unsigned int *)proc - 32;
				for (i = -4; i <= 5; i++, p += 8) {
					pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
						 p, *(p), *(p + 1), *(p + 2), *(p + 3),
						 *(p + 4), *(p + 5), *(p + 6), *(p + 7));
				}
				BUG();
			}
			wake_up_interruptible(&proc->wait);
		}
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
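
/*
 * BINDER_SET_CONTEXT_MGR handler: installs the caller as the single global
 * context manager (node 0), subject to an LSM check and, once a manager
 * uid has been recorded, a matching-euid check.
 */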
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	kuid_t curr_euid = current_euid();

	if (binder_context_mgr_node != NULL) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(binder_context_mgr_uid)) {
		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns, binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		binder_context_mgr_uid = curr_euid;
	}
	binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (binder_context_mgr_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}
#ifdef BINDER_MONITOR
	strcpy(binder_context_mgr_node->name, "servicemanager");
	pr_debug("%d:%d set as servicemanager uid %d\n",
		 proc->pid, thread->pid, __kuid_val(binder_context_mgr_uid));
#endif
	binder_context_mgr_node->local_weak_refs++;
	binder_context_mgr_node->local_strong_refs++;
	binder_context_mgr_node->has_strong_ref = 1;
	binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
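
/* Top-level ioctl dispatcher; runs with the global binder lock held. */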
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg); */

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, thread);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
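
/* vm_operations for the binder mapping; direct page faults are never valid. */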
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
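
/*
 * Set up the per-process buffer space: reserve a kernel vm area matching
 * the (at most 4MB) userspace mapping, record the fixed offset between
 * the two, allocate the page-pointer array and seed the free list with a
 * single initial page.
 */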
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
				proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	if (vma->vm_end - vma->vm_start < BINDER_MIN_ALLOC) {
		ret = -EINVAL;
		failure_string = "VMA size < BINDER_MIN_ALLOC";
		goto err_vma_too_small;
	}
	proc->pages = kzalloc(sizeof(proc->pages[0]) *
			      ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			      GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	if (binder_update_page_range(proc, 1, proc->buffer,
				     proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer); */
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
err_vma_too_small:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
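
/* Create the per-process binder state and its debugfs entry on open(). */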
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
#ifdef RT_PRIO_INHERIT
	proc->default_rt_prio = current->rt_priority;
	proc->default_policy = current->policy;
#endif

	binder_lock(__func__);
	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
					binder_debugfs_dir_entry_proc,
					proc, &binder_proc_fops);
	}

	return 0;
}
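
/*
 * binder_flush() - invoked on every close of a /dev/binder file.  The
 * actual work happens in binder_deferred_flush() below; flush itself
 * only queues a BINDER_DEFERRED_FLUSH item via binder_defer_work().
 */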
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
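
/*
 * binder_deferred_flush() - force every thread of the process out of a
 * blocking read by setting BINDER_LOOPER_STATE_NEED_RETURN and waking
 * both the per-thread wait queues and the process-wide one.
 */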
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n,
					struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

#ifdef MTK_BINDER_DEBUG
	if (wake_count)
		pr_debug("binder_flush: %d woke %d threads\n",
			 proc->pid, wake_count);
#else
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n",
		     proc->pid, wake_count);
#endif
}
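
/*
 * binder_release() - final release of a binder fd.  Removes the debugfs
 * entry immediately, then defers the heavyweight cleanup to
 * binder_deferred_release() on the shared workqueue.
 */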
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
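
/*
 * binder_node_release() - retire one node of a dying process.  A node
 * with no remaining references is freed on the spot; otherwise it is
 * moved onto binder_dead_nodes and a BINDER_WORK_DEAD_BINDER item is
 * queued for every reference that registered a death notification.
 * Returns the running count of incoming references for the caller.
 */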
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
#ifdef BINDER_MONITOR
	int sys_reg = 0;
#endif
#if defined(MTK_DEATH_NOTIFY_MONITOR) || defined(MTK_BINDER_DEBUG)
	int dead_pid = node->proc ? node->proc->pid : 0;
	char dead_pname[TASK_COMM_LEN] = "";

	if (node->proc && node->proc->tsk)
		strcpy(dead_pname, node->proc->tsk->comm);
#endif

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);
		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		if (!ref->death)
			continue;
#ifdef MTK_DEATH_NOTIFY_MONITOR
		binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
			     "[DN #3]binder: %d:(%s) cookie 0x%016llx\n",
			     dead_pid,
#ifdef BINDER_MONITOR
			     node->name,
#else
			     dead_pname,
#endif
			     (u64)ref->death->cookie);
#endif
#ifdef BINDER_MONITOR
		if (!sys_reg && ref->proc->pid == system_server_pid)
			sys_reg = 1;
#endif
		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

#if defined(BINDER_MONITOR) && defined(MTK_BINDER_DEBUG)
	if (sys_reg)
		pr_debug("%d:%s node %d:%s exits with %d:system_server DeathNotify\n",
			 dead_pid, dead_pname, node->debug_id, node->name,
			 system_server_pid);
#endif
	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
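
/*
 * binder_deferred_release() - free everything a process left behind:
 * threads, nodes, references, pending work, still-allocated buffers and
 * finally the mapped pages themselves.  Runs from binder_deferred_func()
 * with the driver lock held, after the vma and files are already gone.
 */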
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
	    active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG(); */
#ifdef MTK_BINDER_DEBUG
			pr_err("%d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d "
#ifdef BINDER_MONITOR
			       "start %lu.%06lu"
#endif
			       ,
			       t->debug_id, t,
			       t->from ? t->from->proc->pid : 0,
			       t->from ? t->from->pid : 0,
			       t->to_proc ? t->to_proc->pid : 0,
			       t->to_thread ? t->to_thread->pid : 0,
			       t->code, t->flags, t->priority, t->need_reply
#ifdef BINDER_MONITOR
			       , (unsigned long)t->timestamp.tv_sec,
			       (t->timestamp.tv_nsec / NSEC_PER_USEC)
#endif
			       );
#endif
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
#ifdef MTK_BINDER_PAGE_USED_RECORD
			if (binder_page_used > 0)
				binder_page_used--;
			if (proc->page_used > 0)
				proc->page_used--;
#endif
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
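
/*
 * binder_deferred_func() - single worker behind all deferred operations.
 * It repeatedly pops one proc off binder_deferred_list and handles the
 * accumulated PUT_FILES/FLUSH/RELEASE bits; put_files_struct() is only
 * called after the driver lock has been dropped.
 */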
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;
	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc,
					   deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
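
/*
 * binder_defer_work() - record deferred-work bits for a proc and queue
 * it (at most once) on binder_deferred_workqueue.  Bits accumulate under
 * binder_deferred_lock until binder_deferred_func() consumes them.
 */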
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void binder_defer_work(struct binder_proc *proc,
			      enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
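
/*
 * The print_binder_*() helpers below back the debugfs files: they format
 * transactions, buffers, work items, threads, nodes and refs one line
 * each, in the layout that binder_state_show() and friends emit.
 */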
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
#ifdef BINDER_MONITOR
	struct rtc_time tm;

	rtc_time_to_tm(t->tv.tv_sec, &tm);
#endif
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
#ifdef BINDER_MONITOR
		seq_printf(m,
			   " start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu",
			   (unsigned long)t->timestamp.tv_sec,
			   (t->timestamp.tv_nsec / NSEC_PER_USEC),
			   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
			   tm.tm_hour, tm.tm_min, tm.tm_sec,
			   (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
#endif
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d", t->buffer->target_node->debug_id);
#ifdef BINDER_MONITOR
	seq_printf(m, " size %zd:%zd data %p auf %d start %lu.%06lu",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data, t->buffer->allow_user_free,
		   (unsigned long)t->timestamp.tv_sec,
		   (t->timestamp.tv_nsec / NSEC_PER_USEC));
	seq_printf(m, " android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
#else
	seq_printf(m, " size %zd:%zd data %pK\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
#endif
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

#ifdef BINDER_MONITOR
	seq_printf(m,
		   "  node %d (%s): u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, node->name, (u64)node->ptr,
		   (u64)node->cookie, node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
#else
	seq_printf(m,
		   "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
#endif
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
#ifdef MTK_BINDER_DEBUG
	if (node->async_pid)
		seq_printf(m, " pending async transaction on %d:\n",
			   node->async_pid);
#endif
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, "    ",
				  "    pending async transaction", w);
}
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL; n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer,
					     rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}

	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
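
/*
 * Human-readable names for the BR_ return codes, BC_ commands and object
 * statistics.  The order must match the corresponding enums; the
 * BUILD_BUG_ON() checks in print_binder_stats() enforce the array sizes.
 */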
static const char *const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char *const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

static const char *const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
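
/*
 * print_binder_stats() - dump the non-zero command/return counters and
 * the created-vs-deleted object counts, either for the global
 * binder_stats or for one proc depending on the stats argument.
 */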
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] -
				   stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
		      "  ready threads %d\n"
		      "  free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;
	bool valid_proc = false;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr == proc) {
			valid_proc = true;
			break;
		}
	}
	if (valid_proc) {
		seq_puts(m, "binder proc state:\n");
		print_binder_proc(m, proc, 1);
	}
#ifdef MTK_BINDER_DEBUG
	else
		pr_debug("show proc addr 0x%p exit\n", proc);
#endif
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
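
/*
 * print_binder_transaction_log_entry() - format one logged transaction
 * (call_type 0 = call, 1 = async, 2 = reply).  The BINDER_MONITOR build
 * appends wall-clock time plus read/total latencies derived from the
 * recorded timespecs; the plain build prints only the basic endpoints.
 */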
static void print_binder_transaction_log_entry(struct seq_file *m,
				struct binder_transaction_log_entry *e)
{
#ifdef BINDER_MONITOR
	char tmp[30];
	struct rtc_time tm;
	struct timespec sub_read_t, sub_total_t;
	unsigned long read_ms = 0;
	unsigned long total_ms = 0;

	memset(&sub_read_t, 0, sizeof(sub_read_t));
	memset(&sub_total_t, 0, sizeof(sub_total_t));

	if (e->fd != -1)
		sprintf(tmp, " (fd %d)", e->fd);
	else
		tmp[0] = '\0';

	if ((e->call_type == 0) && timespec_valid_strict(&e->endstamp) &&
	    (timespec_compare(&e->endstamp, &e->timestamp) > 0)) {
		sub_total_t = timespec_sub(e->endstamp, e->timestamp);
		total_ms = ((unsigned long)sub_total_t.tv_sec) * MSEC_PER_SEC +
			   sub_total_t.tv_nsec / NSEC_PER_MSEC;
	}
	if ((e->call_type == 1) && timespec_valid_strict(&e->readstamp) &&
	    (timespec_compare(&e->readstamp, &e->timestamp) > 0)) {
		sub_read_t = timespec_sub(e->readstamp, e->timestamp);
		read_ms = ((unsigned long)sub_read_t.tv_sec) * MSEC_PER_SEC +
			  sub_read_t.tv_nsec / NSEC_PER_MSEC;
	}

	rtc_time_to_tm(e->tv.tv_sec, &tm);

	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d (%s) size %d:%d%s dex %u",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "),
		   e->from_proc, e->from_thread, e->to_proc, e->to_thread,
		   e->to_node, e->target_handle, e->service,
		   e->data_size, e->offsets_size, tmp, e->code);
	seq_printf(m,
		   " start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu read %lu.%06lu %s %lu.%06lu total %lu.%06lums\n",
		   (unsigned long)e->timestamp.tv_sec,
		   (e->timestamp.tv_nsec / NSEC_PER_USEC),
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(e->tv.tv_usec / USEC_PER_MSEC),
		   (unsigned long)e->readstamp.tv_sec,
		   (e->readstamp.tv_nsec / NSEC_PER_USEC),
		   (e->call_type == 0) ? "end" : "",
		   (e->call_type == 0) ?
			((unsigned long)e->endstamp.tv_sec) : 0,
		   (e->call_type == 0) ?
			(e->endstamp.tv_nsec / NSEC_PER_USEC) : 0,
		   (e->call_type == 0) ? total_ms : read_ms,
		   (e->call_type == 0) ?
			(sub_total_t.tv_nsec % NSEC_PER_MSEC) :
			(sub_read_t.tv_nsec % NSEC_PER_MSEC));
#else
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
		   e->target_handle, e->data_size, e->offsets_size);
#endif
}
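
/*
 * Transaction-log self-resume: when logging was disabled with the
 * BINDER_LOG_RESUME bit set, either reading the log or the 120 s delayed
 * work queued in binder_transaction_log_enable_write() turns it back on.
 */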
#ifdef BINDER_MONITOR
static void log_resume_func(struct work_struct *w)
{
	pr_debug("transaction log is self resumed\n");
	log_disable = 0;
}

static DECLARE_DELAYED_WORK(log_resume_work, log_resume_func);

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (!log->entry)
		return 0;

	if (log->full) {
		for (i = log->next; i < log->size; i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);

	if (log_disable & BINDER_LOG_RESUME) {
		pr_debug("%d (%s) read transaction log and resume\n",
			 task_pid_nr(current), current->comm);
		cancel_delayed_work(&log_resume_work);
		log_disable = 0;
	}
	return 0;
}
#else
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}
#endif
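
/*
 * Entry points exposed through /dev/binder.  Typical userspace usage is
 * roughly the following sketch (normally hidden inside libbinder; the
 * mapping size is an arbitrary example):
 *
 *	int fd = open("/dev/binder", O_RDWR);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);  // struct binder_write_read
 */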
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
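
/*
 * MTK debug controls under /sys/kernel/debug/binder (path assumes the
 * standard debugfs mount point).  log_level maps a single digit 0-4 to
 * progressively larger binder_debug_mask sets, e.g.:
 *
 *	echo 4 > /sys/kernel/debug/binder/log_level
 */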
#ifdef BINDER_MONITOR
static int binder_log_level_show(struct seq_file *m, void *unused)
{
	seq_printf(m, " Current log level: %lu\n", binder_log_level);
	return 0;
}

static ssize_t binder_log_level_write(struct file *filp, const char *ubuf,
				      size_t cnt, loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		copy_size = sizeof(buf) - 1;

	buf[copy_size] = '\0';

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;

	pr_debug("[Binder] Set binder log level:%lu -> ", binder_log_level);
	ret = kstrtoul(buf, 10, &val);
	if (ret < 0) {
		pr_debug("Null\ninvalid string, need number format, err:%d\n",
			 ret);
		pr_debug("Log Level: 0 ---- 4\n");
		pr_debug(" Less ---- More\n");
		return cnt; /* string to unsigned long failed */
	}
	pr_debug("%lu\n", val);
	if (val == 0) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION;
		binder_log_level = val;
	} else if (val == 1) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION;
		binder_log_level = val;
	} else if (val == 2) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE;
		binder_log_level = val;
	} else if (val == 3) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE
		    | BINDER_DEBUG_OPEN_CLOSE | BINDER_DEBUG_READ_WRITE;
		binder_log_level = val;
	} else if (val == 4) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_OPEN_CLOSE | BINDER_DEBUG_READ_WRITE |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE
		    | BINDER_DEBUG_USER_REFS | BINDER_DEBUG_INTERNAL_REFS |
		    BINDER_DEBUG_PRIORITY_CAP | BINDER_DEBUG_FREE_BUFFER |
		    BINDER_DEBUG_BUFFER_ALLOC;
		binder_log_level = val;
	} else {
		pr_debug("invalid value:%lu, should be 0 ~ 4\n", val);
	}
	return cnt;
}
static void print_binder_timeout_log_entry(struct seq_file *m,
					   struct binder_timeout_log_entry *e)
{
	struct rtc_time tm;

	rtc_time_to_tm(e->tv.tv_sec, &tm);
	seq_printf(m, "%d:%s %d:%d to %d:%d spends %u000 ms (%s) dex_code %u ",
		   e->debug_id, binder_wait_on_str[e->r],
		   e->from_proc, e->from_thrd, e->to_proc, e->to_thrd,
		   e->over_sec, e->service, e->code);
	seq_printf(m, "start_at %lu.%03ld android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
		   (unsigned long)e->ts.tv_sec,
		   (e->ts.tv_nsec / NSEC_PER_MSEC),
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(e->tv.tv_usec / USEC_PER_MSEC));
}

static int binder_timeout_log_show(struct seq_file *m, void *unused)
{
	struct binder_timeout_log *log = m->private;
	int i, latest;
	int end_idx = ARRAY_SIZE(log->entry) - 1;

	binder_lock(__func__);

	latest = log->next ? (log->next - 1) : end_idx;
	if (log->next == 0 && !log->full)
		goto timeout_log_show_unlock;

	if (latest >= ARRAY_SIZE(log->entry) || latest < 0) {
		int j;

		pr_alert("timeout log index error, log %p latest %d next %d end_idx %d\n",
			 log, latest, log->next, end_idx);
		for (j = -4; j <= 3; j++) {
			unsigned int *tmp = (unsigned int *)log + (j * 8);

			pr_alert("0x%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
				 tmp,
				 *tmp, *(tmp + 1), *(tmp + 2), *(tmp + 3),
				 *(tmp + 4), *(tmp + 5), *(tmp + 6),
				 *(tmp + 7));
		}
#if defined(CONFIG_MTK_AEE_FEATURE)
		aee_kernel_warning_api(__FILE__, __LINE__,
				       DB_OPT_SWT_JBT_TRACES |
				       DB_OPT_BINDER_INFO,
				       "binder: timeout log index error",
				       "detect for memory corruption\n\n"
				       "check kernel log for more details\n");
#endif
		goto timeout_log_show_unlock;
	}

	for (i = latest; i >= 0; i--)
		print_binder_timeout_log_entry(m, &log->entry[i]);
	if (log->full) {
		for (i = end_idx; i > latest; i--)
			print_binder_timeout_log_entry(m, &log->entry[i]);
	}

timeout_log_show_unlock:
	binder_unlock(__func__);
	return 0;
}

BINDER_DEBUG_SETTING_ENTRY(log_level);
BINDER_DEBUG_ENTRY(timeout_log);
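
/*
 * transaction_log_enable treats the written value as bit flags: bit 0
 * enables logging (an even value disables it), BINDER_LOG_RESUME arms
 * the 120 s self-resume, and with BINDER_MONITOR the BINDER_BUF_WARN /
 * BINDER_RT_LOG_ENABLE bits toggle the extra warnings, e.g. (same
 * debugfs path as above):
 *
 *	echo 1 > /sys/kernel/debug/binder/transaction_log_enable
 */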
static int binder_transaction_log_enable_show(struct seq_file *m, void *unused)
{
#ifdef BINDER_MONITOR
	seq_printf(m, " Current transaction log is %s %s %s"
#ifdef RT_PRIO_INHERIT
		   " %s"
#endif
		   "\n",
		   (log_disable & 0x1) ? "disabled" : "enabled",
		   (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "",
		   (log_disable & BINDER_BUF_WARN) ? "(buf warning enabled)" : ""
#ifdef RT_PRIO_INHERIT
		   , (log_disable & BINDER_RT_LOG_ENABLE) ?
		     "(rt inherit log enabled)" : ""
#endif
		   );
#else
	seq_printf(m, " Current transaction log is %s %s\n",
		   log_disable ? "disabled" : "enabled",
		   (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "");
#endif
	return 0;
}

static ssize_t binder_transaction_log_enable_write(struct file *filp,
						   const char *ubuf,
						   size_t cnt, loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		copy_size = sizeof(buf) - 1;

	buf[copy_size] = '\0';

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0) {
		pr_debug("failed to switch logging, need number format\n");
		return cnt;
	}

	log_disable = !(val & 0x1);
	if (log_disable && (val & BINDER_LOG_RESUME)) {
		log_disable |= BINDER_LOG_RESUME;
		queue_delayed_work(binder_deferred_workqueue,
				   &log_resume_work, (120 * HZ));
	}
#ifdef BINDER_MONITOR
	if (val & BINDER_BUF_WARN)
		log_disable |= BINDER_BUF_WARN;
#ifdef RT_PRIO_INHERIT
	if (val & BINDER_RT_LOG_ENABLE)
		log_disable |= BINDER_RT_LOG_ENABLE;
#endif
	pr_debug("%d (%s) set transaction log %s %s %s"
#ifdef RT_PRIO_INHERIT
		 " %s"
#endif
		 "\n",
		 task_pid_nr(current), current->comm,
		 (log_disable & 0x1) ? "disabled" : "enabled",
		 (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "",
		 (log_disable & BINDER_BUF_WARN) ? "(buf warning)" : ""
#ifdef RT_PRIO_INHERIT
		 , (log_disable & BINDER_RT_LOG_ENABLE) ?
		   "(rt inherit log enabled)" : ""
#endif
		 );
#else
	pr_debug("%d (%s) set transaction log %s %s\n",
		 task_pid_nr(current), current->comm,
		 log_disable ? "disabled" : "enabled",
		 (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "");
#endif
	return cnt;
}
BINDER_DEBUG_SETTING_ENTRY(transaction_log_enable);
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
static int binder_page_used_show(struct seq_file *s, void *p)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	seq_printf(s, "page_used:%d[%dMB]\npage_used_peak:%d[%dMB]\n",
		   binder_page_used, binder_page_used >> 8,
		   binder_page_used_peak, binder_page_used_peak >> 8);

	if (do_lock)
		binder_lock(__func__);
	seq_puts(s, "binder page stats by binder_proc:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node) {
		seq_printf(s,
			   " proc %d(%s):page_used:%d[%dMB] page_used_peak:%d[%dMB]\n",
			   proc->pid, proc->tsk ? proc->tsk->comm : " ",
			   proc->page_used, proc->page_used >> 8,
			   proc->page_used_peak, proc->page_used_peak >> 8);
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

BINDER_DEBUG_ENTRY(page_used);
#endif
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
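
/*
 * binder_init() - driver bring-up: start the MTK watchdog thread and set
 * up the transaction-log buffers (BINDER_MONITOR), create the deferred
 * workqueue, register the misc device and populate the debugfs tree.
 */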
static int __init binder_init(void)
{
	int ret;
#ifdef BINDER_MONITOR
	struct task_struct *th;

	th = kthread_create(binder_bwdog_thread, NULL, "binder_watchdog");
	if (IS_ERR(th))
		pr_err("failed to create watchdog thread (err:%li)\n",
		       PTR_ERR(th));
	else
		wake_up_process(th);

	binder_transaction_log_failed.entry = &entry_failed[0];
	binder_transaction_log_failed.size = ARRAY_SIZE(entry_failed);

#ifdef CONFIG_MTK_EXTMEM
	binder_transaction_log.entry =
	    extmem_malloc_page_align(sizeof(struct binder_transaction_log_entry)
				     * MAX_ENG_TRANS_LOG_BUFF_LEN);
	binder_transaction_log.size = MAX_ENG_TRANS_LOG_BUFF_LEN;
	if (binder_transaction_log.entry == NULL) {
		pr_err("%s[%s] ext memory alloc failed!!!\n",
		       __FILE__, __func__);
		binder_transaction_log.entry =
		    vmalloc(sizeof(struct binder_transaction_log_entry) *
			    MAX_ENG_TRANS_LOG_BUFF_LEN);
	}
#else
	binder_transaction_log.entry = &entry_t[0];
	binder_transaction_log.size = ARRAY_SIZE(entry_t);
#endif
#endif

	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);
	ret = misc_register(&binder_miscdev);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
#ifdef BINDER_MONITOR
		/* system_server is the main writer, remember to
		 * change group as "system" for write permission
		 * via related init.rc */
		debugfs_create_file("transaction_log_enable",
				    (S_IRUGO | S_IWUSR | S_IWGRP),
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transaction_log_enable_fops);
		debugfs_create_file("log_level",
				    (S_IRUGO | S_IWUSR | S_IWGRP),
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_log_level_fops);
		debugfs_create_file("timeout_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_timeout_log_t,
				    &binder_timeout_log_fops);
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
		debugfs_create_file("page_used",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_page_used_fops);
#endif
	}
	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");