binder.c 160 KB

/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#define DEBUG 1

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/rtc.h>
#include <mt-plat/aee.h>
#ifdef CONFIG_MT_PRIO_TRACER
#include <linux/prio_tracer.h>
#endif

#include "binder.h"
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define RT_PRIO_INHERIT "v1.7"
#ifdef RT_PRIO_INHERIT
#include <linux/sched/rt.h>
#endif

#define MTK_BINDER_DEBUG "v0.1" /* defined for mtk internal added debug code */
/*****************************************************************************/
/* MTK Death Notify                                                          */
/*                                                                           */
/* Debug Log Prefix | Description                                            */
/* ---------------- | -------------------------------------------------------*/
/* [DN #1] | Someone requests Death Notify from the upper layer.             */
/* [DN #2] | Someone cancels Death Notify from the upper layer.              */
/* [DN #3] | The binder driver sends Death Notify to all requesters' binder threads. */
/* [DN #4] | Some requester's binder_thread_read() handles the Death Notify work.   */
/* [DN #5] | Some requester sends confirmation to the binder driver (in IPCThreadState.cpp). */
/* [DN #6] | Finally receive the requester's confirmation from the upper layer.     */
/*****************************************************************************/
#define MTK_DEATH_NOTIFY_MONITOR "v0.1"

/**
 * Revision history of binder monitor
 *
 * v0.1 - enhance debug log
 * v0.2 - transaction timeout log
 * v0.2.1 - buffer allocation debug
 */
#ifdef CONFIG_MT_ENG_BUILD
#define BINDER_MONITOR "v0.2.1" /* BINDER_MONITOR is only turned on for eng builds */
#endif
#ifdef BINDER_MONITOR
#define MAX_SERVICE_NAME_LEN 32

/*****************************************************************************/
/* Payload layout of addService():                                           */
/*                                                                           */
/* | Parcel header | IServiceManager.descriptor | Parcel header | Service name | ... */
/* (Please refer to ServiceManagerNative.java: addService())                 */
/*                                                                           */
/* IServiceManager.descriptor is 'android.os.IServiceManager' interleaved    */
/* with the character '\0', that is, 'a', '\0', 'n', '\0', 'd', '\0', 'r', '\0', 'o', ... */
/* So the offset of the service name =                                       */
/*     Parcel header x 2 + strlen("android.os.IServiceManager") x 2 = 8x2 + 26x2 = 68 */
/*****************************************************************************/
#define MAGIC_SERVICE_NAME_OFFSET 68

#define MAX_ENG_TRANS_LOG_BUFF_LEN 10240

static pid_t system_server_pid;
static int binder_check_buf_pid;
static int binder_check_buf_tid;
static unsigned long binder_log_level;
char aee_msg[512];
char aee_word[100];
#define TRANS_LOG_LEN 210
char large_msg[TRANS_LOG_LEN];

#define BINDER_PERF_EVAL "V0.1"
#endif
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

#ifdef BINDER_MONITOR
#define BINDER_DEBUG_SETTING_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .write = binder_##name##_write, \
        .llseek = seq_lseek, \
        .release = single_release, \
}
#endif
/* LCH add, for binder pages leakage debug */
#ifdef CONFIG_MT_ENG_BUILD
#define MTK_BINDER_PAGE_USED_RECORD
#endif

#ifdef MTK_BINDER_PAGE_USED_RECORD
static unsigned int binder_page_used;
static unsigned int binder_page_used_peak;
#endif

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
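/*
 * Editor's note: for readers unfamiliar with the macro above, a rough sketch
 * of what BINDER_DEBUG_ENTRY(proc) expands to is shown below (illustrative
 * only; the real expansion is produced by the preprocessor from the macro
 * defined earlier, using the binder_proc_show() routine declared just above):
 *
 *        static int binder_proc_open(struct inode *inode, struct file *file)
 *        {
 *                return single_open(file, binder_proc_show, inode->i_private);
 *        }
 *
 *        static const struct file_operations binder_proc_fops = {
 *                .owner   = THIS_MODULE,
 *                .open    = binder_proc_open,
 *                .read    = seq_read,
 *                .llseek  = seq_lseek,
 *                .release = single_release,
 *        };
 */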
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
        BINDER_DEBUG_USER_ERROR = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
        BINDER_DEBUG_READ_WRITE = 1U << 6,
        BINDER_DEBUG_USER_REFS = 1U << 7,
        BINDER_DEBUG_THREADS = 1U << 8,
        BINDER_DEBUG_TRANSACTION = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
        BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
        BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val, struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
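/*
 * Editor's note: the module_param_* calls above expose writable knobs under
 * sysfs (assuming the usual built-in configuration where KBUILD_MODNAME is
 * "binder"), e.g.:
 *
 *        # enable more verbose logging classes at runtime (root only)
 *        echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 *        # make the driver stall on user errors so they can be inspected
 *        echo 1 > /sys/module/binder/parameters/stop_on_user_error
 *
 * The exact paths depend on how the kernel is configured; treat this as a
 * sketch rather than a guaranteed interface.
 */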
#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#ifdef BINDER_MONITOR
#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_err(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
#else
#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
#endif

enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};
struct binder_stats {
        int br[_IOC_NR(BR_FAILED_REPLY) + 1];
        int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
        int obj_created[BINDER_STAT_COUNT];
        int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        binder_stats.obj_created[type]++;
}

struct binder_transaction_log_entry {
        int debug_id;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
#ifdef BINDER_MONITOR
        unsigned int code;
        struct timespec timestamp;
        char service[MAX_SERVICE_NAME_LEN];
        int fd;
        struct timeval tv;
        struct timespec readstamp;
        struct timespec endstamp;
#endif
};
struct binder_transaction_log {
        int next;
        int full;
#ifdef BINDER_MONITOR
        unsigned size;
        struct binder_transaction_log_entry *entry;
#else
        struct binder_transaction_log_entry entry[32];
#endif
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;

        e = &log->entry[log->next];
        memset(e, 0, sizeof(*e));
        log->next++;
#ifdef BINDER_MONITOR
        if (log->next == log->size) {
                log->next = 0;
                log->full = 1;
        }
#else
        if (log->next == ARRAY_SIZE(log->entry)) {
                log->next = 0;
                log->full = 1;
        }
#endif
        return e;
}
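/*
 * Editor's note: binder_transaction_log_add() is a simple ring-buffer
 * allocator; callers fill in the returned slot in place. A minimal usage
 * sketch (field values are illustrative; the real call sites live later in
 * this file, outside this excerpt, in the transaction path):
 *
 *        struct binder_transaction_log_entry *e;
 *
 *        e = binder_transaction_log_add(&binder_transaction_log);
 *        e->debug_id = t->debug_id;
 *        e->from_proc = proc->pid;
 *        e->to_proc = target_proc->pid;
 *
 * When the buffer wraps, log->full is set and the oldest entries are
 * overwritten.
 */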
#ifdef BINDER_MONITOR
static struct binder_transaction_log_entry entry_failed[32];

/* log_disable bitmap
 * bit: 31 ... 4 3 2 1 0
 *                | | | |_ bit 0: 0: log enable / 1: log disable
 *                | | |___ bit 1: self resume
 *                | |_____ bit 2: manually trigger kernel warning for buffer allocation
 *                |_______ bit 3: 1: rt_inherit log enable / 0: rt_inherit log disable
 */
static int log_disable;
#define BINDER_LOG_RESUME 0x2
#define BINDER_BUF_WARN 0x4
#ifdef RT_PRIO_INHERIT
#define BINDER_RT_LOG_ENABLE 0x8
#endif
#ifdef CONFIG_MTK_EXTMEM
#include <linux/exm_driver.h>
#else
static struct binder_transaction_log_entry entry_t[MAX_ENG_TRANS_LOG_BUFF_LEN];
#endif
#endif
struct binder_work {
        struct list_head entry;
        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_node {
        int debug_id;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        unsigned has_strong_ref:1;
        unsigned pending_strong_ref:1;
        unsigned has_weak_ref:1;
        unsigned pending_weak_ref:1;
        unsigned has_async_transaction:1;
        unsigned accept_fds:1;
        unsigned min_priority:8;
        struct list_head async_todo;
#ifdef BINDER_MONITOR
        char name[MAX_SERVICE_NAME_LEN];
#endif
#ifdef MTK_BINDER_DEBUG
        int async_pid;
#endif
};
struct binder_ref_death {
        struct binder_work work;
        binder_uintptr_t cookie;
};

struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        int debug_id;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        uint32_t desc;
        int strong;
        int weak;
        struct binder_ref_death *death;
};

struct binder_buffer {
        struct list_head entry; /* free and allocated entries by address */
        struct rb_node rb_node; /* free entry by size or allocated entry */
                                /* by address */
        unsigned free:1;
        unsigned allow_user_free:1;
        unsigned async_transaction:1;
        unsigned debug_id:29;

        struct binder_transaction *transaction;
#ifdef BINDER_MONITOR
        struct binder_transaction_log_entry *log_entry;
#endif

        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
        uint8_t data[0];
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES = 0x01,
        BINDER_DEFERRED_FLUSH = 0x02,
        BINDER_DEFERRED_RELEASE = 0x04,
};
#ifdef BINDER_MONITOR
enum wait_on_reason {
        WAIT_ON_NONE = 0U,
        WAIT_ON_READ = 1U,
        WAIT_ON_EXEC = 2U,
        WAIT_ON_REPLY_READ = 3U
};
#endif

struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        int pid;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        void *buffer;
        ptrdiff_t user_buffer_offset;

        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
        size_t free_async_space;

        struct page **pages;
        size_t buffer_size;
        uint32_t buffer_free;
        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int ready_threads;
        long default_priority;
        struct dentry *debugfs_entry;
#ifdef RT_PRIO_INHERIT
        unsigned long default_rt_prio:16;
        unsigned long default_policy:16;
#endif
#ifdef BINDER_MONITOR
        struct binder_buffer *large_buffer;
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
        unsigned int page_used;
        unsigned int page_used_peak;
#endif
};
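/*
 * Editor's note on the buffer fields above: binder maps the same physical
 * pages both into the kernel (at proc->buffer) and into the process's
 * mmap()ed region, and user_buffer_offset records the constant difference
 * between the two mappings. A sketch of the relation as it is commonly
 * used by this driver (set up in binder_mmap(), which is not part of this
 * excerpt):
 *
 *        proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
 *        // so for any allocated buffer:
 *        // user_address = (uintptr_t)kernel_address + proc->user_buffer_offset
 */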
enum {
        BINDER_LOOPER_STATE_REGISTERED = 0x01,
        BINDER_LOOPER_STATE_ENTERED = 0x02,
        BINDER_LOOPER_STATE_EXITED = 0x04,
        BINDER_LOOPER_STATE_INVALID = 0x08,
        BINDER_LOOPER_STATE_WAITING = 0x10,
        BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        int pid;
        int looper;
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        uint32_t return_error;  /* Write failed, return error code in read buf */
        uint32_t return_error2; /* Write failed, return error code in read */
                                /* buffer. Used when sending a reply to a dead process that */
                                /* we are also waiting on */
        wait_queue_head_t wait;
        struct binder_stats stats;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        long priority;
        long saved_priority;
        kuid_t sender_euid;
#ifdef RT_PRIO_INHERIT
        unsigned long rt_prio:16;
        unsigned long policy:16;
        unsigned long saved_rt_prio:16;
        unsigned long saved_policy:16;
#endif
#ifdef BINDER_MONITOR
        struct timespec timestamp;

        enum wait_on_reason wait_on;
        enum wait_on_reason bark_on;
        struct rb_node rb_node;         /* by bark_time */
        struct timespec bark_time;
        struct timespec exe_timestamp;
        struct timeval tv;
        char service[MAX_SERVICE_NAME_LEN];
        pid_t fproc;
        pid_t fthrd;
        pid_t tproc;
        pid_t tthrd;
        unsigned int log_idx;
#endif
};
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static inline void binder_lock(const char *tag);
static inline void binder_unlock(const char *tag);

#ifdef BINDER_MONITOR
/* work should be done within how many secs */
#define WAIT_BUDGET_READ 2
#define WAIT_BUDGET_EXEC 4
#define WAIT_BUDGET_MIN min(WAIT_BUDGET_READ, WAIT_BUDGET_EXEC)

static struct rb_root bwdog_transacts;

static const char *const binder_wait_on_str[] = {
        "none",
        "read",
        "exec",
        "rply"
};

struct binder_timeout_log_entry {
        enum wait_on_reason r;
        pid_t from_proc;
        pid_t from_thrd;
        pid_t to_proc;
        pid_t to_thrd;
        unsigned over_sec;
        struct timespec ts;
        struct timeval tv;
        unsigned int code;
        char service[MAX_SERVICE_NAME_LEN];
        int debug_id;
};

struct binder_timeout_log {
        int next;
        int full;
#ifdef BINDER_PERF_EVAL
        struct binder_timeout_log_entry entry[256];
#else
        struct binder_timeout_log_entry entry[64];
#endif
};

static struct binder_timeout_log binder_timeout_log_t;
/**
 * binder_timeout_log_add - Insert a timeout log entry
 */
static struct binder_timeout_log_entry *binder_timeout_log_add(void)
{
        struct binder_timeout_log *log = &binder_timeout_log_t;
        struct binder_timeout_log_entry *e;

        e = &log->entry[log->next];
        memset(e, 0, sizeof(*e));
        log->next++;
        if (log->next == ARRAY_SIZE(log->entry)) {
                log->next = 0;
                log->full = 1;
        }
        return e;
}
/**
 * binder_print_bwdog - Output info about a timed-out transaction
 * @t:      pointer to the timed-out transaction
 * @cur_in: current timespec at the time of printing
 * @e:      timeout log entry to record into
 * @r:      wait reason to report, either while barking or after the bark
 */
static void binder_print_bwdog(struct binder_transaction *t,
                               struct timespec *cur_in,
                               struct binder_timeout_log_entry *e, enum wait_on_reason r)
{
        struct rtc_time tm;
        struct timespec *startime;
        struct timespec cur, sub_t;

        if (cur_in && e) {
                memcpy(&cur, cur_in, sizeof(struct timespec));
        } else {
                do_posix_clock_monotonic_gettime(&cur);
                /* monotonic_to_bootbased(&cur); */
        }
        startime = (r == WAIT_ON_EXEC) ? &t->exe_timestamp : &t->timestamp;
        sub_t = timespec_sub(cur, *startime);

        rtc_time_to_tm(t->tv.tv_sec, &tm);
        pr_debug("%d %s %d:%d to %d:%d %s %u.%03ld sec (%s) dex_code %u",
                 t->debug_id, binder_wait_on_str[r],
                 t->fproc, t->fthrd, t->tproc, t->tthrd,
                 (cur_in && e) ? "over" : "total",
                 (unsigned)sub_t.tv_sec, (sub_t.tv_nsec / NSEC_PER_MSEC),
                 t->service, t->code);
        pr_debug(" start_at %lu.%03ld android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
                 (unsigned long)startime->tv_sec,
                 (startime->tv_nsec / NSEC_PER_MSEC),
                 (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
                 tm.tm_hour, tm.tm_min, tm.tm_sec,
                 (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));

        if (e) {
                e->over_sec = sub_t.tv_sec;
                memcpy(&e->ts, startime, sizeof(struct timespec));
        }
}
/**
 * binder_bwdog_safe - Check whether a transaction is free of the monitor
 * @t: pointer to the transaction to check
 *
 * Returns 1 if the transaction is not being watched.
 */
static inline int binder_bwdog_safe(struct binder_transaction *t)
{
        return (t->wait_on == WAIT_ON_NONE) ? 1 : 0;
}

/**
 * binder_query_bwdog - Check whether a transaction is queued on the watchdog
 * @t: pointer to the transaction to check
 *
 * Returns the rb-tree link position for @t; the slot it points to is NULL
 * if the transaction is not queued.
 */
static struct rb_node **binder_query_bwdog(struct binder_transaction *t)
{
        struct rb_node **p = &bwdog_transacts.rb_node;
        struct rb_node *parent = NULL;
        struct binder_transaction *transact = NULL;
        int comp;

        while (*p) {
                parent = *p;
                transact = rb_entry(parent, struct binder_transaction, rb_node);
                comp = timespec_compare(&t->bark_time, &transact->bark_time);
                if (comp < 0)
                        p = &(*p)->rb_left;
                else if (comp > 0)
                        p = &(*p)->rb_right;
                else
                        break;
        }
        return p;
}
/**
 * binder_queue_bwdog - Queue a transaction for tracking
 * @t:      pointer to the transaction being tracked
 * @budget: seconds this transaction is allowed to take
 */
static void binder_queue_bwdog(struct binder_transaction *t, time_t budget)
{
        struct rb_node **p = &bwdog_transacts.rb_node;
        struct rb_node *parent = NULL;
        struct binder_transaction *transact = NULL;
        int ret;

        do_posix_clock_monotonic_gettime(&t->bark_time);
        /* monotonic_to_bootbased(&t->bark_time); */
        t->bark_time.tv_sec += budget;

        while (*p) {
                parent = *p;
                transact = rb_entry(parent, struct binder_transaction, rb_node);
                ret = timespec_compare(&t->bark_time, &transact->bark_time);
                if (ret < 0)
                        p = &(*p)->rb_left;
                else if (ret > 0)
                        p = &(*p)->rb_right;
                else {
                        pr_debug("%d found same key\n", t->debug_id);
                        t->bark_time.tv_nsec += 1;
                        p = &(*p)->rb_right;
                }
        }
        rb_link_node(&t->rb_node, parent, p);
        rb_insert_color(&t->rb_node, &bwdog_transacts);
}
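/*
 * Editor's note: a rough sketch of the watchdog life cycle as suggested by
 * the helpers in this block (the actual call sites are in the transaction
 * paths later in this file and are not shown in this excerpt):
 *
 *        t->wait_on = WAIT_ON_READ;
 *        binder_queue_bwdog(t, WAIT_BUDGET_READ);   // arm: bark_time = now + budget
 *        ...
 *        binder_cancel_bwdog(t);                    // disarm once the target reads/replies
 *
 * If the budget expires first, binder_bwdog_thread() calls
 * binder_bwdog_bark() on the transaction and removes it from the tree.
 */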
/**
 * binder_cancel_bwdog - Remove a transaction from the tracking list
 * @t: pointer to the transaction being cancelled
 */
static void binder_cancel_bwdog(struct binder_transaction *t)
{
        struct rb_node **p = NULL;

        if (binder_bwdog_safe(t)) {
                if (t->bark_on) {
                        binder_print_bwdog(t, NULL, NULL, t->bark_on);
                        t->bark_on = WAIT_ON_NONE;
                }
                return;
        }

        p = binder_query_bwdog(t);
        if (*p == NULL) {
                pr_err("%d waits %s, but not queued...\n",
                       t->debug_id, binder_wait_on_str[t->wait_on]);
                return;
        }

        rb_erase(&t->rb_node, &bwdog_transacts);
        t->wait_on = WAIT_ON_NONE;
}
/**
 * binder_bwdog_bark -
 * Called when a transaction times out. Records the target process or
 * thread that could not handle the transaction in time (including its
 * todo list), and adds a log entry for AMS reference.
 *
 * @t:   pointer to the transaction that triggered the watchdog
 * @cur: current kernel timespec
 */
static void binder_bwdog_bark(struct binder_transaction *t, struct timespec *cur)
{
        struct binder_timeout_log_entry *e;

        if (binder_bwdog_safe(t)) {
                pr_debug("%d watched, but wait nothing\n", t->debug_id);
                return;
        }

        e = binder_timeout_log_add();
        binder_print_bwdog(t, cur, e, t->wait_on);

        e->r = t->wait_on;
        e->from_proc = t->fproc;
        e->from_thrd = t->fthrd;
        e->debug_id = t->debug_id;
        memcpy(&e->tv, &t->tv, sizeof(struct timeval));

        switch (t->wait_on) {
        case WAIT_ON_READ: {
                if (!t->to_proc) {
                        pr_err("%d has NULL target\n", t->debug_id);
                        return;
                }
                e->to_proc = t->tproc;
                e->to_thrd = t->tthrd;
                e->code = t->code;
                strcpy(e->service, t->service);
                break;
        }
        case WAIT_ON_EXEC: {
                if (!t->to_thread) {
                        pr_err("%d has NULL target for execution\n", t->debug_id);
                        return;
                }
                e->to_proc = t->tproc;
                e->to_thrd = t->tthrd;
                e->code = t->code;
                strcpy(e->service, t->service);
                goto dumpBackTrace;
        }
        case WAIT_ON_REPLY_READ: {
                if (!t->to_thread) {
                        pr_err("%d has NULL target thread\n", t->debug_id);
                        return;
                }
                e->to_proc = t->tproc;
                e->to_thrd = t->tthrd;
                strcpy(e->service, "");
                break;
        }
        default:
                return;
        }

dumpBackTrace:
        return;
}
/**
 * binder_bwdog_thread - Main thread to check the timeout list periodically
 */
static int binder_bwdog_thread(void *__unused)
{
        unsigned long sleep_sec;
        struct rb_node *n = NULL;
        struct timespec cur_time;
        struct binder_transaction *t = NULL;

        for (;;) {
                binder_lock(__func__);
                do_posix_clock_monotonic_gettime(&cur_time);
                /* monotonic_to_bootbased(&cur_time); */

                for (n = rb_first(&bwdog_transacts); n != NULL; n = rb_next(n)) {
                        t = rb_entry(n, struct binder_transaction, rb_node);
                        if (timespec_compare(&cur_time, &t->bark_time) < 0)
                                break;

                        binder_bwdog_bark(t, &cur_time);
                        rb_erase(&t->rb_node, &bwdog_transacts);
                        t->bark_on = t->wait_on;
                        t->wait_on = WAIT_ON_NONE;
                }

                if (!n)
                        sleep_sec = WAIT_BUDGET_MIN;
                else
                        sleep_sec = timespec_sub(t->bark_time, cur_time).tv_sec;
                binder_unlock(__func__);

                msleep(sleep_sec * MSEC_PER_SEC);
        }

        pr_debug("%s exit...\n", __func__);
        return 0;
}
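/*
 * Editor's note: the loop above never breaks on its own, so the trailing
 * pr_debug()/return are effectively unreachable. The thread is expected to
 * be started once at driver init time; a minimal sketch of how that is
 * typically done (the exact thread name and init site are not visible in
 * this excerpt and are assumptions):
 *
 *        static struct task_struct *binder_watchdog_task;
 *
 *        binder_watchdog_task = kthread_run(binder_bwdog_thread, NULL,
 *                                           "binder_watchdog");
 *        if (IS_ERR(binder_watchdog_task))
 *                pr_err("failed to start binder watchdog thread\n");
 */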
/**
 * find_process_by_pid - convert a pid to a task_struct
 * @pid: pid of the task to look up
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
        return pid ? find_task_by_vpid(pid) : NULL;
}

/**
 * binder_find_buffer_sender - find the sender task_struct of this buffer
 * @buf: binder buffer
 *
 * Returns the task_struct of the buffer's sender, or NULL.
 */
static struct task_struct *binder_find_buffer_sender(struct binder_buffer *buf)
{
        struct binder_transaction *t;
        struct binder_transaction_log_entry *e;
        struct task_struct *tsk;

        t = buf->transaction;
        if (t && t->fproc)
                tsk = find_process_by_pid(t->fproc);
        else {
                e = buf->log_entry;
                if ((buf->debug_id == e->debug_id) && e->from_proc)
                        tsk = find_process_by_pid(e->from_proc);
                else
                        tsk = NULL;
        }
        return tsk;
}
/**
 * copied from /kernel/fs/proc/base.c and modified to get the task's full name
 */
static int binder_proc_pid_cmdline(struct task_struct *task, char *buf)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm;
        /*============ add begin =============================*/
        char c = ' ';
        char *str;
        unsigned int size;
        char *buffer;

        if (NULL == task)
                goto out;
        /*============ add end ===============================*/
        mm = get_task_mm(task);
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        /*============ add begin =============================*/
        buffer = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (NULL == buffer)
                goto out_mm;
        /*============ add end ===============================*/

        len = mm->arg_end - mm->arg_start;
        if (len > PAGE_SIZE)
                len = PAGE_SIZE;
        res = access_process_vm(task, mm->arg_start, buffer, len, 0);

        /* If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res - 1] != '\0' && len < PAGE_SIZE) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = mm->env_end - mm->env_start;
                        if (len > PAGE_SIZE - res)
                                len = PAGE_SIZE - res;
                        res += access_process_vm(task, mm->env_start, buffer + res, len, 0);
                        res = strnlen(buffer, res);
                }
        }

        /*============ add begin =============================*/
        str = strchr(buffer, c);
        if (NULL != str)
                size = (unsigned int)(str - buffer);
        else
                size = res;
        if (size > 256)
                size = 256;
        /* use "%s" so the cmdline is never treated as a format string */
        snprintf(buf, size, "%s", buffer);
        kfree(buffer);
        /*============ add end ===============================*/
out_mm:
        mmput(mm);
out:
        return res;
}
/**
 * binder_print_buf - Print buffer info
 * @buffer:  target buffer
 * @dest:    destination string pointer (may be NULL)
 * @success: whether this buffer was allocated successfully
 * @check:   check this log for owner finding
 */
static void binder_print_buf(struct binder_buffer *buffer, char *dest, int success, int check)
{
        struct rtc_time tm;
        struct binder_transaction *t = buffer->transaction;
        char str[TRANS_LOG_LEN];
        struct task_struct *sender_tsk;
        struct task_struct *rec_tsk;
        char sender_name[256], rec_name[256];
        int len_s, len_r;
        int ptr = 0;

        if (NULL == t) {
                struct binder_transaction_log_entry *log_entry = buffer->log_entry;

                if ((log_entry != NULL)
                    && (buffer->debug_id == log_entry->debug_id)) {
                        rtc_time_to_tm(log_entry->tv.tv_sec, &tm);
                        sender_tsk = find_process_by_pid(log_entry->from_proc);
                        rec_tsk = find_process_by_pid(log_entry->to_proc);
                        len_s = binder_proc_pid_cmdline(sender_tsk, sender_name);
                        len_r = binder_proc_pid_cmdline(rec_tsk, rec_name);
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        "binder:check=%d,success=%d,id=%d,call=%s,type=%s,",
                                        check, success, buffer->debug_id,
                                        buffer->async_transaction ? "async" : "sync",
                                        (2 == log_entry->call_type) ? "reply" :
                                        ((1 == log_entry->call_type) ? "async" : "call"));
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        "from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,",
                                        log_entry->from_proc, log_entry->from_thread,
                                        len_s ? sender_name : ((sender_tsk != NULL) ?
                                                               sender_tsk->comm : ""),
                                        log_entry->to_proc,
                                        len_r ? rec_name : ((rec_tsk != NULL) ? rec_tsk->comm : ""),
                                        log_entry->to_thread, log_entry->service);
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        "size=%zd,node=%d,handle=%d,dex=%u,auf=%d,start=%lu.%03ld,",
                                        (buffer->data_size + buffer->offsets_size),
                                        log_entry->to_node, log_entry->target_handle,
                                        log_entry->code, buffer->allow_user_free,
                                        (unsigned long)log_entry->timestamp.tv_sec,
                                        (log_entry->timestamp.tv_nsec / NSEC_PER_MSEC));
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        "android=%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
                                        (tm.tm_year + 1900), (tm.tm_mon + 1),
                                        tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
                                        (unsigned long)(log_entry->tv.tv_usec / USEC_PER_MSEC));
                } else {
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        "binder:check=%d,success=%d,id=%d,call=%s, ,",
                                        check, success, buffer->debug_id,
                                        buffer->async_transaction ? "async" : "sync");
                        ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                        ",,,,,,,size=%zd,,,,auf=%d,,\n",
                                        (buffer->data_size + buffer->offsets_size),
                                        buffer->allow_user_free);
                }
        } else {
                rtc_time_to_tm(t->tv.tv_sec, &tm);
                sender_tsk = find_process_by_pid(t->fproc);
                rec_tsk = find_process_by_pid(t->tproc);
                len_s = binder_proc_pid_cmdline(sender_tsk, sender_name);
                len_r = binder_proc_pid_cmdline(rec_tsk, rec_name);
                ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                "binder:check=%d,success=%d,id=%d,call=%s,type=%s,",
                                check, success, t->debug_id,
                                buffer->async_transaction ? "async" : "sync ",
                                binder_wait_on_str[t->wait_on]);
                ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                "from=%d,tid=%d,name=%s,to=%d,name=%s,tid=%d,name=%s,",
                                t->fproc, t->fthrd,
                                len_s ? sender_name : ((sender_tsk != NULL) ?
                                                       sender_tsk->comm : ""),
                                t->tproc,
                                len_r ? rec_name : ((rec_tsk != NULL) ? rec_tsk->comm : ""),
                                t->tthrd, t->service);
                ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                "size=%zd,,,dex=%u,auf=%d,start=%lu.%03ld,android=",
                                (buffer->data_size + buffer->offsets_size), t->code,
                                buffer->allow_user_free, (unsigned long)t->timestamp.tv_sec,
                                (t->timestamp.tv_nsec / NSEC_PER_MSEC));
                ptr += snprintf(str + ptr, sizeof(str) - ptr,
                                "%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
                                (tm.tm_year + 1900),
                                (tm.tm_mon + 1), tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
                                (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
        }
        pr_debug("%s", str);
        if (dest != NULL)
                strncat(dest, str, sizeof(str) - strlen(dest) - 1);
}
  975. /**
  976. * binder_check_buf_checked -
  977. * Consider buffer related issue usually makes a series of failure.
  978. * Only care about the first problem time to minimize debug overhead.
  979. */
  980. static int binder_check_buf_checked(void)
  981. {
  982. return (binder_check_buf_pid == -1);
  983. }
  984. static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer);
  985. /**
  986. * binder_check_buf - Dump necessary info for buffer usage analysis
  987. * @target_proc: receiver
  988. * @size: requested size
  989. * @is_async: 1 if an async call
  990. */
  991. static void binder_check_buf(struct binder_proc *target_proc, size_t size, int is_async)
  992. {
  993. struct rb_node *n;
  994. struct binder_buffer *buffer;
  995. int i;
  996. int large_buffer_count = 0;
  997. size_t tmp_size, threshold;
  998. struct task_struct *sender;
  999. struct task_struct *larger;
  1000. char sender_name[256], rec_name[256];
  1001. struct timespec exp_timestamp;
  1002. struct timeval tv;
  1003. struct rtc_time tm;
  1004. #if defined(CONFIG_MTK_AEE_FEATURE)
  1005. int db_flag = DB_OPT_BINDER_INFO;
  1006. #endif
  1007. int len_s, len_r;
  1008. int ptr = 0;
  1009. pr_debug("buffer allocation failed on %d:0 %s from %d:%d size %zd\n",
  1010. target_proc->pid,
  1011. is_async ? "async" : "call ", binder_check_buf_pid, binder_check_buf_tid, size);
  1012. if (binder_check_buf_checked())
  1013. return;
  1014. /* check blocked service for async call */
  1015. if (is_async) {
  1016. pr_debug("buffer allocation failed on %d:0 (%s) async service blocked\n",
  1017. target_proc->pid, target_proc->tsk ? target_proc->tsk->comm : "");
  1018. }
  1019. pr_debug("%d:0 pending transactions:\n", target_proc->pid);
  1020. threshold = target_proc->buffer_size / 16;
  1021. for (n = rb_last(&target_proc->allocated_buffers), i = 0; n; n = rb_prev(n), i++) {
  1022. buffer = rb_entry(n, struct binder_buffer, rb_node);
  1023. tmp_size = binder_buffer_size(target_proc, buffer);
  1024. BUG_ON(buffer->free);
  1025. if (tmp_size > threshold) {
  1026. if ((NULL == target_proc->large_buffer) ||
  1027. (target_proc->large_buffer &&
  1028. (tmp_size >
  1029. binder_buffer_size(target_proc, target_proc->large_buffer))))
  1030. target_proc->large_buffer = buffer;
  1031. large_buffer_count++;
  1032. binder_print_buf(buffer, NULL, 1, 0);
  1033. } else {
  1034. if (i < 20)
  1035. binder_print_buf(buffer, NULL, 1, 0);
  1036. }
  1037. }
  1038. pr_debug("%d:0 total pending trans: %d(%d large isze)\n",
  1039. target_proc->pid, i, large_buffer_count);
  1040. do_posix_clock_monotonic_gettime(&exp_timestamp);
  1041. /* monotonic_to_bootbased(&exp_timestamp); */
  1042. do_gettimeofday(&tv);
  1043. /* consider time zone. translate to android time */
  1044. tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
  1045. rtc_time_to_tm(tv.tv_sec, &tm);
  1046. sender = find_process_by_pid(binder_check_buf_pid);
  1047. len_s = binder_proc_pid_cmdline(sender, sender_name);
  1048. len_r = binder_proc_pid_cmdline(target_proc->tsk, rec_name);
  1049. if (size > threshold) {
  1050. if (target_proc->large_buffer) {
  1051. pr_debug("on %d:0 the largest pending trans is:\n", target_proc->pid);
  1052. binder_print_buf(target_proc->large_buffer, large_msg, 1, 0);
  1053. }
  1054. snprintf(aee_word, sizeof(aee_word),
  1055. "check %s: large binder trans fail on %d:0 size %zd",
  1056. len_s ? sender_name : ((sender != NULL) ? sender->comm : ""),
  1057. target_proc->pid, size);
  1058. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1059. "BINDER_BUF_DEBUG\n%s",
  1060. large_msg);
  1061. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1062. "binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,",
  1063. 1, 0, is_async ? "async" : "sync",
  1064. binder_check_buf_pid, binder_check_buf_tid);
  1065. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1066. "name=%s,to=%d,name=%s,,,size=%zd,,,," ",start=%lu.%03ld,android=",
  1067. len_s ? sender_name : ((sender != NULL) ? sender->comm : ""),
  1068. target_proc->pid,
1069. len_r ? rec_name : ((target_proc->tsk != NULL) ? target_proc->tsk->comm : ""),
1070. size, (unsigned long)exp_timestamp.tv_sec,
  1071. (exp_timestamp.tv_nsec / NSEC_PER_MSEC));
  1072. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1073. "%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
  1074. (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday, tm.tm_hour,
  1075. tm.tm_min, tm.tm_sec, (unsigned long)(tv.tv_usec / USEC_PER_MSEC));
  1076. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1077. "large data size,check sender %d(%s)! check kernel log\n",
  1078. binder_check_buf_pid, sender ? sender->comm : "");
  1079. } else {
  1080. if (target_proc->large_buffer) {
  1081. pr_debug("on %d:0 the largest pending trans is:\n", target_proc->pid);
  1082. binder_print_buf(target_proc->large_buffer, large_msg, 1, 1);
  1083. larger = binder_find_buffer_sender(target_proc->large_buffer);
  1084. snprintf(aee_word, sizeof(aee_word),
  1085. "check %s: large binder trans",
  1086. (larger != NULL) ? larger->comm : "");
  1087. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1088. "BINDER_BUF_DEBUG:\n%s",
  1089. large_msg);
  1090. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1091. "binder:check=%d,success=%d,,call=%s,,from=%d,tid=%d,name=%s,",
  1092. 0, 0, is_async ? "async" : "sync",
  1093. binder_check_buf_pid, binder_check_buf_tid,
  1094. len_s ? sender_name : ((sender != NULL) ?
  1095. sender->comm : ""));
  1096. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1097. "to=%d,name=%s,,,size=%zd,,,,",
  1098. target_proc->pid, len_r ? rec_name : ((target_proc->tsk != NULL)
  1099. ? target_proc->tsk->comm : ""), size);
  1100. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1101. ",start=%lu.%03ld,android=",
  1102. (unsigned long)exp_timestamp.tv_sec,
  1103. (exp_timestamp.tv_nsec / NSEC_PER_MSEC));
  1104. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1105. "%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
  1106. (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
  1107. tm.tm_hour, tm.tm_min, tm.tm_sec,
  1108. (unsigned long)(tv.tv_usec / USEC_PER_MSEC));
  1109. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1110. "large data size,check sender %d(%s)! check kernel log\n",
  1111. (larger != NULL) ? larger->pid : 0,
  1112. (larger != NULL) ? larger->comm : "");
  1113. } else {
  1114. snprintf(aee_word, sizeof(aee_word),
  1115. "check %s: binder buffer exhaust ",
  1116. len_r ? rec_name : ((target_proc->tsk != NULL)
  1117. ? target_proc->tsk->comm : ""));
  1118. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1119. "BINDER_BUF_DEBUG\n binder:check=%d,success=%d,",
  1120. 1, 0);
  1121. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1122. "call=%s,from=%d,tid=%d,name=%s,to=%d,name=%s,,,size=%zd,,,,",
  1123. is_async ? "async" : "sync",
  1124. binder_check_buf_pid, binder_check_buf_tid,
  1125. len_s ? sender_name : ((sender != NULL) ?
  1126. sender->comm : ""),
  1127. target_proc->pid, len_r ? rec_name : ((target_proc->tsk != NULL)
1128. ? target_proc->tsk->comm : ""),
1129. size);
  1130. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1131. ",start=%lu.%03ld,android=%d-%02d-%02d %02d:%02d:%02d.%03lu\n",
  1132. (unsigned long)exp_timestamp.tv_sec,
  1133. (exp_timestamp.tv_nsec / NSEC_PER_MSEC), (tm.tm_year + 1900),
  1134. (tm.tm_mon + 1), tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec,
  1135. (unsigned long)(tv.tv_usec / USEC_PER_MSEC));
  1136. ptr += snprintf(aee_msg+ptr, sizeof(aee_msg)-ptr,
  1137. "%d small trans pending, check receiver %d(%s)! check kernel log\n",
  1138. i, target_proc->pid,
  1139. target_proc->tsk ? target_proc->tsk->comm : "");
  1140. }
  1141. }
  1142. binder_check_buf_pid = -1;
  1143. binder_check_buf_tid = -1;
  1144. #if defined(CONFIG_MTK_AEE_FEATURE)
  1145. aee_kernel_warning_api(__FILE__, __LINE__, db_flag, &aee_word[0], &aee_msg[0]);
  1146. #endif
  1147. }
  1148. #endif
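/*
 * Allocate an unused fd in the target proc's file table, bounded by that
 * task's RLIMIT_NOFILE (the per-task counterpart of get_unused_fd_flags()).
 */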
  1149. static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
  1150. {
  1151. struct files_struct *files = proc->files;
  1152. unsigned long rlim_cur;
  1153. unsigned long irqs;
  1154. if (files == NULL)
  1155. return -ESRCH;
  1156. if (!lock_task_sighand(proc->tsk, &irqs))
  1157. return -EMFILE;
  1158. rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
  1159. unlock_task_sighand(proc->tsk, &irqs);
  1160. return __alloc_fd(files, 0, rlim_cur, flags);
  1161. }
  1162. /*
  1163. * copied from fd_install
  1164. */
  1165. static void task_fd_install(struct binder_proc *proc, unsigned int fd, struct file *file)
  1166. {
  1167. if (proc->files)
  1168. __fd_install(proc->files, fd, file);
  1169. }
  1170. /*
  1171. * copied from sys_close
  1172. */
  1173. static long task_close_fd(struct binder_proc *proc, unsigned int fd)
  1174. {
  1175. int retval;
  1176. if (proc->files == NULL)
  1177. return -ESRCH;
  1178. retval = __close_fd(proc->files, fd);
  1179. /* can't restart close syscall because file table entry was cleared */
  1180. if (unlikely(retval == -ERESTARTSYS ||
  1181. retval == -ERESTARTNOINTR ||
  1182. retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK))
  1183. retval = -EINTR;
  1184. return retval;
  1185. }
  1186. static inline void binder_lock(const char *tag)
  1187. {
  1188. trace_binder_lock(tag);
  1189. mutex_lock(&binder_main_lock);
  1190. trace_binder_locked(tag);
  1191. }
  1192. static inline void binder_unlock(const char *tag)
  1193. {
  1194. trace_binder_unlock(tag);
  1195. mutex_unlock(&binder_main_lock);
  1196. }
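/*
 * Apply the requested nice value to the current task, clamping to the
 * lowest priority that RLIMIT_NICE still allows when the request is denied.
 */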
  1197. static void binder_set_nice(long nice)
  1198. {
  1199. long min_nice;
  1200. if (can_nice(current, nice)) {
  1201. set_user_nice(current, nice);
  1202. return;
  1203. }
  1204. min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
  1205. binder_debug(BINDER_DEBUG_PRIORITY_CAP,
  1206. "%d: nice value %ld not allowed use %ld instead\n",
  1207. current->pid, nice, min_nice);
  1208. set_user_nice(current, min_nice);
  1209. if (min_nice <= MAX_NICE)
  1210. return;
  1211. binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
  1212. }
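/*
 * Usable size of @buffer: the distance from its data area to the next
 * buffer in the proc's buffer list, or to the end of the mapping for the
 * last buffer.
 */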
  1213. static size_t binder_buffer_size(struct binder_proc *proc, struct binder_buffer *buffer)
  1214. {
  1215. if (list_is_last(&buffer->entry, &proc->buffers))
  1216. return proc->buffer + proc->buffer_size - (void *)buffer->data;
  1217. return (size_t) list_entry(buffer->entry.next,
  1218. struct binder_buffer, entry)-(size_t) buffer->data;
  1219. }
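/* Insert a free buffer into the proc's free_buffers rbtree, ordered by size. */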
  1220. static void binder_insert_free_buffer(struct binder_proc *proc, struct binder_buffer *new_buffer)
  1221. {
  1222. struct rb_node **p = &proc->free_buffers.rb_node;
  1223. struct rb_node *parent = NULL;
  1224. struct binder_buffer *buffer;
  1225. size_t buffer_size;
  1226. size_t new_buffer_size;
  1227. BUG_ON(!new_buffer->free);
  1228. new_buffer_size = binder_buffer_size(proc, new_buffer);
  1229. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1230. "%d: add free buffer, size %zd, at %p\n",
  1231. proc->pid, new_buffer_size, new_buffer);
  1232. while (*p) {
  1233. parent = *p;
  1234. buffer = rb_entry(parent, struct binder_buffer, rb_node);
  1235. BUG_ON(!buffer->free);
  1236. buffer_size = binder_buffer_size(proc, buffer);
  1237. if (new_buffer_size < buffer_size)
  1238. p = &parent->rb_left;
  1239. else
  1240. p = &parent->rb_right;
  1241. }
  1242. rb_link_node(&new_buffer->rb_node, parent, p);
  1243. rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
  1244. }
  1245. static void binder_insert_allocated_buffer(struct binder_proc *proc,
  1246. struct binder_buffer *new_buffer)
  1247. {
  1248. struct rb_node **p = &proc->allocated_buffers.rb_node;
  1249. struct rb_node *parent = NULL;
  1250. struct binder_buffer *buffer;
  1251. BUG_ON(new_buffer->free);
  1252. while (*p) {
  1253. parent = *p;
  1254. buffer = rb_entry(parent, struct binder_buffer, rb_node);
  1255. BUG_ON(buffer->free);
  1256. if (new_buffer < buffer)
  1257. p = &parent->rb_left;
  1258. else if (new_buffer > buffer)
  1259. p = &parent->rb_right;
  1260. else
  1261. BUG();
  1262. }
  1263. rb_link_node(&new_buffer->rb_node, parent, p);
  1264. rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
  1265. }
  1266. static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, uintptr_t user_ptr)
  1267. {
  1268. struct rb_node *n = proc->allocated_buffers.rb_node;
  1269. struct binder_buffer *buffer;
  1270. struct binder_buffer *kern_ptr;
  1271. kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
  1272. - offsetof(struct binder_buffer, data));
  1273. while (n) {
  1274. buffer = rb_entry(n, struct binder_buffer, rb_node);
  1275. BUG_ON(buffer->free);
  1276. if (kern_ptr < buffer)
  1277. n = n->rb_left;
  1278. else if (kern_ptr > buffer)
  1279. n = n->rb_right;
  1280. else
  1281. return buffer;
  1282. }
  1283. return NULL;
  1284. }
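/*
 * Allocate (allocate != 0) or free the physical pages backing [start, end)
 * and map/unmap them in both the kernel and the target process's binder vma.
 */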
  1285. static int binder_update_page_range(struct binder_proc *proc, int allocate,
  1286. void *start, void *end, struct vm_area_struct *vma)
  1287. {
  1288. void *page_addr;
  1289. unsigned long user_page_addr;
  1290. struct vm_struct tmp_area;
  1291. struct page **page;
  1292. struct mm_struct *mm;
  1293. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1294. "%d: %s pages %p-%p\n", proc->pid, allocate ? "allocate" : "free", start, end);
  1295. if (end <= start)
  1296. return 0;
  1297. trace_binder_update_page_range(proc, allocate, start, end);
  1298. if (vma)
  1299. mm = NULL;
  1300. else
  1301. mm = get_task_mm(proc->tsk);
  1302. if (mm) {
  1303. down_write(&mm->mmap_sem);
  1304. vma = proc->vma;
  1305. if (vma && mm != proc->vma_vm_mm) {
  1306. pr_err("%d: vma mm and task mm mismatch\n", proc->pid);
  1307. vma = NULL;
  1308. }
  1309. }
  1310. if (allocate == 0)
  1311. goto free_range;
  1312. if (vma == NULL) {
  1313. pr_err
  1314. ("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", proc->pid);
  1315. goto err_no_vma;
  1316. }
  1317. for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
  1318. int ret;
  1319. page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
  1320. BUG_ON(*page);
  1321. *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
  1322. if (*page == NULL) {
  1323. pr_err("%d: binder_alloc_buf failed for page at %p\n",
  1324. proc->pid, page_addr);
  1325. goto err_alloc_page_failed;
  1326. }
  1327. #ifdef MTK_BINDER_PAGE_USED_RECORD
  1328. binder_page_used++;
  1329. proc->page_used++;
  1330. if (binder_page_used > binder_page_used_peak)
  1331. binder_page_used_peak = binder_page_used;
  1332. if (proc->page_used > proc->page_used_peak)
  1333. proc->page_used_peak = proc->page_used;
  1334. #endif
  1335. tmp_area.addr = page_addr;
  1336. tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
  1337. ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
  1338. if (ret) {
  1339. pr_err
  1340. ("%d: binder_alloc_buf failed to map page at %p in kernel\n",
  1341. proc->pid, page_addr);
  1342. goto err_map_kernel_failed;
  1343. }
  1344. user_page_addr = (uintptr_t) page_addr + proc->user_buffer_offset;
  1345. ret = vm_insert_page(vma, user_page_addr, page[0]);
  1346. if (ret) {
  1347. pr_err
  1348. ("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
  1349. proc->pid, user_page_addr);
  1350. goto err_vm_insert_page_failed;
  1351. }
  1352. /* vm_insert_page does not seem to increment the refcount */
  1353. }
  1354. if (mm) {
  1355. up_write(&mm->mmap_sem);
  1356. mmput(mm);
  1357. }
  1358. return 0;
  1359. free_range:
  1360. for (page_addr = end - PAGE_SIZE; page_addr >= start; page_addr -= PAGE_SIZE) {
  1361. page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
  1362. if (vma)
  1363. zap_page_range(vma, (uintptr_t) page_addr +
  1364. proc->user_buffer_offset, PAGE_SIZE, NULL);
  1365. err_vm_insert_page_failed:
  1366. unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
  1367. err_map_kernel_failed:
  1368. __free_page(*page);
  1369. *page = NULL;
  1370. #ifdef MTK_BINDER_PAGE_USED_RECORD
  1371. if (binder_page_used > 0)
  1372. binder_page_used--;
  1373. if (proc->page_used > 0)
  1374. proc->page_used--;
  1375. #endif
  1376. err_alloc_page_failed:
  1377. ;
  1378. }
  1379. err_no_vma:
  1380. if (mm) {
  1381. up_write(&mm->mmap_sem);
  1382. mmput(mm);
  1383. }
  1384. return -ENOMEM;
  1385. }
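/*
 * Carve a transaction buffer out of the proc's mapping: best-fit search of
 * the free_buffers rbtree, page backing via binder_update_page_range(), and
 * any unused tail is split off and returned to the free tree.
 */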
  1386. static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
  1387. size_t data_size, size_t offsets_size, int is_async)
  1388. {
  1389. struct rb_node *n = proc->free_buffers.rb_node;
  1390. struct binder_buffer *buffer;
  1391. size_t buffer_size;
  1392. struct rb_node *best_fit = NULL;
  1393. void *has_page_addr;
  1394. void *end_page_addr;
  1395. size_t size;
  1396. #ifdef MTK_BINDER_DEBUG
  1397. size_t proc_max_size;
  1398. #endif
  1399. if (proc->vma == NULL) {
  1400. pr_err("%d: binder_alloc_buf, no vma\n", proc->pid);
  1401. return NULL;
  1402. }
  1403. size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *));
  1404. if (size < data_size || size < offsets_size) {
  1405. binder_user_error
  1406. ("%d: got transaction with invalid size %zd-%zd\n",
  1407. proc->pid, data_size, offsets_size);
  1408. return NULL;
  1409. }
  1410. #ifdef MTK_BINDER_DEBUG
  1411. proc_max_size = (is_async ? (proc->buffer_size / 2) : proc->buffer_size);
  1412. if (proc_max_size < size + sizeof(struct binder_buffer)) {
  1413. binder_user_error("%d: got transaction with too large size %s alloc size %zd-%zd allowed size %zd\n",
  1414. proc->pid, is_async ? "async" : "sync",
  1415. data_size, offsets_size,
  1416. (proc_max_size - sizeof(struct binder_buffer)));
  1417. return NULL;
  1418. }
  1419. #endif
  1420. if (is_async && proc->free_async_space < size + sizeof(struct binder_buffer)) {
  1421. #ifdef MTK_BINDER_DEBUG
  1422. pr_err("%d: binder_alloc_buf size %zd failed, no async space left (%zd)\n",
  1423. proc->pid, size, proc->free_async_space);
  1424. #else
  1425. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1426. "%d: binder_alloc_buf size %zd failed, no async space left\n",
  1427. proc->pid, size);
  1428. #endif
  1429. #ifdef BINDER_MONITOR
  1430. binder_check_buf(proc, size, 1);
  1431. #endif
  1432. return NULL;
  1433. }
  1434. while (n) {
  1435. buffer = rb_entry(n, struct binder_buffer, rb_node);
  1436. BUG_ON(!buffer->free);
  1437. buffer_size = binder_buffer_size(proc, buffer);
  1438. if (size < buffer_size) {
  1439. best_fit = n;
  1440. n = n->rb_left;
  1441. } else if (size > buffer_size)
  1442. n = n->rb_right;
  1443. else {
  1444. best_fit = n;
  1445. break;
  1446. }
  1447. }
  1448. #ifdef BINDER_MONITOR
  1449. if (log_disable & BINDER_BUF_WARN) {
  1450. if (size > 64) {
  1451. pr_err
  1452. ("%d: binder_alloc_buf size %zd failed, UT auto triggerd!\n",
  1453. proc->pid, size);
  1454. binder_check_buf(proc, size, 0);
  1455. }
  1456. }
  1457. #endif
  1458. if (best_fit == NULL) {
  1459. pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", proc->pid, size);
  1460. #ifdef BINDER_MONITOR
  1461. binder_check_buf(proc, size, 0);
  1462. #endif
  1463. return NULL;
  1464. }
  1465. if (n == NULL) {
  1466. buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
  1467. buffer_size = binder_buffer_size(proc, buffer);
  1468. }
  1469. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1470. "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
  1471. proc->pid, size, buffer, buffer_size);
  1472. has_page_addr = (void *)(((uintptr_t) buffer->data + buffer_size) & PAGE_MASK);
  1473. if (n == NULL) {
  1474. if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
  1475. buffer_size = size; /* no room for other buffers */
  1476. else
  1477. buffer_size = size + sizeof(struct binder_buffer);
  1478. }
  1479. end_page_addr = (void *)PAGE_ALIGN((uintptr_t) buffer->data + buffer_size);
  1480. if (end_page_addr > has_page_addr)
  1481. end_page_addr = has_page_addr;
  1482. if (binder_update_page_range(proc, 1,
  1483. (void *)PAGE_ALIGN((uintptr_t) buffer->data), end_page_addr,
  1484. NULL))
  1485. return NULL;
  1486. rb_erase(best_fit, &proc->free_buffers);
  1487. buffer->free = 0;
  1488. binder_insert_allocated_buffer(proc, buffer);
  1489. if (buffer_size != size) {
  1490. struct binder_buffer *new_buffer = (void *)buffer->data + size;
  1491. list_add(&new_buffer->entry, &buffer->entry);
  1492. new_buffer->free = 1;
  1493. binder_insert_free_buffer(proc, new_buffer);
  1494. }
  1495. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1496. "%d: binder_alloc_buf size %zd got %p\n", proc->pid, size, buffer);
  1497. buffer->data_size = data_size;
  1498. buffer->offsets_size = offsets_size;
  1499. buffer->async_transaction = is_async;
  1500. if (is_async) {
  1501. proc->free_async_space -= size + sizeof(struct binder_buffer);
  1502. binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
  1503. "%d: binder_alloc_buf size %zd async free %zd\n",
  1504. proc->pid, size, proc->free_async_space);
  1505. }
  1506. return buffer;
  1507. }
  1508. static void *buffer_start_page(struct binder_buffer *buffer)
  1509. {
  1510. return (void *)((uintptr_t) buffer & PAGE_MASK);
  1511. }
  1512. static void *buffer_end_page(struct binder_buffer *buffer)
  1513. {
  1514. return (void *)(((uintptr_t) (buffer + 1) - 1) & PAGE_MASK);
  1515. }
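/*
 * Unlink a free buffer that is being coalesced with an adjacent free buffer,
 * releasing any page it does not share with its neighbours.
 */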
  1516. static void binder_delete_free_buffer(struct binder_proc *proc, struct binder_buffer *buffer)
  1517. {
  1518. struct binder_buffer *prev, *next = NULL;
  1519. int free_page_end = 1;
  1520. int free_page_start = 1;
  1521. BUG_ON(proc->buffers.next == &buffer->entry);
  1522. prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
  1523. BUG_ON(!prev->free);
  1524. if (buffer_end_page(prev) == buffer_start_page(buffer)) {
  1525. free_page_start = 0;
  1526. if (buffer_end_page(prev) == buffer_end_page(buffer))
  1527. free_page_end = 0;
  1528. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1529. "%d: merge free, buffer %p share page with %p\n",
  1530. proc->pid, buffer, prev);
  1531. }
  1532. if (!list_is_last(&buffer->entry, &proc->buffers)) {
  1533. next = list_entry(buffer->entry.next, struct binder_buffer, entry);
  1534. if (buffer_start_page(next) == buffer_end_page(buffer)) {
  1535. free_page_end = 0;
  1536. if (buffer_start_page(next) == buffer_start_page(buffer))
  1537. free_page_start = 0;
  1538. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1539. "%d: merge free, buffer %p share page with %p\n",
1540. proc->pid, buffer, next);
  1541. }
  1542. }
  1543. list_del(&buffer->entry);
  1544. if (free_page_start || free_page_end) {
  1545. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1546. "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
  1547. proc->pid, buffer, free_page_start ? "" : " end",
  1548. free_page_end ? "" : " start", prev, next);
  1549. binder_update_page_range(proc, 0, free_page_start ?
  1550. buffer_start_page(buffer) :
  1551. buffer_end_page(buffer),
  1552. (free_page_end ?
  1553. buffer_end_page(buffer) :
  1554. buffer_start_page(buffer)) + PAGE_SIZE, NULL);
  1555. }
  1556. }
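/*
 * Return a buffer to the free pool: release its pages, merge with adjacent
 * free buffers and re-insert the result into free_buffers.
 */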
  1557. static void binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
  1558. {
  1559. size_t size, buffer_size;
  1560. buffer_size = binder_buffer_size(proc, buffer);
  1561. size = ALIGN(buffer->data_size, sizeof(void *)) +
  1562. ALIGN(buffer->offsets_size, sizeof(void *));
  1563. binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
  1564. "%d: binder_free_buf %p size %zd buffer_size %zd\n",
  1565. proc->pid, buffer, size, buffer_size);
  1566. BUG_ON(buffer->free);
  1567. BUG_ON(size > buffer_size);
  1568. BUG_ON(buffer->transaction != NULL);
  1569. BUG_ON((void *)buffer < proc->buffer);
  1570. BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
  1571. #ifdef BINDER_MONITOR
  1572. buffer->log_entry = NULL;
  1573. #endif
  1574. if (buffer->async_transaction) {
  1575. proc->free_async_space += size + sizeof(struct binder_buffer);
  1576. binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
  1577. "%d: binder_free_buf size %zd async free %zd\n",
  1578. proc->pid, size, proc->free_async_space);
  1579. }
  1580. binder_update_page_range(proc, 0,
  1581. (void *)PAGE_ALIGN((uintptr_t) buffer->data),
  1582. (void
  1583. *)(((uintptr_t) buffer->data + buffer_size) & PAGE_MASK), NULL);
  1584. rb_erase(&buffer->rb_node, &proc->allocated_buffers);
  1585. buffer->free = 1;
  1586. if (!list_is_last(&buffer->entry, &proc->buffers)) {
  1587. struct binder_buffer *next = list_entry(buffer->entry.next,
  1588. struct binder_buffer,
  1589. entry);
  1590. if (next->free) {
  1591. rb_erase(&next->rb_node, &proc->free_buffers);
  1592. binder_delete_free_buffer(proc, next);
  1593. }
  1594. }
  1595. if (proc->buffers.next != &buffer->entry) {
  1596. struct binder_buffer *prev = list_entry(buffer->entry.prev,
  1597. struct binder_buffer,
  1598. entry);
  1599. if (prev->free) {
  1600. binder_delete_free_buffer(proc, buffer);
  1601. rb_erase(&prev->rb_node, &proc->free_buffers);
  1602. buffer = prev;
  1603. }
  1604. }
  1605. binder_insert_free_buffer(proc, buffer);
  1606. }
  1607. static struct binder_node *binder_get_node(struct binder_proc *proc, binder_uintptr_t ptr)
  1608. {
  1609. struct rb_node *n = proc->nodes.rb_node;
  1610. struct binder_node *node;
  1611. while (n) {
  1612. node = rb_entry(n, struct binder_node, rb_node);
  1613. if (ptr < node->ptr)
  1614. n = n->rb_left;
  1615. else if (ptr > node->ptr)
  1616. n = n->rb_right;
  1617. else
  1618. return node;
  1619. }
  1620. return NULL;
  1621. }
  1622. static struct binder_node *binder_new_node(struct binder_proc *proc,
  1623. binder_uintptr_t ptr, binder_uintptr_t cookie)
  1624. {
  1625. struct rb_node **p = &proc->nodes.rb_node;
  1626. struct rb_node *parent = NULL;
  1627. struct binder_node *node;
  1628. while (*p) {
  1629. parent = *p;
  1630. node = rb_entry(parent, struct binder_node, rb_node);
  1631. if (ptr < node->ptr)
  1632. p = &(*p)->rb_left;
  1633. else if (ptr > node->ptr)
  1634. p = &(*p)->rb_right;
  1635. else
  1636. return NULL;
  1637. }
  1638. node = kzalloc(sizeof(*node), GFP_KERNEL);
  1639. if (node == NULL)
  1640. return NULL;
  1641. binder_stats_created(BINDER_STAT_NODE);
  1642. rb_link_node(&node->rb_node, parent, p);
  1643. rb_insert_color(&node->rb_node, &proc->nodes);
  1644. node->debug_id = ++binder_last_id;
  1645. node->proc = proc;
  1646. node->ptr = ptr;
  1647. node->cookie = cookie;
  1648. node->work.type = BINDER_WORK_NODE;
  1649. INIT_LIST_HEAD(&node->work.entry);
  1650. INIT_LIST_HEAD(&node->async_todo);
  1651. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1652. "%d:%d node %d u%016llx c%016llx created\n",
  1653. proc->pid, current->pid, node->debug_id, (u64) node->ptr, (u64) node->cookie);
  1654. return node;
  1655. }
  1656. static int binder_inc_node(struct binder_node *node, int strong, int internal,
  1657. struct list_head *target_list)
  1658. {
  1659. if (strong) {
  1660. if (internal) {
  1661. if (target_list == NULL &&
  1662. node->internal_strong_refs == 0 &&
  1663. !(node == binder_context_mgr_node && node->has_strong_ref)) {
  1664. pr_err("invalid inc strong node for %d\n", node->debug_id);
  1665. return -EINVAL;
  1666. }
  1667. node->internal_strong_refs++;
  1668. } else
  1669. node->local_strong_refs++;
  1670. if (!node->has_strong_ref && target_list) {
  1671. list_del_init(&node->work.entry);
  1672. list_add_tail(&node->work.entry, target_list);
  1673. }
  1674. } else {
  1675. if (!internal)
  1676. node->local_weak_refs++;
  1677. if (!node->has_weak_ref && list_empty(&node->work.entry)) {
  1678. if (target_list == NULL) {
  1679. pr_err("invalid inc weak node for %d\n", node->debug_id);
  1680. return -EINVAL;
  1681. }
  1682. list_add_tail(&node->work.entry, target_list);
  1683. }
  1684. }
  1685. return 0;
  1686. }
  1687. static int binder_dec_node(struct binder_node *node, int strong, int internal)
  1688. {
  1689. if (strong) {
  1690. if (internal)
  1691. node->internal_strong_refs--;
  1692. else
  1693. node->local_strong_refs--;
  1694. if (node->local_strong_refs || node->internal_strong_refs)
  1695. return 0;
  1696. } else {
  1697. if (!internal)
  1698. node->local_weak_refs--;
  1699. if (node->local_weak_refs || !hlist_empty(&node->refs))
  1700. return 0;
  1701. }
  1702. if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
  1703. if (list_empty(&node->work.entry)) {
  1704. list_add_tail(&node->work.entry, &node->proc->todo);
  1705. wake_up_interruptible(&node->proc->wait);
  1706. }
  1707. } else {
  1708. if (hlist_empty(&node->refs) && !node->local_strong_refs && !node->local_weak_refs) {
  1709. list_del_init(&node->work.entry);
  1710. if (node->proc) {
  1711. rb_erase(&node->rb_node, &node->proc->nodes);
  1712. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1713. "refless node %d deleted\n", node->debug_id);
  1714. } else {
  1715. hlist_del(&node->dead_node);
  1716. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1717. "dead node %d deleted\n", node->debug_id);
  1718. }
  1719. kfree(node);
  1720. binder_stats_deleted(BINDER_STAT_NODE);
  1721. }
  1722. }
  1723. return 0;
  1724. }
  1725. static struct binder_ref *binder_get_ref(struct binder_proc *proc, uint32_t desc)
  1726. {
  1727. struct rb_node *n = proc->refs_by_desc.rb_node;
  1728. struct binder_ref *ref;
  1729. while (n) {
  1730. ref = rb_entry(n, struct binder_ref, rb_node_desc);
  1731. if (desc < ref->desc)
  1732. n = n->rb_left;
  1733. else if (desc > ref->desc)
  1734. n = n->rb_right;
  1735. else
  1736. return ref;
  1737. }
  1738. return NULL;
  1739. }
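/*
 * Look up @proc's ref to @node, creating one if needed. A new ref gets the
 * lowest unused descriptor; desc 0 is reserved for the context manager node.
 */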
  1740. static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
  1741. struct binder_node *node)
  1742. {
  1743. struct rb_node *n;
  1744. struct rb_node **p = &proc->refs_by_node.rb_node;
  1745. struct rb_node *parent = NULL;
  1746. struct binder_ref *ref, *new_ref;
  1747. while (*p) {
  1748. parent = *p;
  1749. ref = rb_entry(parent, struct binder_ref, rb_node_node);
  1750. if (node < ref->node)
  1751. p = &(*p)->rb_left;
  1752. else if (node > ref->node)
  1753. p = &(*p)->rb_right;
  1754. else
  1755. return ref;
  1756. }
  1757. new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
  1758. if (new_ref == NULL)
  1759. return NULL;
  1760. binder_stats_created(BINDER_STAT_REF);
  1761. new_ref->debug_id = ++binder_last_id;
  1762. new_ref->proc = proc;
  1763. new_ref->node = node;
  1764. rb_link_node(&new_ref->rb_node_node, parent, p);
  1765. rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
  1766. new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
  1767. for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
  1768. ref = rb_entry(n, struct binder_ref, rb_node_desc);
  1769. if (ref->desc > new_ref->desc)
  1770. break;
  1771. new_ref->desc = ref->desc + 1;
  1772. }
  1773. p = &proc->refs_by_desc.rb_node;
  1774. while (*p) {
  1775. parent = *p;
  1776. ref = rb_entry(parent, struct binder_ref, rb_node_desc);
  1777. if (new_ref->desc < ref->desc)
  1778. p = &(*p)->rb_left;
  1779. else if (new_ref->desc > ref->desc)
  1780. p = &(*p)->rb_right;
  1781. else
  1782. BUG();
  1783. }
  1784. rb_link_node(&new_ref->rb_node_desc, parent, p);
  1785. rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
  1786. if (node) {
  1787. hlist_add_head(&new_ref->node_entry, &node->refs);
  1788. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1789. "%d new ref %d desc %d for node %d\n",
  1790. proc->pid, new_ref->debug_id, new_ref->desc, node->debug_id);
  1791. } else {
  1792. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1793. "%d new ref %d desc %d for dead node\n",
  1794. proc->pid, new_ref->debug_id, new_ref->desc);
  1795. }
  1796. return new_ref;
  1797. }
  1798. static void binder_delete_ref(struct binder_ref *ref)
  1799. {
  1800. binder_debug(BINDER_DEBUG_INTERNAL_REFS,
  1801. "%d delete ref %d desc %d for node %d\n",
  1802. ref->proc->pid, ref->debug_id, ref->desc, ref->node->debug_id);
  1803. rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
  1804. rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
  1805. if (ref->strong)
  1806. binder_dec_node(ref->node, 1, 1);
  1807. hlist_del(&ref->node_entry);
  1808. binder_dec_node(ref->node, 0, 1);
  1809. if (ref->death) {
  1810. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1811. "%d delete ref %d desc %d has death notification\n",
  1812. ref->proc->pid, ref->debug_id, ref->desc);
  1813. list_del(&ref->death->work.entry);
  1814. kfree(ref->death);
  1815. binder_stats_deleted(BINDER_STAT_DEATH);
  1816. }
  1817. kfree(ref);
  1818. binder_stats_deleted(BINDER_STAT_REF);
  1819. }
  1820. static int binder_inc_ref(struct binder_ref *ref, int strong, struct list_head *target_list)
  1821. {
  1822. int ret;
  1823. if (strong) {
  1824. if (ref->strong == 0) {
  1825. ret = binder_inc_node(ref->node, 1, 1, target_list);
  1826. if (ret)
  1827. return ret;
  1828. }
  1829. ref->strong++;
  1830. } else {
  1831. if (ref->weak == 0) {
  1832. ret = binder_inc_node(ref->node, 0, 1, target_list);
  1833. if (ret)
  1834. return ret;
  1835. }
  1836. ref->weak++;
  1837. }
  1838. return 0;
  1839. }
  1840. static int binder_dec_ref(struct binder_ref *ref, int strong)
  1841. {
  1842. if (strong) {
  1843. if (ref->strong == 0) {
  1844. binder_user_error
  1845. ("%d invalid dec strong, ref %d desc %d s %d w %d\n",
  1846. ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak);
  1847. return -EINVAL;
  1848. }
  1849. ref->strong--;
  1850. if (ref->strong == 0) {
  1851. int ret;
  1852. ret = binder_dec_node(ref->node, strong, 1);
  1853. if (ret)
  1854. return ret;
  1855. }
  1856. } else {
  1857. if (ref->weak == 0) {
  1858. binder_user_error
  1859. ("%d invalid dec weak, ref %d desc %d s %d w %d\n",
  1860. ref->proc->pid, ref->debug_id, ref->desc, ref->strong, ref->weak);
  1861. return -EINVAL;
  1862. }
  1863. ref->weak--;
  1864. }
  1865. if (ref->strong == 0 && ref->weak == 0)
  1866. binder_delete_ref(ref);
  1867. return 0;
  1868. }
  1869. static void binder_pop_transaction(struct binder_thread *target_thread,
  1870. struct binder_transaction *t)
  1871. {
  1872. if (target_thread) {
  1873. BUG_ON(target_thread->transaction_stack != t);
  1874. BUG_ON(target_thread->transaction_stack->from != target_thread);
  1875. target_thread->transaction_stack = target_thread->transaction_stack->from_parent;
  1876. t->from = NULL;
  1877. }
  1878. t->need_reply = 0;
  1879. if (t->buffer)
  1880. t->buffer->transaction = NULL;
  1881. #ifdef BINDER_MONITOR
  1882. binder_cancel_bwdog(t);
  1883. #endif
  1884. kfree(t);
  1885. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  1886. }
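/*
 * Deliver @error_code to the sender of @t, walking up the transaction stack
 * when intermediate senders have already died.
 */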
  1887. static void binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
  1888. {
  1889. struct binder_thread *target_thread;
  1890. struct binder_transaction *next;
  1891. BUG_ON(t->flags & TF_ONE_WAY);
  1892. while (1) {
  1893. target_thread = t->from;
  1894. if (target_thread) {
  1895. if (target_thread->return_error != BR_OK &&
  1896. target_thread->return_error2 == BR_OK) {
  1897. target_thread->return_error2 = target_thread->return_error;
  1898. target_thread->return_error = BR_OK;
  1899. }
  1900. if (target_thread->return_error == BR_OK) {
  1901. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  1902. "send failed reply for transaction %d to %d:%d\n",
  1903. t->debug_id,
  1904. target_thread->proc->pid, target_thread->pid);
  1905. binder_pop_transaction(target_thread, t);
  1906. target_thread->return_error = error_code;
  1907. wake_up_interruptible(&target_thread->wait);
  1908. } else {
  1909. pr_err
  1910. ("reply failed, target thread, %d:%d, has error code %d already\n",
  1911. target_thread->proc->pid,
  1912. target_thread->pid, target_thread->return_error);
  1913. }
  1914. return;
  1915. }
  1916. next = t->from_parent;
  1917. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  1918. "send failed reply for transaction %d, target dead\n", t->debug_id);
  1919. binder_pop_transaction(target_thread, t);
  1920. if (next == NULL) {
  1921. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1922. "reply failed, no target thread at root\n");
  1923. return;
  1924. }
  1925. t = next;
  1926. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  1927. "reply failed, no target thread -- retry %d\n", t->debug_id);
  1928. }
  1929. }
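/*
 * Drop the node/ref/fd references carried by the flattened objects in
 * @buffer; when @failed_at is set, stop at the offset where the failing
 * transaction stopped processing objects.
 */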
  1930. static void binder_transaction_buffer_release(struct binder_proc *proc,
  1931. struct binder_buffer *buffer,
  1932. binder_size_t *failed_at)
  1933. {
  1934. binder_size_t *offp, *off_end;
  1935. int debug_id = buffer->debug_id;
  1936. binder_debug(BINDER_DEBUG_TRANSACTION,
  1937. "%d buffer release %d, size %zd-%zd, failed at %p\n",
  1938. proc->pid, buffer->debug_id,
  1939. buffer->data_size, buffer->offsets_size, failed_at);
  1940. if (buffer->target_node)
  1941. binder_dec_node(buffer->target_node, 1, 0);
  1942. offp = (binder_size_t *) (buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
  1943. if (failed_at)
  1944. off_end = failed_at;
  1945. else
  1946. off_end = (void *)offp + buffer->offsets_size;
  1947. for (; offp < off_end; offp++) {
  1948. struct flat_binder_object *fp;
  1949. if (*offp > buffer->data_size - sizeof(*fp) ||
  1950. buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) {
  1951. pr_err
  1952. ("transaction release %d bad offset %lld, size %zd\n",
  1953. debug_id, (u64) *offp, buffer->data_size);
  1954. continue;
  1955. }
  1956. fp = (struct flat_binder_object *)(buffer->data + *offp);
  1957. switch (fp->type) {
  1958. case BINDER_TYPE_BINDER:
  1959. case BINDER_TYPE_WEAK_BINDER:{
  1960. struct binder_node *node = binder_get_node(proc, fp->binder);
  1961. if (node == NULL) {
  1962. pr_err
  1963. ("transaction release %d bad node %016llx\n",
  1964. debug_id, (u64) fp->binder);
  1965. break;
  1966. }
  1967. binder_debug(BINDER_DEBUG_TRANSACTION,
  1968. " node %d u%016llx\n",
  1969. node->debug_id, (u64) node->ptr);
  1970. binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
  1971. }
  1972. break;
  1973. case BINDER_TYPE_HANDLE:
  1974. case BINDER_TYPE_WEAK_HANDLE:{
  1975. struct binder_ref *ref = binder_get_ref(proc, fp->handle);
  1976. if (ref == NULL) {
  1977. pr_err
  1978. ("transaction release %d bad handle %d\n",
  1979. debug_id, fp->handle);
  1980. break;
  1981. }
  1982. binder_debug(BINDER_DEBUG_TRANSACTION,
  1983. " ref %d desc %d (node %d)\n",
  1984. ref->debug_id, ref->desc, ref->node->debug_id);
  1985. binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
  1986. }
  1987. break;
  1988. case BINDER_TYPE_FD:
  1989. binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d\n", fp->handle);
  1990. if (failed_at)
  1991. task_close_fd(proc, fp->handle);
  1992. break;
  1993. default:
  1994. pr_err("transaction release %d bad object type %x\n", debug_id, fp->type);
  1995. break;
  1996. }
  1997. }
  1998. }
  1999. #ifdef RT_PRIO_INHERIT
  2000. static void mt_sched_setscheduler_nocheck(struct task_struct *p, int policy,
  2001. struct sched_param *param)
  2002. {
  2003. int ret;
  2004. ret = sched_setscheduler_nocheck(p, policy, param);
  2005. if (ret)
  2006. pr_err("set scheduler fail, error code: %d\n", ret);
  2007. }
  2008. #endif
  2009. #ifdef BINDER_MONITOR
2010. /* binder_update_transaction_time - update the read/reply done time of a transaction
2011. ** step:
2012. ** 0: start // not used
2013. ** 1: read
2014. ** 2: reply
2015. */
  2016. static void binder_update_transaction_time(struct binder_transaction_log *t_log,
  2017. struct binder_transaction *bt, int step)
  2018. {
2019. if ((NULL == bt) || (bt->log_idx == -1)
2020. || (bt->log_idx > (t_log->size - 1)))
2021. return;
2022. if (step < 1 || step > 2) {
2023. pr_err("update trans time fail, wrong step value for id %d\n", bt->debug_id);
2024. return;
2025. }
  2026. if (t_log->entry[bt->log_idx].debug_id == bt->debug_id) {
  2027. if (step == 1)
  2028. do_posix_clock_monotonic_gettime(&t_log->entry[bt->log_idx].readstamp);
  2029. else if (step == 2)
  2030. do_posix_clock_monotonic_gettime(&t_log->entry[bt->log_idx].endstamp);
  2031. }
  2032. }
2033. /* binder_update_transaction_ttid - record the target thread pid in the transaction log entry
2034. */
  2035. static void binder_update_transaction_ttid(struct binder_transaction_log *t_log,
  2036. struct binder_transaction *bt)
  2037. {
  2038. if ((NULL == bt) || (NULL == t_log))
  2039. return;
  2040. if ((bt->log_idx == -1) || (bt->log_idx > (t_log->size - 1)))
  2041. return;
  2042. if (bt->tthrd < 0)
  2043. return;
  2044. if ((t_log->entry[bt->log_idx].debug_id == bt->debug_id) &&
  2045. (t_log->entry[bt->log_idx].to_thread == 0)) {
  2046. t_log->entry[bt->log_idx].to_thread = bt->tthrd;
  2047. }
  2048. }
  2049. /* this is an addService() transaction identified by:
  2050. * fp->type == BINDER_TYPE_BINDER && tr->target.handle == 0
  2051. */
  2052. static void parse_service_name(struct binder_transaction_data *tr,
  2053. struct binder_proc *proc, char *name)
  2054. {
  2055. unsigned int i, len = 0;
  2056. char *tmp;
  2057. if (tr->target.handle == 0) {
  2058. for (i = 0; (2 * i) < tr->data_size; i++) {
  2059. /* hack into addService() payload:
  2060. * service name string is located at MAGIC_SERVICE_NAME_OFFSET,
  2061. * and interleaved with character '\0'.
  2062. * for example, 'p', '\0', 'h', '\0', 'o', '\0', 'n', '\0', 'e'
  2063. */
  2064. if ((2 * i) < MAGIC_SERVICE_NAME_OFFSET)
  2065. continue;
  2066. /* prevent array index overflow */
  2067. if (len >= (MAX_SERVICE_NAME_LEN - 1))
  2068. break;
  2069. tmp = (char *)(uintptr_t)(tr->data.ptr.buffer + (2 * i));
  2070. len += sprintf(name + len, "%c", *tmp);
  2071. }
  2072. name[len] = '\0';
  2073. } else {
  2074. name[0] = '\0';
  2075. }
  2076. /* via addService of activity service, identify
  2077. * system_server's process id.
  2078. */
  2079. if (!strcmp(name, "activity")) {
  2080. system_server_pid = proc->pid;
  2081. pr_debug("system_server %d\n", system_server_pid);
  2082. }
  2083. }
  2084. #endif
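/*
 * Core transaction path: resolve the target node/proc/thread, allocate a
 * buffer in the target, copy the data and translate binder objects and fds,
 * then queue the work and wake the target.
 */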
  2085. static void binder_transaction(struct binder_proc *proc,
  2086. struct binder_thread *thread,
  2087. struct binder_transaction_data *tr, int reply)
  2088. {
  2089. struct binder_transaction *t;
  2090. struct binder_work *tcomplete;
  2091. binder_size_t *offp, *off_end;
  2092. binder_size_t off_min;
  2093. struct binder_proc *target_proc;
  2094. struct binder_thread *target_thread = NULL;
  2095. struct binder_node *target_node = NULL;
  2096. struct list_head *target_list;
  2097. wait_queue_head_t *target_wait;
  2098. struct binder_transaction *in_reply_to = NULL;
  2099. struct binder_transaction_log_entry *e;
  2100. uint32_t return_error;
  2101. #ifdef BINDER_MONITOR
  2102. struct binder_transaction_log_entry log_entry;
  2103. unsigned int log_idx = -1;
  2104. if ((reply && (tr->data_size < (proc->buffer_size / 16)))
  2105. || log_disable)
  2106. e = &log_entry;
  2107. else {
  2108. e = binder_transaction_log_add(&binder_transaction_log);
  2109. if (binder_transaction_log.next)
  2110. log_idx = binder_transaction_log.next - 1;
  2111. else
  2112. log_idx = binder_transaction_log.size - 1;
  2113. }
  2114. #else
  2115. e = binder_transaction_log_add(&binder_transaction_log);
  2116. #endif
  2117. e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
  2118. e->from_proc = proc->pid;
  2119. e->from_thread = thread->pid;
  2120. e->target_handle = tr->target.handle;
  2121. e->data_size = tr->data_size;
  2122. e->offsets_size = tr->offsets_size;
  2123. #ifdef BINDER_MONITOR
  2124. e->code = tr->code;
  2125. /* fd 0 is also valid... set initial value to -1 */
  2126. e->fd = -1;
  2127. do_posix_clock_monotonic_gettime(&e->timestamp);
  2128. /* monotonic_to_bootbased(&e->timestamp); */
  2129. do_gettimeofday(&e->tv);
  2130. /* consider time zone. translate to android time */
  2131. e->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
  2132. #endif
  2133. if (reply) {
  2134. in_reply_to = thread->transaction_stack;
  2135. if (in_reply_to == NULL) {
  2136. binder_user_error
  2137. ("%d:%d got reply transaction with no transaction stack\n",
  2138. proc->pid, thread->pid);
  2139. return_error = BR_FAILED_REPLY;
  2140. goto err_empty_call_stack;
  2141. }
  2142. #ifdef BINDER_MONITOR
  2143. binder_cancel_bwdog(in_reply_to);
  2144. #endif
  2145. binder_set_nice(in_reply_to->saved_priority);
  2146. #ifdef RT_PRIO_INHERIT
  2147. if (rt_task(current)
  2148. && (MAX_RT_PRIO != in_reply_to->saved_rt_prio)
  2149. && !(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
  2150. BINDER_LOOPER_STATE_ENTERED))) {
  2151. struct sched_param param = {
  2152. .sched_priority = in_reply_to->saved_rt_prio,
  2153. };
  2154. mt_sched_setscheduler_nocheck(current, in_reply_to->saved_policy, &param);
  2155. #ifdef BINDER_MONITOR
  2156. if (log_disable & BINDER_RT_LOG_ENABLE) {
  2157. pr_debug
  2158. ("reply reset %d sched_policy from %d to %d rt_prio from %d to %d\n",
  2159. proc->pid, in_reply_to->policy,
  2160. in_reply_to->saved_policy,
  2161. in_reply_to->rt_prio, in_reply_to->saved_rt_prio);
  2162. }
  2163. #endif
  2164. }
  2165. #endif
  2166. if (in_reply_to->to_thread != thread) {
  2167. binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
  2168. proc->pid, thread->pid, in_reply_to->debug_id,
  2169. in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0,
  2170. in_reply_to->to_thread ?
  2171. in_reply_to->to_thread->pid : 0);
  2172. return_error = BR_FAILED_REPLY;
  2173. in_reply_to = NULL;
  2174. goto err_bad_call_stack;
  2175. }
  2176. thread->transaction_stack = in_reply_to->to_parent;
  2177. target_thread = in_reply_to->from;
  2178. if (target_thread == NULL) {
  2179. #ifdef MTK_BINDER_DEBUG
  2180. binder_user_error("%d:%d got reply transaction with bad transaction reply_from, ",
  2181. proc->pid, thread->pid);
  2182. binder_user_error("transaction %d has target %d:%d\n",
  2183. in_reply_to->debug_id,
  2184. in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0,
  2185. in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0);
  2186. #endif
  2187. return_error = BR_DEAD_REPLY;
  2188. goto err_dead_binder;
  2189. }
  2190. if (target_thread->transaction_stack != in_reply_to) {
  2191. binder_user_error
  2192. ("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
  2193. proc->pid, thread->pid,
2194. target_thread->transaction_stack ?
2195. target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id);
  2196. return_error = BR_FAILED_REPLY;
  2197. in_reply_to = NULL;
  2198. target_thread = NULL;
  2199. goto err_dead_binder;
  2200. }
  2201. target_proc = target_thread->proc;
  2202. #ifdef BINDER_MONITOR
  2203. e->service[0] = '\0';
  2204. #endif
  2205. } else {
  2206. if (tr->target.handle) {
  2207. struct binder_ref *ref;
  2208. ref = binder_get_ref(proc, tr->target.handle);
  2209. if (ref == NULL) {
  2210. binder_user_error
  2211. ("%d:%d got transaction to invalid handle\n",
  2212. proc->pid, thread->pid);
  2213. return_error = BR_FAILED_REPLY;
  2214. goto err_invalid_target_handle;
  2215. }
  2216. target_node = ref->node;
  2217. } else {
  2218. target_node = binder_context_mgr_node;
  2219. if (target_node == NULL) {
  2220. #ifdef MTK_BINDER_DEBUG
  2221. binder_user_error("%d:%d binder_context_mgr_node is NULL\n",
  2222. proc->pid, thread->pid);
  2223. #endif
  2224. return_error = BR_DEAD_REPLY;
  2225. goto err_no_context_mgr_node;
  2226. }
  2227. }
  2228. e->to_node = target_node->debug_id;
  2229. #ifdef BINDER_MONITOR
  2230. strcpy(e->service, target_node->name);
  2231. #endif
  2232. target_proc = target_node->proc;
  2233. if (target_proc == NULL) {
  2234. #ifdef MTK_BINDER_DEBUG
  2235. binder_user_error("%d:%d target_proc is NULL\n", proc->pid, thread->pid);
  2236. #endif
  2237. return_error = BR_DEAD_REPLY;
  2238. goto err_dead_binder;
  2239. }
  2240. if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
  2241. return_error = BR_FAILED_REPLY;
  2242. goto err_invalid_target_handle;
  2243. }
  2244. if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
  2245. struct binder_transaction *tmp;
  2246. tmp = thread->transaction_stack;
  2247. if (tmp->to_thread != thread) {
  2248. binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
  2249. proc->pid, thread->pid, tmp->debug_id,
  2250. tmp->to_proc ? tmp->to_proc->pid : 0,
  2251. tmp->to_thread ?
  2252. tmp->to_thread->pid : 0);
  2253. return_error = BR_FAILED_REPLY;
  2254. goto err_bad_call_stack;
  2255. }
  2256. while (tmp) {
  2257. if (tmp->from && tmp->from->proc == target_proc)
  2258. target_thread = tmp->from;
  2259. tmp = tmp->from_parent;
  2260. }
  2261. }
  2262. }
  2263. if (target_thread) {
  2264. e->to_thread = target_thread->pid;
  2265. target_list = &target_thread->todo;
  2266. target_wait = &target_thread->wait;
  2267. } else {
  2268. target_list = &target_proc->todo;
  2269. target_wait = &target_proc->wait;
  2270. }
  2271. e->to_proc = target_proc->pid;
  2272. /* TODO: reuse incoming transaction for reply */
  2273. t = kzalloc(sizeof(*t), GFP_KERNEL);
  2274. if (t == NULL) {
  2275. #ifdef MTK_BINDER_DEBUG
  2276. binder_user_error("%d:%d transaction allocation failed\n", proc->pid, thread->pid);
  2277. #endif
  2278. return_error = BR_FAILED_REPLY;
  2279. goto err_alloc_t_failed;
  2280. }
  2281. #ifdef BINDER_MONITOR
  2282. memcpy(&t->timestamp, &e->timestamp, sizeof(struct timespec));
  2283. /* do_gettimeofday(&t->tv); */
  2284. /* consider time zone. translate to android time */
  2285. /* t->tv.tv_sec -= (sys_tz.tz_minuteswest * 60); */
  2286. memcpy(&t->tv, &e->tv, sizeof(struct timeval));
  2287. if (!reply)
  2288. strcpy(t->service, target_node->name);
  2289. #endif
  2290. binder_stats_created(BINDER_STAT_TRANSACTION);
  2291. tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
  2292. if (tcomplete == NULL) {
  2293. #ifdef MTK_BINDER_DEBUG
  2294. binder_user_error("%d:%d tcomplete allocation failed\n", proc->pid, thread->pid);
  2295. #endif
  2296. return_error = BR_FAILED_REPLY;
  2297. goto err_alloc_tcomplete_failed;
  2298. }
  2299. binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
  2300. t->debug_id = ++binder_last_id;
  2301. e->debug_id = t->debug_id;
  2302. if (reply)
  2303. binder_debug(BINDER_DEBUG_TRANSACTION,
  2304. "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
  2305. proc->pid, thread->pid, t->debug_id,
  2306. target_proc->pid, target_thread->pid,
  2307. (u64) tr->data.ptr.buffer,
  2308. (u64) tr->data.ptr.offsets,
  2309. (u64) tr->data_size, (u64) tr->offsets_size);
  2310. else
  2311. binder_debug(BINDER_DEBUG_TRANSACTION,
  2312. "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
  2313. proc->pid, thread->pid, t->debug_id,
  2314. target_proc->pid, target_node->debug_id,
  2315. (u64) tr->data.ptr.buffer,
  2316. (u64) tr->data.ptr.offsets,
  2317. (u64) tr->data_size, (u64) tr->offsets_size);
  2318. #ifdef BINDER_MONITOR
  2319. t->fproc = proc->pid;
  2320. t->fthrd = thread->pid;
  2321. t->tproc = target_proc->pid;
  2322. t->tthrd = target_thread ? target_thread->pid : 0;
  2323. t->log_idx = log_idx;
  2324. if (!binder_check_buf_checked()) {
  2325. binder_check_buf_pid = proc->pid;
  2326. binder_check_buf_tid = thread->pid;
  2327. }
  2328. #endif
  2329. if (!reply && !(tr->flags & TF_ONE_WAY))
  2330. t->from = thread;
  2331. else
  2332. t->from = NULL;
  2333. t->sender_euid = task_euid(proc->tsk);
  2334. t->to_proc = target_proc;
  2335. t->to_thread = target_thread;
  2336. t->code = tr->code;
  2337. t->flags = tr->flags;
  2338. t->priority = task_nice(current);
  2339. #ifdef RT_PRIO_INHERIT
  2340. t->rt_prio = current->rt_priority;
  2341. t->policy = current->policy;
  2342. t->saved_rt_prio = MAX_RT_PRIO;
  2343. #endif
  2344. trace_binder_transaction(reply, t, target_node);
  2345. t->buffer = binder_alloc_buf(target_proc, tr->data_size,
  2346. tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
  2347. if (t->buffer == NULL) {
  2348. #ifdef MTK_BINDER_DEBUG
  2349. binder_user_error("%d:%d buffer allocation failed on %d:0\n", proc->pid, thread->pid, target_proc->pid);
  2350. #endif
  2351. return_error = BR_FAILED_REPLY;
  2352. goto err_binder_alloc_buf_failed;
  2353. }
  2354. t->buffer->allow_user_free = 0;
  2355. t->buffer->debug_id = t->debug_id;
  2356. t->buffer->transaction = t;
  2357. #ifdef BINDER_MONITOR
  2358. t->buffer->log_entry = e;
  2359. #endif
  2360. t->buffer->target_node = target_node;
  2361. trace_binder_transaction_alloc_buf(t->buffer);
  2362. if (target_node)
  2363. binder_inc_node(target_node, 1, 0, NULL);
  2364. offp = (binder_size_t *) (t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
  2365. if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
  2366. tr->data.ptr.buffer, tr->data_size)) {
  2367. binder_user_error
  2368. ("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid);
  2369. return_error = BR_FAILED_REPLY;
  2370. goto err_copy_data_failed;
  2371. }
  2372. if (copy_from_user(offp, (const void __user *)(uintptr_t)
  2373. tr->data.ptr.offsets, tr->offsets_size)) {
  2374. binder_user_error
  2375. ("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid);
  2376. return_error = BR_FAILED_REPLY;
  2377. goto err_copy_data_failed;
  2378. }
  2379. if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
  2380. binder_user_error
  2381. ("%d:%d got transaction with invalid offsets size, %lld\n",
  2382. proc->pid, thread->pid, (u64) tr->offsets_size);
  2383. return_error = BR_FAILED_REPLY;
  2384. goto err_bad_offset;
  2385. }
  2386. off_end = (void *)offp + tr->offsets_size;
  2387. off_min = 0;
  2388. for (; offp < off_end; offp++) {
  2389. struct flat_binder_object *fp;
  2390. if (*offp > t->buffer->data_size - sizeof(*fp) ||
  2391. *offp < off_min ||
  2392. t->buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(u32))) {
  2393. binder_user_error
  2394. ("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
  2395. proc->pid, thread->pid, (u64) *offp,
  2396. (u64) off_min, (u64) (t->buffer->data_size - sizeof(*fp)));
  2397. return_error = BR_FAILED_REPLY;
  2398. goto err_bad_offset;
  2399. }
  2400. fp = (struct flat_binder_object *)(t->buffer->data + *offp);
  2401. off_min = *offp + sizeof(struct flat_binder_object);
  2402. switch (fp->type) {
  2403. case BINDER_TYPE_BINDER:
  2404. case BINDER_TYPE_WEAK_BINDER:{
  2405. struct binder_ref *ref;
  2406. struct binder_node *node = binder_get_node(proc, fp->binder);
  2407. if (node == NULL) {
  2408. node = binder_new_node(proc, fp->binder, fp->cookie);
  2409. if (node == NULL) {
  2410. #ifdef MTK_BINDER_DEBUG
  2411. binder_user_error
  2412. ("%d:%d create new node failed\n",
  2413. proc->pid, thread->pid);
  2414. #endif
  2415. return_error = BR_FAILED_REPLY;
  2416. goto err_binder_new_node_failed;
  2417. }
  2418. node->min_priority =
  2419. fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
  2420. node->accept_fds =
  2421. !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
  2422. #ifdef BINDER_MONITOR
  2423. parse_service_name(tr, proc, node->name);
  2424. #endif
  2425. }
  2426. if (fp->cookie != node->cookie) {
  2427. binder_user_error
  2428. ("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
  2429. proc->pid, thread->pid,
  2430. (u64) fp->binder, node->debug_id,
  2431. (u64) fp->cookie, (u64) node->cookie);
  2432. return_error = BR_FAILED_REPLY;
  2433. goto err_binder_get_ref_for_node_failed;
  2434. }
  2435. if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
  2436. return_error = BR_FAILED_REPLY;
  2437. goto err_binder_get_ref_for_node_failed;
  2438. }
  2439. ref = binder_get_ref_for_node(target_proc, node);
  2440. if (ref == NULL) {
  2441. #ifdef MTK_BINDER_DEBUG
  2442. binder_user_error
  2443. ("%d:%d get binder ref failed\n",
  2444. proc->pid, thread->pid);
  2445. #endif
  2446. return_error = BR_FAILED_REPLY;
  2447. goto err_binder_get_ref_for_node_failed;
  2448. }
  2449. if (fp->type == BINDER_TYPE_BINDER)
  2450. fp->type = BINDER_TYPE_HANDLE;
  2451. else
  2452. fp->type = BINDER_TYPE_WEAK_HANDLE;
  2453. fp->handle = ref->desc;
  2454. binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
  2455. trace_binder_transaction_node_to_ref(t, node, ref);
  2456. binder_debug(BINDER_DEBUG_TRANSACTION,
  2457. " node %d u%016llx -> ref %d desc %d\n",
  2458. node->debug_id, (u64) node->ptr,
  2459. ref->debug_id, ref->desc);
  2460. }
  2461. break;
  2462. case BINDER_TYPE_HANDLE:
  2463. case BINDER_TYPE_WEAK_HANDLE:{
  2464. struct binder_ref *ref = binder_get_ref(proc, fp->handle);
  2465. if (ref == NULL) {
  2466. binder_user_error
  2467. ("%d:%d got transaction with invalid handle, %d\n",
  2468. proc->pid, thread->pid, fp->handle);
  2469. return_error = BR_FAILED_REPLY;
  2470. goto err_binder_get_ref_failed;
  2471. }
  2472. if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
  2473. return_error = BR_FAILED_REPLY;
  2474. goto err_binder_get_ref_failed;
  2475. }
  2476. if (ref->node->proc == target_proc) {
  2477. if (fp->type == BINDER_TYPE_HANDLE)
  2478. fp->type = BINDER_TYPE_BINDER;
  2479. else
  2480. fp->type = BINDER_TYPE_WEAK_BINDER;
  2481. fp->binder = ref->node->ptr;
  2482. fp->cookie = ref->node->cookie;
  2483. binder_inc_node(ref->node,
  2484. fp->type == BINDER_TYPE_BINDER, 0, NULL);
  2485. trace_binder_transaction_ref_to_node(t, ref);
  2486. binder_debug(BINDER_DEBUG_TRANSACTION,
  2487. " ref %d desc %d -> node %d u%016llx\n",
  2488. ref->debug_id, ref->desc,
  2489. ref->node->debug_id, (u64) ref->node->ptr);
  2490. } else {
  2491. struct binder_ref *new_ref;
  2492. new_ref = binder_get_ref_for_node(target_proc, ref->node);
  2493. if (new_ref == NULL) {
  2494. #ifdef MTK_BINDER_DEBUG
  2495. binder_user_error
  2496. ("%d:%d get new binder ref failed\n",
  2497. proc->pid, thread->pid);
  2498. #endif
  2499. return_error = BR_FAILED_REPLY;
  2500. goto err_binder_get_ref_for_node_failed;
  2501. }
  2502. fp->handle = new_ref->desc;
  2503. binder_inc_ref(new_ref,
  2504. fp->type == BINDER_TYPE_HANDLE, NULL);
  2505. trace_binder_transaction_ref_to_ref(t, ref, new_ref);
  2506. binder_debug(BINDER_DEBUG_TRANSACTION,
  2507. " ref %d desc %d -> ref %d desc %d (node %d)\n",
  2508. ref->debug_id, ref->desc,
  2509. new_ref->debug_id,
  2510. new_ref->desc, ref->node->debug_id);
  2511. }
  2512. }
  2513. break;
  2514. case BINDER_TYPE_FD:{
  2515. int target_fd;
  2516. struct file *file;
  2517. if (reply) {
  2518. if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
  2519. binder_user_error
  2520. ("%d:%d got reply with fd, %d, but target does not allow fds\n",
  2521. proc->pid, thread->pid, fp->handle);
  2522. return_error = BR_FAILED_REPLY;
  2523. goto err_fd_not_allowed;
  2524. }
  2525. } else if (!target_node->accept_fds) {
  2526. binder_user_error
  2527. ("%d:%d got transaction with fd, %d, but target does not allow fds\n",
  2528. proc->pid, thread->pid, fp->handle);
  2529. return_error = BR_FAILED_REPLY;
  2530. goto err_fd_not_allowed;
  2531. }
  2532. file = fget(fp->handle);
  2533. if (file == NULL) {
  2534. binder_user_error
  2535. ("%d:%d got transaction with invalid fd, %d\n",
  2536. proc->pid, thread->pid, fp->handle);
  2537. return_error = BR_FAILED_REPLY;
  2538. goto err_fget_failed;
  2539. }
  2540. if (security_binder_transfer_file
  2541. (proc->tsk, target_proc->tsk, file) < 0) {
  2542. fput(file);
  2543. return_error = BR_FAILED_REPLY;
  2544. goto err_get_unused_fd_failed;
  2545. }
  2546. target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
  2547. if (target_fd < 0) {
  2548. fput(file);
  2549. #ifdef MTK_BINDER_DEBUG
  2550. binder_user_error
  2551. ("%d:%d to %d failed, %d no unused fd available(%d:%s fd leak?), %d\n",
  2552. proc->pid, thread->pid,
  2553. target_proc->pid, target_proc->pid,
  2554. target_proc->pid,
  2555. target_proc->tsk ? target_proc->tsk->comm : "",
  2556. target_fd);
  2557. #endif
  2558. return_error = BR_FAILED_REPLY;
  2559. goto err_get_unused_fd_failed;
  2560. }
  2561. task_fd_install(target_proc, target_fd, file);
  2562. trace_binder_transaction_fd(t, fp->handle, target_fd);
  2563. binder_debug(BINDER_DEBUG_TRANSACTION,
  2564. " fd %d -> %d\n", fp->handle, target_fd);
  2565. /* TODO: fput? */
  2566. fp->handle = target_fd;
  2567. #ifdef BINDER_MONITOR
  2568. e->fd = target_fd;
  2569. #endif
  2570. }
  2571. break;
  2572. default:
  2573. binder_user_error
  2574. ("%d:%d got transaction with invalid object type, %x\n",
  2575. proc->pid, thread->pid, fp->type);
  2576. return_error = BR_FAILED_REPLY;
  2577. goto err_bad_object_type;
  2578. }
  2579. }
  2580. if (reply) {
  2581. BUG_ON(t->buffer->async_transaction != 0);
  2582. #ifdef BINDER_MONITOR
  2583. binder_update_transaction_time(&binder_transaction_log, in_reply_to, 2);
  2584. #endif
  2585. binder_pop_transaction(target_thread, in_reply_to);
  2586. } else if (!(t->flags & TF_ONE_WAY)) {
  2587. BUG_ON(t->buffer->async_transaction != 0);
  2588. t->need_reply = 1;
  2589. t->from_parent = thread->transaction_stack;
  2590. thread->transaction_stack = t;
  2591. } else {
  2592. BUG_ON(target_node == NULL);
  2593. BUG_ON(t->buffer->async_transaction != 1);
  2594. if (target_node->has_async_transaction) {
  2595. target_list = &target_node->async_todo;
  2596. target_wait = NULL;
  2597. } else
  2598. target_node->has_async_transaction = 1;
  2599. }
  2600. t->work.type = BINDER_WORK_TRANSACTION;
  2601. list_add_tail(&t->work.entry, target_list);
  2602. tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
  2603. list_add_tail(&tcomplete->entry, &thread->todo);
  2604. #ifdef RT_PRIO_INHERIT
  2605. if (target_wait) {
  2606. unsigned long flag;
  2607. wait_queue_t *curr, *next;
  2608. bool is_lock = false;
  2609. spin_lock_irqsave(&target_wait->lock, flag);
  2610. is_lock = true;
  2611. list_for_each_entry_safe(curr, next, &target_wait->task_list, task_list) {
  2612. unsigned flags = curr->flags;
  2613. struct task_struct *tsk = curr->private;
  2614. if (tsk == NULL) {
  2615. spin_unlock_irqrestore(&target_wait->lock, flag);
  2616. is_lock = false;
  2617. wake_up_interruptible(target_wait);
  2618. break;
  2619. }
  2620. #ifdef MTK_BINDER_DEBUG
  2621. if (tsk->state == TASK_UNINTERRUPTIBLE) {
  2622. pr_err("from %d:%d to %d:%d target thread state: %ld\n",
  2623. proc->pid, thread->pid, tsk->tgid, tsk->pid, tsk->state);
  2624. show_stack(tsk, NULL);
  2625. }
  2626. #endif
  2627. if (!reply && (t->policy == SCHED_RR || t->policy == SCHED_FIFO)
  2628. && t->rt_prio > tsk->rt_priority && !(t->flags & TF_ONE_WAY)) {
  2629. struct sched_param param = {
  2630. .sched_priority = t->rt_prio,
  2631. };
  2632. t->saved_rt_prio = tsk->rt_priority;
  2633. t->saved_policy = tsk->policy;
  2634. mt_sched_setscheduler_nocheck(tsk, t->policy, &param);
  2635. #ifdef BINDER_MONITOR
  2636. if (log_disable & BINDER_RT_LOG_ENABLE) {
  2637. pr_debug
  2638. ("write set %d sched_policy from %d to %d rt_prio from %d to %d\n",
  2639. tsk->pid, t->saved_policy,
  2640. t->policy, t->saved_rt_prio, t->rt_prio);
  2641. }
  2642. #endif
  2643. }
  2644. if (curr->func(curr, TASK_INTERRUPTIBLE, 0, NULL) &&
  2645. (flags & WQ_FLAG_EXCLUSIVE))
  2646. break;
  2647. }
  2648. if (is_lock)
  2649. spin_unlock_irqrestore(&target_wait->lock, flag);
  2650. }
  2651. #else
  2652. if (target_wait)
  2653. wake_up_interruptible(target_wait);
  2654. #endif
  2655. #ifdef BINDER_MONITOR
  2656. t->wait_on = reply ? WAIT_ON_REPLY_READ : WAIT_ON_READ;
  2657. binder_queue_bwdog(t, (time_t) WAIT_BUDGET_READ);
  2658. #endif
  2659. return;
  2660. err_get_unused_fd_failed:
  2661. err_fget_failed:
  2662. err_fd_not_allowed:
  2663. err_binder_get_ref_for_node_failed:
  2664. err_binder_get_ref_failed:
  2665. err_binder_new_node_failed:
  2666. err_bad_object_type:
  2667. err_bad_offset:
  2668. err_copy_data_failed:
  2669. trace_binder_transaction_failed_buffer_release(t->buffer);
  2670. binder_transaction_buffer_release(target_proc, t->buffer, offp);
  2671. t->buffer->transaction = NULL;
  2672. binder_free_buf(target_proc, t->buffer);
  2673. err_binder_alloc_buf_failed:
  2674. kfree(tcomplete);
  2675. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  2676. err_alloc_tcomplete_failed:
  2677. #ifdef BINDER_MONITOR
  2678. binder_cancel_bwdog(t);
  2679. #endif
  2680. kfree(t);
  2681. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  2682. err_alloc_t_failed:
  2683. err_bad_call_stack:
  2684. err_empty_call_stack:
  2685. err_dead_binder:
  2686. err_invalid_target_handle:
  2687. err_no_context_mgr_node:
  2688. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  2689. "%d:%d transaction failed %d, size %lld-%lld\n",
  2690. proc->pid, thread->pid, return_error,
  2691. (u64) tr->data_size, (u64) tr->offsets_size);
  2692. {
  2693. struct binder_transaction_log_entry *fe;
  2694. fe = binder_transaction_log_add(&binder_transaction_log_failed);
  2695. *fe = *e;
  2696. }
  2697. BUG_ON(thread->return_error != BR_OK);
  2698. if (in_reply_to) {
  2699. thread->return_error = BR_TRANSACTION_COMPLETE;
  2700. binder_send_failed_reply(in_reply_to, return_error);
  2701. } else
  2702. thread->return_error = return_error;
  2703. }
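/*
 * binder_thread_write() consumes the stream of BC_* commands that userspace
 * packed into its write buffer: each entry is a 32-bit command word followed
 * by that command's payload (for example a struct binder_transaction_data for
 * BC_TRANSACTION/BC_REPLY, or a cookie for BC_FREE_BUFFER). Parsing stops
 * early once a pending error is recorded in thread->return_error, and
 * *consumed reports how many bytes were actually processed.
 */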
  2704. static int binder_thread_write(struct binder_proc *proc,
  2705. struct binder_thread *thread,
  2706. binder_uintptr_t binder_buffer, size_t size,
  2707. binder_size_t *consumed)
  2708. {
  2709. uint32_t cmd;
  2710. void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
  2711. void __user *ptr = buffer + *consumed;
  2712. void __user *end = buffer + size;
  2713. while (ptr < end && thread->return_error == BR_OK) {
  2714. if (get_user(cmd, (uint32_t __user *)ptr))
  2715. return -EFAULT;
  2716. ptr += sizeof(uint32_t);
  2717. trace_binder_command(cmd);
  2718. if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
  2719. binder_stats.bc[_IOC_NR(cmd)]++;
  2720. proc->stats.bc[_IOC_NR(cmd)]++;
  2721. thread->stats.bc[_IOC_NR(cmd)]++;
  2722. }
  2723. switch (cmd) {
  2724. case BC_INCREFS:
  2725. case BC_ACQUIRE:
  2726. case BC_RELEASE:
  2727. case BC_DECREFS: {
  2728. uint32_t target;
  2729. struct binder_ref *ref;
  2730. const char *debug_string;
  2731. if (get_user(target, (uint32_t __user *) ptr))
  2732. return -EFAULT;
  2733. ptr += sizeof(uint32_t);
  2734. if (target == 0 && binder_context_mgr_node &&
  2735. (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
  2736. ref = binder_get_ref_for_node(proc,
  2737. binder_context_mgr_node);
  2738. if (ref->desc != target) {
  2739. binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
  2740. proc->pid, thread->pid,
  2741. ref->desc);
  2742. }
  2743. } else
  2744. ref = binder_get_ref(proc, target);
  2745. if (ref == NULL) {
  2746. binder_user_error("%d:%d refcount change on invalid ref %d\n",
  2747. proc->pid, thread->pid, target);
  2748. break;
  2749. }
  2750. switch (cmd) {
  2751. case BC_INCREFS:
  2752. debug_string = "IncRefs";
  2753. binder_inc_ref(ref, 0, NULL);
  2754. break;
  2755. case BC_ACQUIRE:
  2756. debug_string = "Acquire";
  2757. binder_inc_ref(ref, 1, NULL);
  2758. break;
  2759. case BC_RELEASE:
  2760. debug_string = "Release";
  2761. binder_dec_ref(ref, 1);
  2762. break;
  2763. case BC_DECREFS:
  2764. default:
  2765. debug_string = "DecRefs";
  2766. binder_dec_ref(ref, 0);
  2767. break;
  2768. }
  2769. binder_debug(BINDER_DEBUG_USER_REFS,
  2770. "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
  2771. proc->pid, thread->pid, debug_string, ref->debug_id,
  2772. ref->desc, ref->strong, ref->weak, ref->node->debug_id);
  2773. break;
  2774. }
  2775. case BC_INCREFS_DONE:
  2776. case BC_ACQUIRE_DONE:{
  2777. binder_uintptr_t node_ptr;
  2778. binder_uintptr_t cookie;
  2779. struct binder_node *node;
  2780. if (get_user(node_ptr, (binder_uintptr_t __user *) ptr))
  2781. return -EFAULT;
  2782. ptr += sizeof(binder_uintptr_t);
  2783. if (get_user(cookie, (binder_uintptr_t __user *) ptr))
  2784. return -EFAULT;
  2785. ptr += sizeof(binder_uintptr_t);
  2786. node = binder_get_node(proc, node_ptr);
  2787. if (node == NULL) {
  2788. binder_user_error("%d:%d %s u%016llx no match\n",
  2789. proc->pid, thread->pid,
  2790. cmd == BC_INCREFS_DONE ?
  2791. "BC_INCREFS_DONE" :
  2792. "BC_ACQUIRE_DONE",
  2793. (u64) node_ptr);
  2794. break;
  2795. }
  2796. if (cookie != node->cookie) {
  2797. binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
  2798. proc->pid, thread->pid,
  2799. cmd == BC_INCREFS_DONE ?
  2800. "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
  2801. (u64) node_ptr, node->debug_id,
  2802. (u64) cookie, (u64) node->cookie);
  2803. break;
  2804. }
  2805. if (cmd == BC_ACQUIRE_DONE) {
  2806. if (node->pending_strong_ref == 0) {
  2807. binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
  2808. proc->pid, thread->pid,
  2809. node->debug_id);
  2810. break;
  2811. }
  2812. node->pending_strong_ref = 0;
  2813. } else {
  2814. if (node->pending_weak_ref == 0) {
  2815. binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
  2816. proc->pid, thread->pid,
  2817. node->debug_id);
  2818. break;
  2819. }
  2820. node->pending_weak_ref = 0;
  2821. }
  2822. binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
  2823. binder_debug(BINDER_DEBUG_USER_REFS,
  2824. "%d:%d %s node %d ls %d lw %d\n",
  2825. proc->pid, thread->pid,
  2826. cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
  2827. node->debug_id, node->local_strong_refs, node->local_weak_refs);
  2828. break;
  2829. }
  2830. case BC_ATTEMPT_ACQUIRE:
  2831. pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
  2832. return -EINVAL;
  2833. case BC_ACQUIRE_RESULT:
  2834. pr_err("BC_ACQUIRE_RESULT not supported\n");
  2835. return -EINVAL;
  2836. case BC_FREE_BUFFER: {
  2837. binder_uintptr_t data_ptr;
  2838. struct binder_buffer *buffer;
  2839. if (get_user(data_ptr, (binder_uintptr_t __user *) ptr))
  2840. return -EFAULT;
  2841. ptr += sizeof(binder_uintptr_t);
  2842. buffer = binder_buffer_lookup(proc, data_ptr);
  2843. if (buffer == NULL) {
  2844. binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
  2845. proc->pid, thread->pid, (u64)data_ptr);
  2846. break;
  2847. }
  2848. if (!buffer->allow_user_free) {
  2849. binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
  2850. proc->pid, thread->pid, (u64) data_ptr);
  2851. break;
  2852. }
  2853. binder_debug(BINDER_DEBUG_FREE_BUFFER,
  2854. "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
  2855. proc->pid, thread->pid,
  2856. (u64) data_ptr, buffer->debug_id,
  2857. buffer->transaction ? "active" : "finished");
  2858. if (buffer->transaction) {
  2859. buffer->transaction->buffer = NULL;
  2860. buffer->transaction = NULL;
  2861. }
  2862. if (buffer->async_transaction && buffer->target_node) {
  2863. BUG_ON(!buffer->target_node->has_async_transaction);
  2864. #ifdef MTK_BINDER_DEBUG
  2865. if (list_empty(&buffer->target_node->async_todo)) {
  2866. buffer->target_node->has_async_transaction = 0;
  2867. buffer->target_node->async_pid = 0;
  2868. } else {
  2869. list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
  2870. buffer->target_node->async_pid = thread->pid;
  2871. }
  2872. #else
  2873. if (list_empty(&buffer->target_node->async_todo))
  2874. buffer->target_node->has_async_transaction = 0;
  2875. else
  2876. list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
  2877. #endif
  2878. }
  2879. trace_binder_transaction_buffer_release(buffer);
  2880. binder_transaction_buffer_release(proc, buffer, NULL);
  2881. binder_free_buf(proc, buffer);
  2882. break;
  2883. }
  2884. case BC_TRANSACTION:
  2885. case BC_REPLY: {
  2886. struct binder_transaction_data tr;
  2887. if (copy_from_user(&tr, ptr, sizeof(tr)))
  2888. return -EFAULT;
  2889. ptr += sizeof(tr);
  2890. binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
  2891. break;
  2892. }
  2893. case BC_REGISTER_LOOPER:
  2894. binder_debug(BINDER_DEBUG_THREADS,
  2895. "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid);
  2896. if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
  2897. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  2898. binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
  2899. proc->pid, thread->pid);
  2900. } else if (proc->requested_threads == 0) {
  2901. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  2902. binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
  2903. proc->pid, thread->pid);
  2904. } else {
  2905. proc->requested_threads--;
  2906. proc->requested_threads_started++;
  2907. }
  2908. thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
  2909. break;
  2910. case BC_ENTER_LOOPER:
  2911. binder_debug(BINDER_DEBUG_THREADS,
  2912. "%d:%d BC_ENTER_LOOPER\n",
  2913. proc->pid, thread->pid);
  2914. if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
  2915. thread->looper |= BINDER_LOOPER_STATE_INVALID;
  2916. binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
  2917. proc->pid, thread->pid);
  2918. }
  2919. thread->looper |= BINDER_LOOPER_STATE_ENTERED;
  2920. break;
  2921. case BC_EXIT_LOOPER:
  2922. binder_debug(BINDER_DEBUG_THREADS,
  2923. "%d:%d BC_EXIT_LOOPER\n",
  2924. proc->pid, thread->pid);
  2925. thread->looper |= BINDER_LOOPER_STATE_EXITED;
  2926. break;
  2927. case BC_REQUEST_DEATH_NOTIFICATION:
  2928. case BC_CLEAR_DEATH_NOTIFICATION:{
  2929. uint32_t target;
  2930. binder_uintptr_t cookie;
  2931. struct binder_ref *ref;
  2932. struct binder_ref_death *death;
  2933. if (get_user(target, (uint32_t __user *) ptr))
  2934. return -EFAULT;
  2935. ptr += sizeof(uint32_t);
  2936. if (get_user(cookie, (binder_uintptr_t __user *) ptr))
  2937. return -EFAULT;
  2938. ptr += sizeof(binder_uintptr_t);
  2939. ref = binder_get_ref(proc, target);
  2940. if (ref == NULL) {
  2941. binder_user_error("%d:%d %s invalid ref %d\n",
  2942. proc->pid, thread->pid,
  2943. cmd == BC_REQUEST_DEATH_NOTIFICATION ?
  2944. "BC_REQUEST_DEATH_NOTIFICATION" :
  2945. "BC_CLEAR_DEATH_NOTIFICATION", target);
  2946. break;
  2947. }
  2948. #ifdef MTK_DEATH_NOTIFY_MONITOR
  2949. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  2950. "[DN #%s]binder: %d:%d %s %d(%s) cookie 0x%016llx\n",
  2951. cmd == BC_REQUEST_DEATH_NOTIFICATION ? "1" :
  2952. "2", proc->pid, thread->pid,
  2953. cmd == BC_REQUEST_DEATH_NOTIFICATION ?
  2954. "BC_REQUEST_DEATH_NOTIFICATION" :
  2955. "BC_CLEAR_DEATH_NOTIFICATION",
  2956. ref->node->proc ? ref->node->proc->pid : 0,
  2957. #ifdef BINDER_MONITOR
  2958. ref->node ? ref->node->name : "",
  2959. #else
  2960. "",
  2961. #endif
  2962. (u64) cookie);
  2963. #else
  2964. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  2965. "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
  2966. proc->pid, thread->pid,
  2967. cmd == BC_REQUEST_DEATH_NOTIFICATION ?
  2968. "BC_REQUEST_DEATH_NOTIFICATION" :
  2969. "BC_CLEAR_DEATH_NOTIFICATION",
  2970. (u64) cookie, ref->debug_id,
  2971. ref->desc, ref->strong, ref->weak,
  2972. ref->node->debug_id);
  2973. #endif
  2974. if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
  2975. if (ref->death) {
  2976. binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
  2977. proc->pid, thread->pid);
  2978. break;
  2979. }
  2980. death = kzalloc(sizeof(*death), GFP_KERNEL);
  2981. if (death == NULL) {
  2982. thread->return_error = BR_ERROR;
  2983. binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
  2984. "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
  2985. proc->pid, thread->pid);
  2986. break;
  2987. }
  2988. binder_stats_created(BINDER_STAT_DEATH);
  2989. INIT_LIST_HEAD(&death->work.entry);
  2990. death->cookie = cookie;
  2991. ref->death = death;
  2992. if (ref->node->proc == NULL) {
  2993. ref->death->work.type = BINDER_WORK_DEAD_BINDER;
  2994. if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
  2995. list_add_tail(&ref->death->work.entry, &thread->todo);
  2996. } else {
  2997. list_add_tail(&ref->death->work.entry, &proc->todo);
  2998. wake_up_interruptible(&proc->wait);
  2999. }
  3000. }
  3001. } else {
  3002. if (ref->death == NULL) {
  3003. binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
  3004. proc->pid, thread->pid);
  3005. break;
  3006. }
  3007. death = ref->death;
  3008. if (death->cookie != cookie) {
  3009. binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
  3010. proc->pid, thread->pid,
  3011. (u64) death->cookie, (u64) cookie);
  3012. break;
  3013. }
  3014. ref->death = NULL;
  3015. if (list_empty(&death->work.entry)) {
  3016. death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
  3017. if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
  3018. list_add_tail(&death->work.entry, &thread->todo);
  3019. } else {
  3020. list_add_tail(&death->work.entry, &proc->todo);
  3021. wake_up_interruptible(&proc->wait);
  3022. }
  3023. } else {
  3024. BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
  3025. death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
  3026. }
  3027. }
  3028. }
  3029. break;
  3030. case BC_DEAD_BINDER_DONE: {
  3031. struct binder_work *w;
  3032. binder_uintptr_t cookie;
  3033. struct binder_ref_death *death = NULL;
  3034. if (get_user(cookie, (binder_uintptr_t __user *) ptr))
  3035. return -EFAULT;
  3036. #ifdef MTK_DEATH_NOTIFY_MONITOR
  3037. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  3038. "[DN #6]binder: %d:%d cookie 0x%016llx\n",
  3039. proc->pid, thread->pid, (u64) cookie);
  3040. #endif
3041. ptr += sizeof(cookie);
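/* note: the rest of this function advances ptr by the size of the value just read */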
  3042. list_for_each_entry(w, &proc->delivered_death, entry) {
  3043. struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
  3044. if (tmp_death->cookie == cookie) {
  3045. death = tmp_death;
  3046. break;
  3047. }
  3048. }
  3049. binder_debug(BINDER_DEBUG_DEAD_BINDER,
  3050. "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
  3051. proc->pid, thread->pid, (u64) cookie,
  3052. death);
  3053. if (death == NULL) {
  3054. binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
  3055. proc->pid, thread->pid, (u64) cookie);
  3056. break;
  3057. }
  3058. list_del_init(&death->work.entry);
  3059. if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
  3060. death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
  3061. if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
  3062. list_add_tail(&death->work.entry, &thread->todo);
  3063. } else {
  3064. list_add_tail(&death->work.entry, &proc->todo);
  3065. wake_up_interruptible(&proc->wait);
  3066. }
  3067. }
  3068. }
  3069. break;
  3070. default:
  3071. pr_err("%d:%d unknown command %d\n",
  3072. proc->pid, thread->pid, cmd);
  3073. return -EINVAL;
  3074. }
  3075. *consumed = ptr - buffer;
  3076. }
  3077. return 0;
  3078. }
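/* Account a BR_* return code in the global, per-process and per-thread stats. */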
  3079. static void binder_stat_br(struct binder_proc *proc,
  3080. struct binder_thread *thread, uint32_t cmd)
  3081. {
  3082. trace_binder_return(cmd);
  3083. if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
  3084. binder_stats.br[_IOC_NR(cmd)]++;
  3085. proc->stats.br[_IOC_NR(cmd)]++;
  3086. thread->stats.br[_IOC_NR(cmd)]++;
  3087. }
  3088. }
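/*
 * Work predicates used by the read and poll paths: a thread has process-level
 * work when proc->todo is non-empty, and thread-level work when its own todo
 * list is non-empty or an error reply is pending. NEED_RETURN forces an early
 * return to userspace in both cases.
 */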
  3089. static int binder_has_proc_work(struct binder_proc *proc,
  3090. struct binder_thread *thread)
  3091. {
  3092. return !list_empty(&proc->todo) ||
  3093. (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
  3094. }
  3095. static int binder_has_thread_work(struct binder_thread *thread)
  3096. {
  3097. return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
  3098. (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
  3099. }
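/*
 * binder_thread_read() blocks (unless non_block is set) until this thread or
 * its process has work queued, then translates binder_work items into BR_*
 * return commands in the caller's read buffer. The RT_PRIO_INHERIT and
 * BINDER_MONITOR sections appear to be vendor (MTK) additions for priority
 * inheritance and transaction timing and are compiled in only when those
 * macros are defined.
 */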
  3100. static int binder_thread_read(struct binder_proc *proc,
  3101. struct binder_thread *thread,
  3102. binder_uintptr_t binder_buffer, size_t size,
  3103. binder_size_t *consumed, int non_block)
  3104. {
  3105. void __user *buffer = (void __user *)(uintptr_t) binder_buffer;
  3106. void __user *ptr = buffer + *consumed;
  3107. void __user *end = buffer + size;
  3108. int ret = 0;
  3109. int wait_for_proc_work;
  3110. if (*consumed == 0) {
  3111. if (put_user(BR_NOOP, (uint32_t __user *)ptr))
  3112. return -EFAULT;
  3113. ptr += sizeof(uint32_t);
  3114. }
  3115. retry:
  3116. wait_for_proc_work = thread->transaction_stack == NULL &&
  3117. list_empty(&thread->todo);
  3118. if (thread->return_error != BR_OK && ptr < end) {
  3119. if (thread->return_error2 != BR_OK) {
  3120. if (put_user(thread->return_error2, (uint32_t __user *) ptr))
  3121. return -EFAULT;
  3122. ptr += sizeof(uint32_t);
  3123. pr_err
  3124. ("read put err2 %u to user %p, thread error %u:%u\n",
  3125. thread->return_error2, ptr, thread->return_error,
  3126. thread->return_error2);
  3127. binder_stat_br(proc, thread, thread->return_error2);
  3128. if (ptr == end)
  3129. goto done;
  3130. thread->return_error2 = BR_OK;
  3131. }
  3132. if (put_user(thread->return_error, (uint32_t __user *) ptr))
  3133. return -EFAULT;
  3134. ptr += sizeof(uint32_t);
  3135. pr_err("read put err %u to user %p, thread error %u:%u\n",
  3136. thread->return_error, ptr, thread->return_error, thread->return_error2);
  3137. binder_stat_br(proc, thread, thread->return_error);
  3138. thread->return_error = BR_OK;
  3139. goto done;
  3140. }
  3141. thread->looper |= BINDER_LOOPER_STATE_WAITING;
  3142. if (wait_for_proc_work)
  3143. proc->ready_threads++;
  3144. binder_unlock(__func__);
  3145. trace_binder_wait_for_work(wait_for_proc_work,
  3146. !!thread->transaction_stack, !list_empty(&thread->todo));
  3147. if (wait_for_proc_work) {
  3148. if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
  3149. BINDER_LOOPER_STATE_ENTERED))) {
  3150. binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
  3151. proc->pid, thread->pid, thread->looper);
  3152. wait_event_interruptible(binder_user_error_wait,
  3153. binder_stop_on_user_error < 2);
  3154. }
  3155. #ifdef RT_PRIO_INHERIT
3156. /* disable preemption to avoid being scheduled out immediately */
  3157. preempt_disable();
  3158. #endif
  3159. binder_set_nice(proc->default_priority);
  3160. #ifdef RT_PRIO_INHERIT
  3161. if (rt_task(current) && !binder_has_proc_work(proc, thread)) {
  3162. /* make sure binder has no work before setting priority back */
  3163. struct sched_param param = {
  3164. .sched_priority = proc->default_rt_prio,
  3165. };
  3166. #ifdef BINDER_MONITOR
  3167. if (log_disable & BINDER_RT_LOG_ENABLE) {
  3168. pr_debug
  3169. ("enter threadpool reset %d sched_policy from %u to %d rt_prio from %u to %d\n",
  3170. current->pid, current->policy,
  3171. proc->default_policy, current->rt_priority,
  3172. proc->default_rt_prio);
  3173. }
  3174. #endif
  3175. mt_sched_setscheduler_nocheck(current, proc->default_policy, &param);
  3176. }
  3177. preempt_enable_no_resched();
  3178. #endif
  3179. if (non_block) {
  3180. if (!binder_has_proc_work(proc, thread))
  3181. ret = -EAGAIN;
  3182. } else
  3183. ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
  3184. } else {
  3185. if (non_block) {
  3186. if (!binder_has_thread_work(thread))
  3187. ret = -EAGAIN;
  3188. } else
  3189. ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
  3190. }
  3191. binder_lock(__func__);
  3192. if (wait_for_proc_work)
  3193. proc->ready_threads--;
  3194. thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
  3195. if (ret)
  3196. return ret;
  3197. while (1) {
  3198. uint32_t cmd;
  3199. struct binder_transaction_data tr;
  3200. struct binder_work *w;
  3201. struct binder_transaction *t = NULL;
  3202. if (!list_empty(&thread->todo)) {
  3203. w = list_first_entry(&thread->todo, struct binder_work, entry);
  3204. } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
  3205. w = list_first_entry(&proc->todo, struct binder_work, entry);
  3206. } else {
  3207. /* no data added */
  3208. if (ptr - buffer == 4 &&
  3209. !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
  3210. goto retry;
  3211. break;
  3212. }
  3213. if (end - ptr < sizeof(tr) + 4)
  3214. break;
  3215. switch (w->type) {
  3216. case BINDER_WORK_TRANSACTION:{
  3217. t = container_of(w, struct binder_transaction, work);
  3218. #ifdef BINDER_MONITOR
  3219. binder_cancel_bwdog(t);
  3220. #endif
  3221. } break;
  3222. case BINDER_WORK_TRANSACTION_COMPLETE:{
  3223. cmd = BR_TRANSACTION_COMPLETE;
  3224. if (put_user(cmd, (uint32_t __user *) ptr))
  3225. return -EFAULT;
  3226. ptr += sizeof(uint32_t);
  3227. binder_stat_br(proc, thread, cmd);
  3228. binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
  3229. "%d:%d BR_TRANSACTION_COMPLETE\n",
  3230. proc->pid, thread->pid);
  3231. list_del(&w->entry);
  3232. kfree(w);
  3233. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  3234. }
  3235. break;
  3236. case BINDER_WORK_NODE:{
  3237. struct binder_node *node =
  3238. container_of(w, struct binder_node, work);
  3239. uint32_t cmd = BR_NOOP;
  3240. const char *cmd_name;
  3241. int strong = node->internal_strong_refs || node->local_strong_refs;
  3242. int weak = !hlist_empty(&node->refs)
  3243. || node->local_weak_refs || strong;
  3244. if (weak && !node->has_weak_ref) {
  3245. cmd = BR_INCREFS;
  3246. cmd_name = "BR_INCREFS";
  3247. node->has_weak_ref = 1;
  3248. node->pending_weak_ref = 1;
  3249. node->local_weak_refs++;
  3250. } else if (strong && !node->has_strong_ref) {
  3251. cmd = BR_ACQUIRE;
  3252. cmd_name = "BR_ACQUIRE";
  3253. node->has_strong_ref = 1;
  3254. node->pending_strong_ref = 1;
  3255. node->local_strong_refs++;
  3256. } else if (!strong && node->has_strong_ref) {
  3257. cmd = BR_RELEASE;
  3258. cmd_name = "BR_RELEASE";
  3259. node->has_strong_ref = 0;
  3260. } else if (!weak && node->has_weak_ref) {
  3261. cmd = BR_DECREFS;
  3262. cmd_name = "BR_DECREFS";
  3263. node->has_weak_ref = 0;
  3264. }
  3265. if (cmd != BR_NOOP) {
  3266. if (put_user(cmd, (uint32_t __user *) ptr))
  3267. return -EFAULT;
  3268. ptr += sizeof(uint32_t);
  3269. if (put_user(node->ptr, (binder_uintptr_t __user *)
  3270. ptr))
  3271. return -EFAULT;
  3272. ptr += sizeof(binder_uintptr_t);
  3273. if (put_user(node->cookie, (binder_uintptr_t __user *)
  3274. ptr))
  3275. return -EFAULT;
  3276. ptr += sizeof(binder_uintptr_t);
  3277. binder_stat_br(proc, thread, cmd);
  3278. binder_debug(BINDER_DEBUG_USER_REFS,
  3279. "%d:%d %s %d u%016llx c%016llx\n",
  3280. proc->pid, thread->pid,
  3281. cmd_name, node->debug_id,
  3282. (u64) node->ptr, (u64) node->cookie);
  3283. } else {
  3284. list_del_init(&w->entry);
  3285. if (!weak && !strong) {
  3286. binder_debug
  3287. (BINDER_DEBUG_INTERNAL_REFS,
  3288. "%d:%d node %d u%016llx c%016llx deleted\n",
  3289. proc->pid, thread->pid,
  3290. node->debug_id,
  3291. (u64) node->ptr, (u64) node->cookie);
  3292. rb_erase(&node->rb_node, &proc->nodes);
  3293. kfree(node);
  3294. binder_stats_deleted(BINDER_STAT_NODE);
  3295. } else {
  3296. binder_debug
  3297. (BINDER_DEBUG_INTERNAL_REFS,
  3298. "%d:%d node %d u%016llx c%016llx state unchanged\n",
  3299. proc->pid, thread->pid,
  3300. node->debug_id,
  3301. (u64) node->ptr, (u64) node->cookie);
  3302. }
  3303. }
  3304. }
  3305. break;
  3306. case BINDER_WORK_DEAD_BINDER:
  3307. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  3308. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:{
  3309. struct binder_ref_death *death;
  3310. uint32_t cmd;
  3311. death = container_of(w, struct binder_ref_death, work);
  3312. #ifdef MTK_DEATH_NOTIFY_MONITOR
  3313. binder_debug
  3314. (BINDER_DEBUG_DEATH_NOTIFICATION,
  3315. "[DN #4]binder: %d:%d ",
  3316. proc->pid, thread->pid);
  3317. switch (w->type) {
  3318. case BINDER_WORK_DEAD_BINDER:
  3319. binder_debug
  3320. (BINDER_DEBUG_DEATH_NOTIFICATION,
  3321. "BINDER_WORK_DEAD_BINDER cookie 0x%016llx\n",
  3322. (u64) death->cookie);
  3323. break;
  3324. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  3325. binder_debug
  3326. (BINDER_DEBUG_DEATH_NOTIFICATION,
  3327. "BINDER_WORK_DEAD_BINDER_AND_CLEAR cookie 0x%016llx\n",
  3328. (u64) death->cookie);
  3329. break;
  3330. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
  3331. binder_debug
  3332. (BINDER_DEBUG_DEATH_NOTIFICATION,
  3333. "BINDER_WORK_CLEAR_DEATH_NOTIFICATION cookie 0x%016llx\n",
  3334. (u64) death->cookie);
  3335. break;
  3336. default:
  3337. binder_debug
  3338. (BINDER_DEBUG_DEATH_NOTIFICATION,
  3339. "UNKNOWN-%d cookie 0x%016llx\n",
  3340. w->type, (u64) death->cookie);
  3341. break;
  3342. }
  3343. #endif
  3344. if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
  3345. cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
  3346. else
  3347. cmd = BR_DEAD_BINDER;
  3348. if (put_user(cmd, (uint32_t __user *) ptr))
  3349. return -EFAULT;
  3350. ptr += sizeof(uint32_t);
  3351. if (put_user(death->cookie, (binder_uintptr_t __user *) ptr))
  3352. return -EFAULT;
  3353. ptr += sizeof(binder_uintptr_t);
  3354. binder_stat_br(proc, thread, cmd);
  3355. binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
  3356. "%d:%d %s %016llx\n",
  3357. proc->pid, thread->pid,
  3358. cmd == BR_DEAD_BINDER ?
  3359. "BR_DEAD_BINDER" :
  3360. "BR_CLEAR_DEATH_NOTIFICATION_DONE",
  3361. (u64) death->cookie);
  3362. if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
  3363. list_del(&w->entry);
  3364. kfree(death);
  3365. binder_stats_deleted(BINDER_STAT_DEATH);
  3366. } else
  3367. list_move(&w->entry, &proc->delivered_death);
  3368. if (cmd == BR_DEAD_BINDER)
  3369. goto done; /* DEAD_BINDER notifications can cause transactions */
  3370. }
  3371. break;
  3372. }
  3373. if (!t)
  3374. continue;
  3375. BUG_ON(t->buffer == NULL);
  3376. if (t->buffer->target_node) {
  3377. struct binder_node *target_node = t->buffer->target_node;
  3378. tr.target.ptr = target_node->ptr;
  3379. tr.cookie = target_node->cookie;
  3380. t->saved_priority = task_nice(current);
  3381. #ifdef RT_PRIO_INHERIT
3382. /* since rt priority inheritance may have failed because the target
3383. * wait queue's task_list was empty, check again here.
  3384. */
  3385. if ((SCHED_RR == t->policy || SCHED_FIFO == t->policy)
  3386. && t->rt_prio > current->rt_priority && !(t->flags & TF_ONE_WAY)) {
  3387. struct sched_param param = {
  3388. .sched_priority = t->rt_prio,
  3389. };
  3390. t->saved_rt_prio = current->rt_priority;
  3391. t->saved_policy = current->policy;
  3392. mt_sched_setscheduler_nocheck(current, t->policy, &param);
  3393. #ifdef BINDER_MONITOR
  3394. if (log_disable & BINDER_RT_LOG_ENABLE) {
  3395. pr_debug
  3396. ("read set %d sched_policy from %d to %d rt_prio from %d to %d\n",
  3397. proc->pid, t->saved_policy,
  3398. t->policy, t->saved_rt_prio, t->rt_prio);
  3399. }
  3400. #endif
  3401. }
  3402. #endif
  3403. if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY))
  3404. binder_set_nice(t->priority);
  3405. else if (!(t->flags & TF_ONE_WAY) ||
  3406. t->saved_priority > target_node->min_priority)
  3407. binder_set_nice(target_node->min_priority);
  3408. cmd = BR_TRANSACTION;
  3409. } else {
  3410. tr.target.ptr = 0;
  3411. tr.cookie = 0;
  3412. cmd = BR_REPLY;
  3413. }
  3414. tr.code = t->code;
  3415. tr.flags = t->flags;
  3416. tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
  3417. if (t->from) {
  3418. struct task_struct *sender = t->from->proc->tsk;
  3419. tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current));
  3420. } else {
  3421. tr.sender_pid = 0;
  3422. }
  3423. tr.data_size = t->buffer->data_size;
  3424. tr.offsets_size = t->buffer->offsets_size;
  3425. tr.data.ptr.buffer = (binder_uintptr_t) ((uintptr_t) t->buffer->data +
  3426. proc->user_buffer_offset);
  3427. tr.data.ptr.offsets =
  3428. tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));
  3429. if (put_user(cmd, (uint32_t __user *) ptr))
  3430. return -EFAULT;
  3431. ptr += sizeof(uint32_t);
  3432. if (copy_to_user(ptr, &tr, sizeof(tr)))
  3433. return -EFAULT;
  3434. ptr += sizeof(tr);
  3435. trace_binder_transaction_received(t);
  3436. binder_stat_br(proc, thread, cmd);
  3437. binder_debug(BINDER_DEBUG_TRANSACTION,
  3438. "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
  3439. proc->pid, thread->pid,
  3440. (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
  3441. "BR_REPLY",
  3442. t->debug_id, t->from ? t->from->proc->pid : 0,
  3443. t->from ? t->from->pid : 0, cmd,
  3444. t->buffer->data_size, t->buffer->offsets_size,
  3445. (u64) tr.data.ptr.buffer, (u64) tr.data.ptr.offsets);
  3446. list_del(&t->work.entry);
  3447. t->buffer->allow_user_free = 1;
  3448. if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
  3449. t->to_parent = thread->transaction_stack;
  3450. t->to_thread = thread;
  3451. thread->transaction_stack = t;
  3452. #ifdef BINDER_MONITOR
  3453. do_posix_clock_monotonic_gettime(&t->exe_timestamp);
  3454. /* monotonic_to_bootbased(&t->exe_timestamp); */
  3455. do_gettimeofday(&t->tv);
3456. /* account for the time zone; translate to Android time */
  3457. t->tv.tv_sec -= (sys_tz.tz_minuteswest * 60);
  3458. t->wait_on = WAIT_ON_EXEC;
  3459. t->tthrd = thread->pid;
  3460. binder_queue_bwdog(t, (time_t) WAIT_BUDGET_EXEC);
  3461. binder_update_transaction_time(&binder_transaction_log, t, 1);
  3462. binder_update_transaction_ttid(&binder_transaction_log, t);
  3463. #endif
  3464. } else {
  3465. t->buffer->transaction = NULL;
  3466. #ifdef BINDER_MONITOR
  3467. binder_cancel_bwdog(t);
  3468. if (cmd == BR_TRANSACTION && (t->flags & TF_ONE_WAY)) {
  3469. binder_update_transaction_time(&binder_transaction_log, t, 1);
  3470. t->tthrd = thread->pid;
  3471. binder_update_transaction_ttid(&binder_transaction_log, t);
  3472. }
  3473. #endif
  3474. kfree(t);
  3475. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  3476. }
  3477. break;
  3478. }
  3479. done:
  3480. *consumed = ptr - buffer;
  3481. if (proc->requested_threads + proc->ready_threads == 0 &&
  3482. proc->requested_threads_started < proc->max_threads &&
  3483. (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))
3484. /* the user-space code fails to spawn */
3485. /* a new thread if we leave this out */
  3486. ) {
  3487. proc->requested_threads++;
  3488. binder_debug(BINDER_DEBUG_THREADS,
  3489. "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid);
  3490. if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
  3491. return -EFAULT;
  3492. binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
  3493. }
  3494. return 0;
  3495. }
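/*
 * binder_release_work() drains a todo list when its owner (thread or process)
 * is going away: pending transactions either get a BR_DEAD_REPLY sent back to
 * the waiting caller or are freed, while undelivered completion and
 * death-notification work items are simply discarded.
 */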
  3496. static void binder_release_work(struct list_head *list)
  3497. {
  3498. struct binder_work *w;
  3499. while (!list_empty(list)) {
  3500. w = list_first_entry(list, struct binder_work, entry);
  3501. list_del_init(&w->entry);
  3502. switch (w->type) {
  3503. case BINDER_WORK_TRANSACTION:{
  3504. struct binder_transaction *t;
  3505. t = container_of(w, struct binder_transaction, work);
  3506. if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
  3507. binder_send_failed_reply(t, BR_DEAD_REPLY);
  3508. } else {
  3509. binder_debug
  3510. (BINDER_DEBUG_DEAD_TRANSACTION,
  3511. "undelivered transaction %d\n", t->debug_id);
  3512. t->buffer->transaction = NULL;
  3513. #ifdef BINDER_MONITOR
  3514. binder_cancel_bwdog(t);
  3515. #endif
  3516. kfree(t);
  3517. binder_stats_deleted(BINDER_STAT_TRANSACTION);
  3518. }
  3519. }
  3520. break;
  3521. case BINDER_WORK_TRANSACTION_COMPLETE:{
  3522. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3523. "undelivered TRANSACTION_COMPLETE\n");
  3524. kfree(w);
  3525. binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
  3526. }
  3527. break;
  3528. case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
  3529. case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:{
  3530. struct binder_ref_death *death;
  3531. death = container_of(w, struct binder_ref_death, work);
  3532. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3533. "undelivered death notification, %016llx\n",
  3534. (u64) death->cookie);
  3535. kfree(death);
  3536. binder_stats_deleted(BINDER_STAT_DEATH);
  3537. } break;
  3538. default:
  3539. pr_err("unexpected work type, %d, not freed\n", w->type);
  3540. break;
  3541. }
  3542. }
  3543. }
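/*
 * binder_get_thread() looks up the calling task in proc->threads (an rbtree
 * keyed by pid) and lazily allocates a binder_thread on first use, marking it
 * NEED_RETURN so the first read returns to userspace promptly.
 */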
  3544. static struct binder_thread *binder_get_thread(struct binder_proc *proc)
  3545. {
  3546. struct binder_thread *thread = NULL;
  3547. struct rb_node *parent = NULL;
  3548. struct rb_node **p = &proc->threads.rb_node;
  3549. while (*p) {
  3550. parent = *p;
  3551. thread = rb_entry(parent, struct binder_thread, rb_node);
  3552. if (current->pid < thread->pid)
  3553. p = &(*p)->rb_left;
  3554. else if (current->pid > thread->pid)
  3555. p = &(*p)->rb_right;
  3556. else
  3557. break;
  3558. }
  3559. if (*p == NULL) {
  3560. thread = kzalloc(sizeof(*thread), GFP_KERNEL);
  3561. if (thread == NULL)
  3562. return NULL;
  3563. binder_stats_created(BINDER_STAT_THREAD);
  3564. thread->proc = proc;
  3565. thread->pid = current->pid;
  3566. init_waitqueue_head(&thread->wait);
  3567. INIT_LIST_HEAD(&thread->todo);
  3568. rb_link_node(&thread->rb_node, parent, p);
  3569. rb_insert_color(&thread->rb_node, &proc->threads);
  3570. thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
  3571. thread->return_error = BR_OK;
  3572. thread->return_error2 = BR_OK;
  3573. }
  3574. return thread;
  3575. }
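/*
 * binder_free_thread() unlinks a thread from its process, walks any
 * transaction stack still referencing it (sending BR_DEAD_REPLY where a
 * caller is still waiting for this thread), releases queued work and frees
 * the structure. Returns the number of transactions that were still active.
 */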
  3576. static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread)
  3577. {
  3578. struct binder_transaction *t;
  3579. struct binder_transaction *send_reply = NULL;
  3580. int active_transactions = 0;
  3581. rb_erase(&thread->rb_node, &proc->threads);
  3582. t = thread->transaction_stack;
  3583. if (t && t->to_thread == thread)
  3584. send_reply = t;
  3585. while (t) {
  3586. active_transactions++;
  3587. binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
  3588. "release %d:%d transaction %d %s, still active\n",
  3589. proc->pid, thread->pid,
  3590. t->debug_id, (t->to_thread == thread) ? "in" : "out");
  3591. #ifdef MTK_BINDER_DEBUG
  3592. pr_err("%d: %p from %d:%d to %d:%d code %x flags %x " "pri %ld r%d "
  3593. #ifdef BINDER_MONITOR
  3594. "start %lu.%06lu"
  3595. #endif
  3596. ,
  3597. t->debug_id, t,
  3598. t->from ? t->from->proc->pid : 0,
  3599. t->from ? t->from->pid : 0,
  3600. t->to_proc ? t->to_proc->pid : 0,
  3601. t->to_thread ? t->to_thread->pid : 0,
  3602. t->code, t->flags, t->priority, t->need_reply
  3603. #ifdef BINDER_MONITOR
  3604. , (unsigned long)t->timestamp.tv_sec, (t->timestamp.tv_nsec / NSEC_PER_USEC)
  3605. #endif
  3606. );
  3607. #endif
  3608. if (t->to_thread == thread) {
  3609. t->to_proc = NULL;
  3610. t->to_thread = NULL;
  3611. if (t->buffer) {
  3612. t->buffer->transaction = NULL;
  3613. t->buffer = NULL;
  3614. }
  3615. t = t->to_parent;
  3616. } else if (t->from == thread) {
  3617. t->from = NULL;
  3618. t = t->from_parent;
  3619. } else
  3620. BUG();
  3621. }
  3622. if (send_reply)
  3623. binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
  3624. binder_release_work(&thread->todo);
  3625. kfree(thread);
  3626. binder_stats_deleted(BINDER_STAT_THREAD);
  3627. return active_transactions;
  3628. }
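/*
 * poll() support: wait on the per-process queue when this thread has no
 * private work or transaction stack of its own, otherwise wait on the
 * per-thread queue; report POLLIN as soon as the matching predicate holds.
 */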
  3629. static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait)
  3630. {
  3631. struct binder_proc *proc = filp->private_data;
  3632. struct binder_thread *thread = NULL;
  3633. int wait_for_proc_work;
  3634. binder_lock(__func__);
  3635. thread = binder_get_thread(proc);
  3636. wait_for_proc_work = thread->transaction_stack == NULL &&
  3637. list_empty(&thread->todo) && thread->return_error == BR_OK;
  3638. binder_unlock(__func__);
  3639. if (wait_for_proc_work) {
  3640. if (binder_has_proc_work(proc, thread))
  3641. return POLLIN;
  3642. poll_wait(filp, &proc->wait, wait);
  3643. if (binder_has_proc_work(proc, thread))
  3644. return POLLIN;
  3645. } else {
  3646. if (binder_has_thread_work(thread))
  3647. return POLLIN;
  3648. poll_wait(filp, &thread->wait, wait);
  3649. if (binder_has_thread_work(thread))
  3650. return POLLIN;
  3651. }
  3652. return 0;
  3653. }
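/*
 * binder_ioctl_write_read() services BINDER_WRITE_READ: it first drains the
 * caller-supplied write buffer through binder_thread_write(), then fills the
 * read buffer via binder_thread_read(), and copies the updated consumed
 * counters back to userspace.
 *
 * Rough userspace sketch (illustrative only; error handling is omitted and
 * 'fd' is assumed to be an already-open /dev/binder descriptor):
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	uint32_t readbuf[32];
 *
 *	bwr.write_buffer = (binder_uintptr_t)&cmd;
 *	bwr.write_size   = sizeof(cmd);
 *	bwr.read_buffer  = (binder_uintptr_t)readbuf;
 *	bwr.read_size    = sizeof(readbuf);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */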
  3654. static int binder_ioctl_write_read(struct file *filp,
  3655. unsigned int cmd, unsigned long arg,
  3656. struct binder_thread *thread)
  3657. {
  3658. int ret = 0;
  3659. struct binder_proc *proc = filp->private_data;
  3660. unsigned int size = _IOC_SIZE(cmd);
  3661. void __user *ubuf = (void __user *)arg;
  3662. struct binder_write_read bwr;
  3663. if (size != sizeof(struct binder_write_read)) {
  3664. ret = -EINVAL;
  3665. goto out;
  3666. }
  3667. if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
  3668. ret = -EFAULT;
  3669. goto out;
  3670. }
  3671. binder_debug(BINDER_DEBUG_READ_WRITE,
  3672. "%d:%d write %lld at %016llx, read %lld at %016llx\n",
  3673. proc->pid, thread->pid,
  3674. (u64) bwr.write_size, (u64) bwr.write_buffer,
  3675. (u64) bwr.read_size, (u64) bwr.read_buffer);
  3676. if (bwr.write_size > 0) {
  3677. ret = binder_thread_write(proc, thread,
  3678. bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
  3679. trace_binder_write_done(ret);
  3680. if (ret < 0) {
  3681. bwr.read_consumed = 0;
  3682. if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
  3683. ret = -EFAULT;
  3684. goto out;
  3685. }
  3686. }
  3687. if (bwr.read_size > 0) {
  3688. ret = binder_thread_read(proc, thread, bwr.read_buffer,
  3689. bwr.read_size,
  3690. &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
  3691. trace_binder_read_done(ret);
  3692. if (!list_empty(&proc->todo)) {
  3693. if (thread->proc != proc) {
  3694. int i;
  3695. unsigned int *p;
  3696. pr_debug("binder: " "thread->proc != proc\n");
  3697. pr_debug("binder: thread %p\n", thread);
  3698. p = (unsigned int *)thread - 32;
  3699. for (i = -4; i <= 3; i++, p += 8) {
  3700. pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
  3701. p, *(p), *(p + 1), *(p + 2),
  3702. *(p + 3), *(p + 4), *(p + 5), *(p + 6), *(p + 7));
  3703. }
  3704. pr_debug("binder: thread->proc " "%p\n", thread->proc);
  3705. p = (unsigned int *)thread->proc - 32;
  3706. for (i = -4; i <= 5; i++, p += 8) {
  3707. pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
  3708. p, *(p), *(p + 1), *(p + 2),
  3709. *(p + 3), *(p + 4), *(p + 5), *(p + 6), *(p + 7));
  3710. }
  3711. pr_debug("binder: proc %p\n", proc);
  3712. p = (unsigned int *)proc - 32;
  3713. for (i = -4; i <= 5; i++, p += 8) {
  3714. pr_debug("%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
  3715. p, *(p), *(p + 1), *(p + 2),
  3716. *(p + 3), *(p + 4), *(p + 5), *(p + 6), *(p + 7));
  3717. }
  3718. BUG();
  3719. }
  3720. wake_up_interruptible(&proc->wait);
  3721. }
  3722. if (ret < 0) {
  3723. if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
  3724. ret = -EFAULT;
  3725. goto out;
  3726. }
  3727. }
  3728. binder_debug(BINDER_DEBUG_READ_WRITE,
  3729. "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
  3730. proc->pid, thread->pid,
  3731. (u64) bwr.write_consumed, (u64) bwr.write_size,
  3732. (u64) bwr.read_consumed, (u64) bwr.read_size);
  3733. if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
  3734. ret = -EFAULT;
  3735. goto out;
  3736. }
  3737. out:
  3738. return ret;
  3739. }
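/*
 * binder_ioctl_set_ctx_mgr() installs the calling process as the context
 * manager (the node behind handle 0). Only one context manager node may
 * exist, and once an euid has claimed the role, later callers must match it.
 */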
  3740. static int binder_ioctl_set_ctx_mgr(struct file *filp, struct binder_thread
  3741. *thread)
  3742. {
  3743. int ret = 0;
  3744. struct binder_proc *proc = filp->private_data;
  3745. kuid_t curr_euid = current_euid();
  3746. if (binder_context_mgr_node != NULL) {
  3747. pr_err("BINDER_SET_CONTEXT_MGR already set\n");
  3748. ret = -EBUSY;
  3749. goto out;
  3750. }
  3751. if (uid_valid(binder_context_mgr_uid)) {
  3752. if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
  3753. pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
  3754. from_kuid(&init_user_ns, curr_euid),
  3755. from_kuid(&init_user_ns, binder_context_mgr_uid));
  3756. ret = -EPERM;
  3757. goto out;
  3758. }
  3759. } else {
  3760. binder_context_mgr_uid = curr_euid;
  3761. }
  3762. binder_context_mgr_node = binder_new_node(proc, 0, 0);
  3763. if (binder_context_mgr_node == NULL) {
  3764. ret = -ENOMEM;
  3765. goto out;
  3766. }
  3767. #ifdef BINDER_MONITOR
  3768. strcpy(binder_context_mgr_node->name, "servicemanager");
  3769. pr_debug("%d:%d set as servicemanager uid %d\n",
  3770. proc->pid, thread->pid, __kuid_val(binder_context_mgr_uid));
  3771. #endif
  3772. binder_context_mgr_node->local_weak_refs++;
  3773. binder_context_mgr_node->local_strong_refs++;
  3774. binder_context_mgr_node->has_strong_ref = 1;
  3775. binder_context_mgr_node->has_weak_ref = 1;
  3776. out:
  3777. return ret;
  3778. }
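/*
 * Top-level ioctl dispatcher. Everything runs under the global binder lock;
 * BINDER_WRITE_READ is the hot path, while the remaining commands configure
 * the process (max threads, context manager, version query, thread exit).
 */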
  3779. static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  3780. {
  3781. int ret;
  3782. struct binder_proc *proc = filp->private_data;
  3783. struct binder_thread *thread;
  3784. unsigned int size = _IOC_SIZE(cmd);
  3785. void __user *ubuf = (void __user *)arg;
  3786. /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg); */
  3787. trace_binder_ioctl(cmd, arg);
  3788. ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
  3789. if (ret)
  3790. goto err_unlocked;
  3791. binder_lock(__func__);
  3792. thread = binder_get_thread(proc);
  3793. if (thread == NULL) {
  3794. ret = -ENOMEM;
  3795. goto err;
  3796. }
  3797. switch (cmd) {
  3798. case BINDER_WRITE_READ:
  3799. ret = binder_ioctl_write_read(filp, cmd, arg, thread);
  3800. if (ret)
  3801. goto err;
  3802. break;
  3803. case BINDER_SET_MAX_THREADS:
  3804. if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
  3805. ret = -EINVAL;
  3806. goto err;
  3807. }
  3808. break;
  3809. case BINDER_SET_CONTEXT_MGR:
  3810. ret = binder_ioctl_set_ctx_mgr(filp, thread);
  3811. if (ret)
  3812. goto err;
  3813. ret = security_binder_set_context_mgr(proc->tsk);
  3814. if (ret < 0)
  3815. goto err;
  3816. break;
  3817. case BINDER_THREAD_EXIT:
  3818. binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid);
  3819. binder_free_thread(proc, thread);
  3820. thread = NULL;
  3821. break;
  3822. case BINDER_VERSION:{
  3823. struct binder_version __user *ver = ubuf;
  3824. if (size != sizeof(struct binder_version)) {
  3825. ret = -EINVAL;
  3826. goto err;
  3827. }
  3828. if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
  3829. ret = -EINVAL;
  3830. goto err;
  3831. }
  3832. break;
  3833. }
  3834. default:
  3835. ret = -EINVAL;
  3836. goto err;
  3837. }
  3838. ret = 0;
  3839. err:
  3840. if (thread)
  3841. thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
  3842. binder_unlock(__func__);
  3843. wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
  3844. if (ret && ret != -ERESTARTSYS)
  3845. pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
  3846. err_unlocked:
  3847. trace_binder_ioctl_done(ret);
  3848. return ret;
  3849. }
  3850. static void binder_vma_open(struct vm_area_struct *vma)
  3851. {
  3852. struct binder_proc *proc = vma->vm_private_data;
  3853. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  3854. "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
  3855. proc->pid, vma->vm_start, vma->vm_end,
  3856. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  3857. (unsigned long)pgprot_val(vma->vm_page_prot));
  3858. }
  3859. static void binder_vma_close(struct vm_area_struct *vma)
  3860. {
  3861. struct binder_proc *proc = vma->vm_private_data;
  3862. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  3863. "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
  3864. proc->pid, vma->vm_start, vma->vm_end,
  3865. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  3866. (unsigned long)pgprot_val(vma->vm_page_prot));
  3867. proc->vma = NULL;
  3868. proc->vma_vm_mm = NULL;
  3869. binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
  3870. }
  3871. static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  3872. {
  3873. return VM_FAULT_SIGBUS;
  3874. }
  3875. static struct vm_operations_struct binder_vm_ops = {
  3876. .open = binder_vma_open,
  3877. .close = binder_vma_close,
  3878. .fault = binder_vm_fault,
  3879. };
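/*
 * binder_mmap() sets up the shared transaction buffer: the mapping is capped
 * at 4 MB, mappings with flags in FORBIDDEN_MMAP_FLAGS are rejected and
 * VM_MAYWRITE is cleared, and backing pages are allocated lazily - only the
 * first page is populated here, the rest comes from
 * binder_update_page_range() as buffers are handed out.
 */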
  3880. static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
  3881. {
  3882. int ret;
  3883. struct vm_struct *area;
  3884. struct binder_proc *proc = filp->private_data;
  3885. const char *failure_string;
  3886. struct binder_buffer *buffer;
  3887. if (proc->tsk != current)
  3888. return -EINVAL;
  3889. if ((vma->vm_end - vma->vm_start) > SZ_4M)
  3890. vma->vm_end = vma->vm_start + SZ_4M;
  3891. binder_debug(BINDER_DEBUG_OPEN_CLOSE,
  3892. "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
  3893. proc->pid, vma->vm_start, vma->vm_end,
  3894. (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
  3895. (unsigned long)pgprot_val(vma->vm_page_prot));
  3896. if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
  3897. ret = -EPERM;
  3898. failure_string = "bad vm_flags";
  3899. goto err_bad_arg;
  3900. }
  3901. vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
  3902. mutex_lock(&binder_mmap_lock);
  3903. if (proc->buffer) {
  3904. ret = -EBUSY;
  3905. failure_string = "already mapped";
  3906. goto err_already_mapped;
  3907. }
  3908. area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
  3909. if (area == NULL) {
  3910. ret = -ENOMEM;
  3911. failure_string = "get_vm_area";
  3912. goto err_get_vm_area_failed;
  3913. }
  3914. proc->buffer = area->addr;
  3915. proc->user_buffer_offset = vma->vm_start - (uintptr_t) proc->buffer;
  3916. mutex_unlock(&binder_mmap_lock);
  3917. #ifdef CONFIG_CPU_CACHE_VIPT
  3918. if (cache_is_vipt_aliasing()) {
  3919. while (CACHE_COLOUR((vma->vm_start ^ (uint32_t) proc->buffer))) {
  3920. pr_info
  3921. ("binder_mmap: %d %lx-%lx maps %p bad alignment\n",
  3922. proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
  3923. vma->vm_start += PAGE_SIZE;
  3924. }
  3925. }
  3926. #endif
  3927. proc->pages =
  3928. kzalloc(sizeof(proc->pages[0]) *
  3929. ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
  3930. if (proc->pages == NULL) {
  3931. ret = -ENOMEM;
  3932. failure_string = "alloc page array";
  3933. goto err_alloc_pages_failed;
  3934. }
  3935. proc->buffer_size = vma->vm_end - vma->vm_start;
  3936. vma->vm_ops = &binder_vm_ops;
  3937. vma->vm_private_data = proc;
  3938. if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
  3939. ret = -ENOMEM;
  3940. failure_string = "alloc small buf";
  3941. goto err_alloc_small_buf_failed;
  3942. }
  3943. buffer = proc->buffer;
  3944. INIT_LIST_HEAD(&proc->buffers);
  3945. list_add(&buffer->entry, &proc->buffers);
  3946. buffer->free = 1;
  3947. binder_insert_free_buffer(proc, buffer);
  3948. proc->free_async_space = proc->buffer_size / 2;
  3949. barrier();
  3950. proc->files = get_files_struct(current);
  3951. proc->vma = vma;
  3952. proc->vma_vm_mm = vma->vm_mm;
  3953. /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
  3954. proc->pid, vma->vm_start, vma->vm_end, proc->buffer); */
  3955. return 0;
  3956. err_alloc_small_buf_failed:
  3957. kfree(proc->pages);
  3958. proc->pages = NULL;
  3959. err_alloc_pages_failed:
  3960. mutex_lock(&binder_mmap_lock);
  3961. vfree(proc->buffer);
  3962. proc->buffer = NULL;
  3963. err_get_vm_area_failed:
  3964. err_already_mapped:
  3965. mutex_unlock(&binder_mmap_lock);
  3966. err_bad_arg:
  3967. pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
  3968. proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
  3969. return ret;
  3970. }
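/*
 * binder_open() allocates the per-process binder_proc, records the opening
 * task and its default scheduling parameters, links the proc into
 * binder_procs and exposes a per-pid debugfs entry.
 */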
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
#ifdef RT_PRIO_INHERIT
	proc->default_rt_prio = current->rt_priority;
	proc->default_policy = current->policy;
#endif

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
					binder_debugfs_dir_entry_proc,
					proc, &binder_proc_fops);
	}

	return 0;
}
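
/*
 * binder_flush() only queues BINDER_DEFERRED_FLUSH; the real work happens in
 * binder_deferred_flush(), which marks every thread of the process with
 * BINDER_LOOPER_STATE_NEED_RETURN and wakes any waiters so that blocked
 * ioctls return to user space.
 */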
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

#ifdef MTK_BINDER_DEBUG
	if (wake_count)
		pr_debug("binder_flush: %d woke %d threads\n",
			 proc->pid, wake_count);
#else
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
#endif
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
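
/*
 * binder_node_release() - retire a node owned by a dying process.
 *
 * A node with no remaining references is freed immediately; otherwise it is
 * moved onto binder_dead_nodes and a BINDER_WORK_DEAD_BINDER item is queued
 * for every reference that registered a death notification.  Returns the
 * updated incoming-reference count.
 */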
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
#ifdef BINDER_MONITOR
	int sys_reg = 0;
#endif
#if defined(MTK_DEATH_NOTIFY_MONITOR) || defined(MTK_BINDER_DEBUG)
	int dead_pid = node->proc ? node->proc->pid : 0;
	char dead_pname[TASK_COMM_LEN] = "";

	if (node->proc && node->proc->tsk)
		strcpy(dead_pname, node->proc->tsk->comm);
#endif

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

#ifdef MTK_DEATH_NOTIFY_MONITOR
		binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
			     "[DN #3]binder: %d:(%s) cookie 0x%016llx\n", dead_pid,
#ifdef BINDER_MONITOR
			     node->name,
#else
			     dead_pname,
#endif
			     (u64)ref->death->cookie);
#endif
#ifdef BINDER_MONITOR
		if (!sys_reg && ref->proc->pid == system_server_pid)
			sys_reg = 1;
#endif

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry, &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

#if defined(BINDER_MONITOR) && defined(MTK_BINDER_DEBUG)
	if (sys_reg)
		pr_debug("%d:%s node %d:%s exits with %d:system_server DeathNotify\n",
			 dead_pid, dead_pname, node->debug_id, node->name,
			 system_server_pid);
#endif

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
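
/*
 * binder_deferred_release() - tear down a binder_proc after its file has
 * been released: free threads, nodes, references and still-allocated
 * buffers, return any pages left mapped in the buffer area, and finally
 * free the binder_proc itself.
 */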
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n", __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG(); */
#ifdef MTK_BINDER_DEBUG
			pr_err("%d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d "
#ifdef BINDER_MONITOR
			       "start %lu.%06lu"
#endif
			       ,
			       t->debug_id, t,
			       t->from ? t->from->proc->pid : 0,
			       t->from ? t->from->pid : 0,
			       t->to_proc ? t->to_proc->pid : 0,
			       t->to_thread ? t->to_thread->pid : 0,
			       t->code, t->flags, t->priority, t->need_reply
#ifdef BINDER_MONITOR
			       , (unsigned long)t->timestamp.tv_sec,
			       (t->timestamp.tv_nsec / NSEC_PER_USEC)
#endif
			       );
#endif
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
#ifdef MTK_BINDER_PAGE_USED_RECORD
			if (binder_page_used > 0)
				binder_page_used--;
			if (proc->page_used > 0)
				proc->page_used--;
#endif
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
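
/*
 * Deferred work (put files, flush, release) is accumulated per process in
 * proc->deferred_work and drained by binder_deferred_func() on the
 * binder_deferred_workqueue, one binder_proc at a time under the global
 * binder lock.
 */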
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;
	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					   struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc);	/* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node, &binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
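
/*
 * The print_binder_*() helpers below feed the debugfs seq_file interfaces
 * (state, stats, transactions, proc).  With BINDER_MONITOR enabled they also
 * emit the MTK timestamp/RTC annotations for each transaction.
 */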
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
#ifdef BINDER_MONITOR
	struct rtc_time tm;

	rtc_time_to_tm(t->tv.tv_sec, &tm);
#endif
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
#ifdef BINDER_MONITOR
		seq_printf(m,
			   " start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu",
			   (unsigned long)t->timestamp.tv_sec,
			   (t->timestamp.tv_nsec / NSEC_PER_USEC),
			   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
			   tm.tm_hour, tm.tm_min, tm.tm_sec,
			   (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
#endif
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d", t->buffer->target_node->debug_id);
#ifdef BINDER_MONITOR
	seq_printf(m, " size %zd:%zd data %p auf %d start %lu.%06lu",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data, t->buffer->allow_user_free,
		   (unsigned long)t->timestamp.tv_sec,
		   (t->timestamp.tv_nsec / NSEC_PER_USEC));
	seq_printf(m, " android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(t->tv.tv_usec / USEC_PER_MSEC));
#else
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size, t->buffer->data);
#endif
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix, struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id, (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread(struct seq_file *m, struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m, " outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m, " incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, " bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, " ", " pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

#ifdef BINDER_MONITOR
	seq_printf(m,
		   " node %d (%s): u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, node->name, (u64)node->ptr,
		   (u64)node->cookie, node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
#else
	seq_printf(m,
		   " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
#endif
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
#ifdef MTK_BINDER_DEBUG
	if (node->async_pid)
		seq_printf(m, " pending async transaction on %d:\n", node->async_pid);
#endif
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, " ", " pending async transaction", w);
}

static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}

static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread, rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref, rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, " ", " pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}

	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
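
/*
 * String tables indexed by command/return/object-stat code; their sizes are
 * checked against the corresponding binder_stats arrays with BUILD_BUG_ON()
 * in print_binder_stats().
 */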
static const char *const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char *const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

static const char *const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}

static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " threads: %d\n", count);
	seq_printf(m, " requested threads: %d+%d/%d\n"
		   " ready threads %d\n"
		   " free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);

		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " buffers: %d\n", count);

	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, " pending transactions: %d\n", count);

	print_binder_stats(m, " ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;
	bool valid_proc = false;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr == proc) {
			valid_proc = true;
			break;
		}
	}
	if (valid_proc) {
		seq_puts(m, "binder proc state:\n");
		print_binder_proc(m, proc, 1);
	}
#ifdef MTK_BINDER_DEBUG
	else
		pr_debug("show proc addr 0x%p exit\n", proc);
#endif
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					       struct binder_transaction_log_entry *e)
{
#ifdef BINDER_MONITOR
	char tmp[30];
	struct rtc_time tm;
	struct timespec sub_read_t, sub_total_t;
	unsigned long read_ms = 0;
	unsigned long total_ms = 0;

	memset(&sub_read_t, 0, sizeof(sub_read_t));
	memset(&sub_total_t, 0, sizeof(sub_total_t));

	if (e->fd != -1)
		sprintf(tmp, " (fd %d)", e->fd);
	else
		tmp[0] = '\0';

	if ((e->call_type == 0) && timespec_valid_strict(&e->endstamp) &&
	    (timespec_compare(&e->endstamp, &e->timestamp) > 0)) {
		sub_total_t = timespec_sub(e->endstamp, e->timestamp);
		total_ms = ((unsigned long)sub_total_t.tv_sec) * MSEC_PER_SEC +
			   sub_total_t.tv_nsec / NSEC_PER_MSEC;
	}
	if ((e->call_type == 1) && timespec_valid_strict(&e->readstamp) &&
	    (timespec_compare(&e->readstamp, &e->timestamp) > 0)) {
		sub_read_t = timespec_sub(e->readstamp, e->timestamp);
		read_ms = ((unsigned long)sub_read_t.tv_sec) * MSEC_PER_SEC +
			  sub_read_t.tv_nsec / NSEC_PER_MSEC;
	}

	rtc_time_to_tm(e->tv.tv_sec, &tm);
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d (%s) size %d:%d%s dex %u",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "),
		   e->from_proc, e->from_thread, e->to_proc, e->to_thread,
		   e->to_node, e->target_handle, e->service,
		   e->data_size, e->offsets_size, tmp, e->code);
	seq_printf(m,
		   " start %lu.%06lu android %d-%02d-%02d %02d:%02d:%02d.%03lu read %lu.%06lu %s %lu.%06lu total %lu.%06lums\n",
		   (unsigned long)e->timestamp.tv_sec,
		   (e->timestamp.tv_nsec / NSEC_PER_USEC),
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(e->tv.tv_usec / USEC_PER_MSEC),
		   (unsigned long)e->readstamp.tv_sec,
		   (e->readstamp.tv_nsec / NSEC_PER_USEC),
		   (e->call_type == 0) ? "end" : "",
		   (e->call_type == 0) ? ((unsigned long)e->endstamp.tv_sec) : 0,
		   (e->call_type == 0) ? (e->endstamp.tv_nsec / NSEC_PER_USEC) : 0,
		   (e->call_type == 0) ? total_ms : read_ms,
		   (e->call_type == 0) ? (sub_total_t.tv_nsec % NSEC_PER_MSEC) :
					 (sub_read_t.tv_nsec % NSEC_PER_MSEC));
#else
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
		   e->target_handle, e->data_size, e->offsets_size);
#endif
}
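
/*
 * With BINDER_MONITOR, the transaction log can be disabled from user space;
 * log_resume_work re-enables it automatically after a delay, and reading the
 * log while BINDER_LOG_RESUME is set re-enables it immediately.
 */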
#ifdef BINDER_MONITOR
static void log_resume_func(struct work_struct *w)
{
	pr_debug("transaction log is self resumed\n");
	log_disable = 0;
}

static DECLARE_DELAYED_WORK(log_resume_work, log_resume_func);

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (!log->entry)
		return 0;

	if (log->full) {
		for (i = log->next; i < log->size; i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);

	if (log_disable & BINDER_LOG_RESUME) {
		pr_debug("%d (%s) read transaction log and resume\n",
			 task_pid_nr(current), current->comm);
		cancel_delayed_work(&log_resume_work);
		log_disable = 0;
	}
	return 0;
}
#else
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}
#endif

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
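
/*
 * BINDER_MONITOR debugfs knobs: "log_level" maps a value of 0-4 to an
 * increasingly verbose binder_debug_mask, and "transaction_log_enable"
 * toggles transaction logging (optionally with self-resume and buffer
 * warnings).
 */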
#ifdef BINDER_MONITOR
static int binder_log_level_show(struct seq_file *m, void *unused)
{
	seq_printf(m, " Current log level: %lu\n", binder_log_level);
	return 0;
}

static ssize_t binder_log_level_write(struct file *filp, const char *ubuf,
				      size_t cnt, loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		copy_size = 32 - 1;

	buf[copy_size] = '\0';

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;

	pr_debug("[Binder] Set binder log level:%lu -> ", binder_log_level);
	ret = kstrtoul(buf, 10, &val);
	if (ret < 0) {
		pr_debug("Null\ninvalid string, need number format, err:%d\n", ret);
		pr_debug("Log Level: 0 ---- 4\n");
		pr_debug(" Less ---- More\n");
		return cnt;	/* string to unsigned long failed */
	}
	pr_debug("%lu\n", val);

	if (val == 0) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION;
		binder_log_level = val;
	} else if (val == 1) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION;
		binder_log_level = val;
	} else if (val == 2) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE;
		binder_log_level = val;
	} else if (val == 3) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE |
		    BINDER_DEBUG_OPEN_CLOSE | BINDER_DEBUG_READ_WRITE;
		binder_log_level = val;
	} else if (val == 4) {
		binder_debug_mask =
		    BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION |
		    BINDER_DEBUG_DEAD_TRANSACTION | BINDER_DEBUG_DEAD_BINDER |
		    BINDER_DEBUG_DEATH_NOTIFICATION | BINDER_DEBUG_THREADS |
		    BINDER_DEBUG_OPEN_CLOSE | BINDER_DEBUG_READ_WRITE |
		    BINDER_DEBUG_TRANSACTION | BINDER_DEBUG_TRANSACTION_COMPLETE |
		    BINDER_DEBUG_USER_REFS | BINDER_DEBUG_INTERNAL_REFS |
		    BINDER_DEBUG_PRIORITY_CAP | BINDER_DEBUG_FREE_BUFFER |
		    BINDER_DEBUG_BUFFER_ALLOC;
		binder_log_level = val;
	} else {
		pr_debug("invalid value:%lu, should be 0 ~ 4\n", val);
	}
	return cnt;
}
static void print_binder_timeout_log_entry(struct seq_file *m,
					   struct binder_timeout_log_entry *e)
{
	struct rtc_time tm;

	rtc_time_to_tm(e->tv.tv_sec, &tm);
	seq_printf(m, "%d:%s %d:%d to %d:%d spends %u000 ms (%s) dex_code %u ",
		   e->debug_id, binder_wait_on_str[e->r],
		   e->from_proc, e->from_thrd, e->to_proc, e->to_thrd,
		   e->over_sec, e->service, e->code);
	seq_printf(m, "start_at %lu.%03ld android %d-%02d-%02d %02d:%02d:%02d.%03lu\n",
		   (unsigned long)e->ts.tv_sec,
		   (e->ts.tv_nsec / NSEC_PER_MSEC),
		   (tm.tm_year + 1900), (tm.tm_mon + 1), tm.tm_mday,
		   tm.tm_hour, tm.tm_min, tm.tm_sec,
		   (unsigned long)(e->tv.tv_usec / USEC_PER_MSEC));
}

static int binder_timeout_log_show(struct seq_file *m, void *unused)
{
	struct binder_timeout_log *log = m->private;
	int i, latest;
	int end_idx = ARRAY_SIZE(log->entry) - 1;

	binder_lock(__func__);

	latest = log->next ? (log->next - 1) : end_idx;
	if (log->next == 0 && !log->full)
		goto timeout_log_show_unlock;

	if (latest >= ARRAY_SIZE(log->entry) || latest < 0) {
		int j;

		pr_alert("timeout log index error, log %p latest %d next %d end_idx %d\n",
			 log, latest, log->next, end_idx);
		for (j = -4; j <= 3; j++) {
			unsigned int *tmp = (unsigned int *)log + (j * 8);

			pr_alert("0x%p %08x %08x %08x %08x %08x %08x %08x %08x\n",
				 tmp,
				 *tmp, *(tmp + 1), *(tmp + 2), *(tmp + 3),
				 *(tmp + 4), *(tmp + 5), *(tmp + 6), *(tmp + 7));
		}
#if defined(CONFIG_MTK_AEE_FEATURE)
		aee_kernel_warning_api(__FILE__, __LINE__,
				       DB_OPT_SWT_JBT_TRACES | DB_OPT_BINDER_INFO,
				       "binder: timeout log index error",
				       "detect for memory corruption\n\n"
				       "check kernel log for more details\n");
#endif
		goto timeout_log_show_unlock;
	}

	for (i = latest; i >= 0; i--)
		print_binder_timeout_log_entry(m, &log->entry[i]);
	if (log->full) {
		for (i = end_idx; i > latest; i--)
			print_binder_timeout_log_entry(m, &log->entry[i]);
	}

timeout_log_show_unlock:
	binder_unlock(__func__);
	return 0;
}

BINDER_DEBUG_SETTING_ENTRY(log_level);
BINDER_DEBUG_ENTRY(timeout_log);
static int binder_transaction_log_enable_show(struct seq_file *m, void *unused)
{
#ifdef BINDER_MONITOR
	seq_printf(m, " Current transaction log is %s %s %s"
#ifdef RT_PRIO_INHERIT
		   " %s"
#endif
		   "\n",
		   (log_disable & 0x1) ? "disabled" : "enabled",
		   (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "",
		   (log_disable & BINDER_BUF_WARN) ? "(buf warning enabled)" : ""
#ifdef RT_PRIO_INHERIT
		   , (log_disable & BINDER_RT_LOG_ENABLE) ? "(rt inherit log enabled)" : ""
#endif
		   );
#else
	seq_printf(m, " Current transaction log is %s %s\n",
		   log_disable ? "disabled" : "enabled",
		   (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "");
#endif
	return 0;
}

static ssize_t binder_transaction_log_enable_write(struct file *filp,
						   const char *ubuf, size_t cnt,
						   loff_t *data)
{
	char buf[32];
	size_t copy_size = cnt;
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		copy_size = 32 - 1;

	buf[copy_size] = '\0';

	if (copy_from_user(&buf, ubuf, copy_size))
		return -EFAULT;

	ret = kstrtoul(buf, 10, &val);
	if (ret < 0) {
		pr_debug("failed to switch logging, need number format\n");
		return cnt;
	}

	log_disable = !(val & 0x1);

	if (log_disable && (val & BINDER_LOG_RESUME)) {
		log_disable |= BINDER_LOG_RESUME;
		queue_delayed_work(binder_deferred_workqueue,
				   &log_resume_work, (120 * HZ));
	}
#ifdef BINDER_MONITOR
	if (val & BINDER_BUF_WARN)
		log_disable |= BINDER_BUF_WARN;
#ifdef RT_PRIO_INHERIT
	if (val & BINDER_RT_LOG_ENABLE)
		log_disable |= BINDER_RT_LOG_ENABLE;
#endif
	pr_debug("%d (%s) set transaction log %s %s %s"
#ifdef RT_PRIO_INHERIT
		 " %s"
#endif
		 "\n",
		 task_pid_nr(current), current->comm,
		 (log_disable & 0x1) ? "disabled" : "enabled",
		 (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "",
		 (log_disable & BINDER_BUF_WARN) ? "(buf warning)" : ""
#ifdef RT_PRIO_INHERIT
		 , (log_disable & BINDER_RT_LOG_ENABLE) ? "(rt inherit log enabled)" : ""
#endif
		 );
#else
	pr_debug("%d (%s) set transaction log %s %s\n",
		 task_pid_nr(current), current->comm,
		 log_disable ? "disabled" : "enabled",
		 (log_disable & BINDER_LOG_RESUME) ? "(self resume)" : "");
#endif
	return cnt;
}

BINDER_DEBUG_SETTING_ENTRY(transaction_log_enable);
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
static int binder_page_used_show(struct seq_file *s, void *p)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	seq_printf(s, "page_used:%d[%dMB]\npage_used_peak:%d[%dMB]\n",
		   binder_page_used, binder_page_used >> 8,
		   binder_page_used_peak, binder_page_used_peak >> 8);

	if (do_lock)
		binder_lock(__func__);
	seq_puts(s, "binder page stats by binder_proc:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node) {
		seq_printf(s, " proc %d(%s):page_used:%d[%dMB] page_used_peak:%d[%dMB]\n",
			   proc->pid, proc->tsk ? proc->tsk->comm : " ",
			   proc->page_used, proc->page_used >> 8,
			   proc->page_used_peak, proc->page_used_peak >> 8);
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

BINDER_DEBUG_ENTRY(page_used);
#endif

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
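
/*
 * binder_init() - module initialisation: start the MTK watchdog thread and
 * allocate the transaction log (BINDER_MONITOR), create the deferred-work
 * workqueue, register /dev/binder as a misc device and populate the debugfs
 * tree.
 */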
static int __init binder_init(void)
{
	int ret;
#ifdef BINDER_MONITOR
	struct task_struct *th;

	th = kthread_create(binder_bwdog_thread, NULL, "binder_watchdog");
	if (IS_ERR(th))
		pr_err("fail to create watchdog thread (err:%li)\n", PTR_ERR(th));
	else
		wake_up_process(th);

	binder_transaction_log_failed.entry = &entry_failed[0];
	binder_transaction_log_failed.size = ARRAY_SIZE(entry_failed);

#ifdef CONFIG_MTK_EXTMEM
	binder_transaction_log.entry =
	    extmem_malloc_page_align(sizeof(struct binder_transaction_log_entry) *
				     MAX_ENG_TRANS_LOG_BUFF_LEN);
	binder_transaction_log.size = MAX_ENG_TRANS_LOG_BUFF_LEN;
	if (binder_transaction_log.entry == NULL) {
		pr_err("%s[%s] ext memory alloc failed!!!\n", __FILE__, __func__);
		binder_transaction_log.entry =
		    vmalloc(sizeof(struct binder_transaction_log_entry) *
			    MAX_ENG_TRANS_LOG_BUFF_LEN);
	}
#else
	binder_transaction_log.entry = &entry_t[0];
	binder_transaction_log.size = ARRAY_SIZE(entry_t);
#endif
#endif

	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);
	ret = misc_register(&binder_miscdev);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
#ifdef BINDER_MONITOR
		/* system_server is the main writer, remember to
		 * change group as "system" for write permission
		 * via related init.rc */
		debugfs_create_file("transaction_log_enable",
				    (S_IRUGO | S_IWUSR | S_IWGRP),
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_transaction_log_enable_fops);
		debugfs_create_file("log_level",
				    (S_IRUGO | S_IWUSR | S_IWGRP),
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_log_level_fops);
		debugfs_create_file("timeout_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_timeout_log_t, &binder_timeout_log_fops);
#endif
#ifdef MTK_BINDER_PAGE_USED_RECORD
		debugfs_create_file("page_used",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL, &binder_page_used_fops);
#endif
	}
	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");