m4u.c 64 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670
  1. #include <linux/uaccess.h>
  2. #include <linux/module.h>
  3. #include <linux/fs.h>
  4. #include <linux/interrupt.h>
  5. #include <linux/platform_device.h>
  6. #include <linux/miscdevice.h>
  7. #include <asm/io.h>
  8. #include <linux/sched.h>
  9. #include <linux/wait.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/delay.h>
  12. #include <linux/fb.h>
  13. /* #include <linux/earlysuspend.h> */
  14. #include <linux/mm.h>
  15. #include <linux/mman.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/slab.h>
  18. #include <linux/timer.h>
  19. #include <mt-plat/sync_write.h>
  20. #include <mach/mt_clkmgr.h>
  21. #include <mach/irqs.h>
  22. #include <asm/cacheflush.h>
  23. #include <linux/mm.h>
  24. #include <linux/pagemap.h>
  25. #include <linux/dma-direction.h>
  26. #include <asm/page.h>
  27. #include <linux/proc_fs.h>
  28. #include "m4u_priv.h"
  29. #include "m4u.h"
  30. #include "m4u_hw.h"
  31. #include <linux/of.h>
  32. #include <linux/of_irq.h>
  33. #include <linux/of_address.h>
  34. #ifdef M4U_TEE_SERVICE_ENABLE
  35. #include "mobicore_driver_api.h"
  36. #include "tz_m4u.h"
  37. #ifdef __M4U_SECURE_SYSTRACE_ENABLE__
  38. #include <linux/sectrace.h>
  39. #endif
  40. int m4u_tee_en = 0;
  41. #endif
  42. #if IS_ENABLED(CONFIG_COMPAT)
  43. #include <linux/uaccess.h>
  44. #include <linux/compat.h>
  45. #endif
  46. static m4u_buf_info_t gMvaNode_unknown = {
  47. .va = 0,
  48. .mva = 0,
  49. .size = 0,
  50. .port = M4U_PORT_UNKNOWN,
  51. };
  52. /* -------------------------------------Global variables------------------------------------------------// */
  53. #ifdef M4U_PROFILE
  54. MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
  55. #endif
  56. #define M4U_DEV_NAME "m4u"
  57. struct m4u_device *gM4uDev;
  58. static int m4u_buf_show(void *priv, unsigned int mva_start, unsigned int mva_end, void *data)
  59. {
  60. m4u_buf_info_t *pMvaInfo = priv;
  61. M4U_PRINT_LOG_OR_SEQ(data, "0x%-8x, 0x%-8x, 0x%lx, 0x%-8x, 0x%x, %s, 0x%x, 0x%x, 0x%x\n",
  62. pMvaInfo->mva, pMvaInfo->mva+pMvaInfo->size-1, pMvaInfo->va,
  63. pMvaInfo->size, pMvaInfo->prot, m4u_get_port_name(pMvaInfo->port),
  64. pMvaInfo->flags, mva_start, mva_end);
  65. return 0;
  66. }
  67. int m4u_dump_buf_info(struct seq_file *seq)
  68. {
  69. M4U_PRINT_LOG_OR_SEQ(seq, "\ndump mva allocated info ========>\n");
  70. M4U_PRINT_LOG_OR_SEQ(seq,
  71. "mva_start mva_end va size prot module flags debug1 debug2\n");
  72. mva_foreach_priv((void *) m4u_buf_show, seq);
  73. M4U_PRINT_LOG_OR_SEQ(seq, " dump mva allocated info done ========>\n");
  74. return 0;
  75. }
  76. #ifdef M4U_PROFILE
  77. static void m4u_profile_init(void)
  78. {
  79. MMP_Event M4U_Event;
  80. MMProfileEnable(1);
  81. M4U_Event = MMProfileRegisterEvent(MMP_RootEvent, "M4U");
  82. /* register events */
  83. M4U_MMP_Events[M4U_MMP_ALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "Alloc MVA");
  84. M4U_MMP_Events[M4U_MMP_DEALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "DeAlloc MVA");
  85. M4U_MMP_Events[M4U_MMP_CONFIG_PORT] = MMProfileRegisterEvent(M4U_Event, "Config Port");
  86. M4U_MMP_Events[M4U_MMP_M4U_ERROR] = MMProfileRegisterEvent(M4U_Event, "M4U ERROR");
  87. M4U_MMP_Events[M4U_MMP_CACHE_SYNC] = MMProfileRegisterEvent(M4U_Event, "M4U_CACHE_SYNC");
  88. M4U_MMP_Events[M4U_MMP_TOGGLE_CG] = MMProfileRegisterEvent(M4U_Event, "M4U_Toggle_CG");
  89. /* enable events by default */
  90. MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], 1);
  91. MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], 1);
  92. MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], 1);
  93. MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_M4U_ERROR], 1);
  94. MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], 1);
  95. /* MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], 0); */
  96. MMProfileStart(1);
  97. }
  98. #endif
  99. /* get ref count on all pages in sgtable */
  100. int m4u_get_sgtable_pages(struct sg_table *table)
  101. {
  102. return 0;
  103. }
  104. /* put ref count on all pages in sgtable */
  105. int m4u_put_sgtable_pages(struct sg_table *table)
  106. {
  107. int i;
  108. struct scatterlist *sg;
  109. for_each_sg(table->sgl, sg, table->nents, i) {
  110. struct page *page = sg_page(sg);
  111. if (page) {
  112. if (!PageReserved(page))
  113. SetPageDirty(page);
  114. page_cache_release(page);
  115. }
  116. }
  117. return 0;
  118. }
  119. static m4u_buf_info_t *m4u_alloc_buf_info(void)
  120. {
  121. m4u_buf_info_t *pList = NULL;
  122. pList = kzalloc(sizeof(m4u_buf_info_t), GFP_KERNEL);
  123. if (pList == NULL) {
  124. M4UMSG("m4u_client_add_buf(), pList=0x%p\n", pList);
  125. return NULL;
  126. }
  127. INIT_LIST_HEAD(&(pList->link));
  128. return pList;
  129. }
  130. static int m4u_free_buf_info(m4u_buf_info_t *pList)
  131. {
  132. kfree(pList);
  133. return 0;
  134. }
  135. static int m4u_client_add_buf(m4u_client_t *client, m4u_buf_info_t *pList)
  136. {
  137. mutex_lock(&(client->dataMutex));
  138. list_add(&(pList->link), &(client->mvaList));
  139. mutex_unlock(&(client->dataMutex));
  140. return 0;
  141. }
  142. /*
  143. static int m4u_client_del_buf(m4u_client_t *client, m4u_buf_info_t *pList)
  144. {
  145. mutex_lock(&(client->dataMutex));
  146. list_del(&(pList->link));
  147. mutex_unlock(&(client->dataMutex));
  148. return 0;
  149. }
  150. */
  151. /***********************************************************/
  152. /** find or delete a buffer from client list
  153. * @param client -- client to be searched
  154. * @param mva -- mva to be searched
  155. * @param del -- should we del this buffer from client?
  156. *
  157. * @return buffer_info if found, NULL on fail
  158. * @remark
  159. * @see
  160. * @to-do we need to add multi domain support here.
  161. * @author K Zhang @date 2013/11/14
  162. ************************************************************/
  163. static m4u_buf_info_t *m4u_client_find_buf(m4u_client_t *client, unsigned int mva, int del)
  164. {
  165. struct list_head *pListHead;
  166. m4u_buf_info_t *pList = NULL;
  167. m4u_buf_info_t *ret = NULL;
  168. if (client == NULL) {
  169. M4UERR("m4u_delete_from_garbage_list(), client is NULL!\n");
  170. m4u_dump_buf_info(NULL);
  171. return NULL;
  172. }
  173. mutex_lock(&(client->dataMutex));
  174. list_for_each(pListHead, &(client->mvaList)) {
  175. pList = container_of(pListHead, m4u_buf_info_t, link);
  176. if (pList->mva == mva)
  177. break;
  178. }
  179. if (pListHead == &(client->mvaList)) {
  180. ret = NULL;
  181. } else {
  182. if (del)
  183. list_del(pListHead);
  184. ret = pList;
  185. }
  186. mutex_unlock(&(client->dataMutex));
  187. return ret;
  188. }
  189. /*
  190. //dump buf info in client
  191. static void m4u_client_dump_buf(m4u_client_t *client, const char *pMsg)
  192. {
  193. m4u_buf_info_t *pList;
  194. struct list_head *pListHead;
  195. M4UMSG("print mva list [%s] ================================>\n", pMsg);
  196. mutex_lock(&(client->dataMutex));
  197. list_for_each(pListHead, &(client->mvaList))
  198. {
  199. pList = container_of(pListHead, m4u_buf_info_t, link);
  200. M4UMSG("port=%s, va=0x%x, size=0x%x, mva=0x%x, prot=%d\n",
  201. m4u_get_port_name(pList->port), pList->va, pList->size, pList->mva, pList->prot);
  202. }
  203. mutex_unlock(&(client->dataMutex));
  204. M4UMSG("print mva list done ==========================>\n");
  205. }
  206. */
  207. m4u_client_t *m4u_create_client(void)
  208. {
  209. m4u_client_t *client;
  210. client = kmalloc(sizeof(m4u_client_t), GFP_ATOMIC);
  211. if (!client)
  212. return NULL;
  213. mutex_init(&(client->dataMutex));
  214. mutex_lock(&(client->dataMutex));
  215. client->open_pid = current->pid;
  216. client->open_tgid = current->tgid;
  217. INIT_LIST_HEAD(&(client->mvaList));
  218. mutex_unlock(&(client->dataMutex));
  219. return client;
  220. }
  221. int m4u_destroy_client(m4u_client_t *client)
  222. {
  223. m4u_buf_info_t *pMvaInfo;
  224. unsigned int mva, size;
  225. M4U_PORT_ID port;
  226. while (1) {
  227. mutex_lock(&(client->dataMutex));
  228. if (list_empty(&client->mvaList)) {
  229. mutex_unlock(&(client->dataMutex));
  230. break;
  231. }
  232. pMvaInfo = container_of(client->mvaList.next, m4u_buf_info_t, link);
  233. M4UMSG("warnning: clean garbage at m4u close: module=%s,va=0x%lx,mva=0x%x,size=%d\n",
  234. m4u_get_port_name(pMvaInfo->port), pMvaInfo->va, pMvaInfo->mva,
  235. pMvaInfo->size);
  236. port = pMvaInfo->port;
  237. mva = pMvaInfo->mva;
  238. size = pMvaInfo->size;
  239. mutex_unlock(&(client->dataMutex));
  240. m4u_reclaim_notify(port, mva, size);
  241. /* m4u_dealloc_mva will lock client->dataMutex again */
  242. m4u_dealloc_mva(client, port, mva);
  243. }
  244. kfree(client);
  245. return 0;
  246. }
  247. static int m4u_dump_mmaps(unsigned long addr)
  248. {
  249. struct vm_area_struct *vma;
  250. M4ULOG_MID("addr=0x%lx, name=%s, pid=0x%x,", addr, current->comm, current->pid);
  251. vma = find_vma(current->mm, addr);
  252. if (vma && (addr >= vma->vm_start)) {
  253. M4ULOG_MID("find vma: 0x%16lx-0x%16lx, flags=0x%lx\n",
  254. (vma->vm_start), (vma->vm_end), vma->vm_flags);
  255. return 0;
  256. }
  257. M4UMSG("cannot find vma for addr 0x%lx\n", addr);
  258. return -1;
  259. }
  260. /* to-do: need modification to support 4G DRAM */
  261. static phys_addr_t m4u_user_v2p(unsigned long va)
  262. {
  263. unsigned long pageOffset = (va & (PAGE_SIZE - 1));
  264. pgd_t *pgd;
  265. pud_t *pud;
  266. pmd_t *pmd;
  267. pte_t *pte;
  268. phys_addr_t pa;
  269. if (NULL == current) {
  270. M4UMSG("warning: m4u_user_v2p, current is NULL!\n");
  271. return 0;
  272. }
  273. if (NULL == current->mm) {
  274. M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s\n",
  275. current->tgid, current->comm);
  276. return 0;
  277. }
  278. pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
  279. if (pgd_none(*pgd) || pgd_bad(*pgd)) {
  280. M4UMSG("m4u_user_v2p(), va=0x%lx, pgd invalid!\n", va);
  281. return 0;
  282. }
  283. pud = pud_offset(pgd, va);
  284. if (pud_none(*pud) || pud_bad(*pud)) {
  285. M4UMSG("m4u_user_v2p(), va=0x%lx, pud invalid!\n", va);
  286. return 0;
  287. }
  288. pmd = pmd_offset(pud, va);
  289. if (pmd_none(*pmd) || pmd_bad(*pmd)) {
  290. M4UMSG("m4u_user_v2p(), va=0x%lx, pmd invalid!\n", va);
  291. return 0;
  292. }
  293. pte = pte_offset_map(pmd, va);
  294. if (pte_present(*pte)) {
  295. /* pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; */
  296. pa = (pte_val(*pte) & (PHYS_MASK) & (~((phys_addr_t) 0xfff))) | pageOffset;
  297. pte_unmap(pte);
  298. return pa;
  299. }
  300. pte_unmap(pte);
  301. M4UMSG("m4u_user_v2p(), va=0x%lx, pte invalid!\n", va);
  302. return 0;
  303. }
  304. static int m4u_fill_sgtable_user(struct vm_area_struct *vma, unsigned long va, int page_num,
  305. struct scatterlist **pSg, int has_page)
  306. {
  307. unsigned long va_align;
  308. phys_addr_t pa = 0;
  309. int i, ret;
  310. struct scatterlist *sg = *pSg;
  311. struct page *pages;
  312. va_align = round_down(va, PAGE_SIZE);
  313. for (i = 0; i < page_num; i++) {
  314. int fault_cnt;
  315. unsigned long va_tmp = va_align+i*PAGE_SIZE;
  316. pa = 0;
  317. for (fault_cnt = 0; fault_cnt < 3000; fault_cnt++) {
  318. if (has_page) {
  319. ret = get_user_pages(current, current->mm, va_tmp, 1,
  320. (vma->vm_flags & VM_WRITE), 0, &pages, NULL);
  321. if (ret == 1)
  322. pa = (page_to_pfn(pages) << PAGE_SHIFT) | (va_tmp & ~PAGE_MASK);
  323. } else {
  324. pa = m4u_user_v2p(va_tmp);
  325. if (!pa) {
  326. handle_mm_fault(current->mm, vma, va_tmp,
  327. (vma->vm_flags&VM_WRITE) ? FAULT_FLAG_WRITE : 0);
  328. }
  329. }
  330. if (pa) {
  331. /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
  332. break;
  333. }
  334. cond_resched();
  335. }
  336. if (!pa || !sg) {
  337. M4UMSG("%s: fail va=0x%lx,page_num=0x%x,fail_va=0x%lx,pa=0x%lx,sg=0x%p,i=%d\n",
  338. __func__, va, page_num, va_tmp, (unsigned long)pa, sg, i);
  339. show_pte(current->mm, va_tmp);
  340. m4u_dump_mmaps(va);
  341. m4u_dump_mmaps(va_tmp);
  342. return -1;
  343. }
  344. if (fault_cnt > 2) {
  345. M4UINFO("warning: handle_mm_fault for %d times\n", fault_cnt);
  346. show_pte(current->mm, va_tmp);
  347. m4u_dump_mmaps(va_tmp);
  348. }
  349. /* debug check... */
  350. if ((pa & (PAGE_SIZE - 1)) != 0) {
  351. M4ULOG_MID("pa error, pa: 0x%lx, va: 0x%lx, align: 0x%lx\n",
  352. (unsigned long)pa, va_tmp, va_align);
  353. }
  354. if (has_page) {
  355. struct page *page;
  356. page = phys_to_page(pa);
  357. /* M4UMSG("page=0x%x, pfn=%d\n", page, __phys_to_pfn(pa)); */
  358. sg_set_page(sg, page, PAGE_SIZE, 0);
  359. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  360. sg->dma_length = sg->length;
  361. #endif
  362. } else {
  363. sg_dma_address(sg) = pa;
  364. sg_dma_len(sg) = PAGE_SIZE;
  365. }
  366. sg = sg_next(sg);
  367. }
  368. *pSg = sg;
  369. return 0;
  370. }
  371. static int m4u_create_sgtable_user(unsigned long va_align, struct sg_table *table)
  372. {
  373. int ret = 0;
  374. struct vm_area_struct *vma;
  375. struct scatterlist *sg = table->sgl;
  376. unsigned int left_page_num = table->nents;
  377. unsigned long va = va_align;
  378. down_read(&current->mm->mmap_sem);
  379. while (left_page_num) {
  380. unsigned int vma_page_num;
  381. vma = find_vma(current->mm, va);
  382. if (vma == NULL || vma->vm_start > va) {
  383. M4UMSG("cannot find vma: va=0x%lx, vma=0x%p\n", va, vma);
  384. m4u_dump_mmaps(va);
  385. ret = -1;
  386. goto out;
  387. } else {
  388. /* M4ULOG_MID("%s va: 0x%lx, vma->vm_start=0x%lx, vma->vm_end=0x%lx\n",
  389. __func__, va, vma->vm_start, vma->vm_end); */
  390. }
  391. vma_page_num = (vma->vm_end - va) / PAGE_SIZE;
  392. vma_page_num = min(vma_page_num, left_page_num);
  393. if ((vma->vm_flags) & VM_PFNMAP) {
  394. /* ion va or ioremap vma has this flag */
  395. /* VM_PFNMAP: Page-ranges managed without "struct page", just pure PFN */
  396. ret = m4u_fill_sgtable_user(vma, va, vma_page_num, &sg, 0);
  397. M4ULOG_MID("alloc_mva VM_PFNMAP va=0x%lx, page_num=0x%x\n", va,
  398. vma_page_num);
  399. } else {
  400. /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
  401. ret = m4u_fill_sgtable_user(vma, va, vma_page_num, &sg, 1);
  402. }
  403. if (ret) {
  404. /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
  405. goto out;
  406. }
  407. left_page_num -= vma_page_num;
  408. va += vma_page_num * PAGE_SIZE;
  409. }
  410. out:
  411. up_read(&current->mm->mmap_sem);
  412. return ret;
  413. }
  414. /* make a sgtable for virtual buffer */
  415. struct sg_table *m4u_create_sgtable(unsigned long va, unsigned int size)
  416. {
  417. struct sg_table *table;
  418. int ret, i, page_num;
  419. unsigned long va_align;
  420. phys_addr_t pa;
  421. struct scatterlist *sg;
  422. struct page *page;
  423. page_num = M4U_GET_PAGE_NUM(va, size);
  424. va_align = round_down(va, PAGE_SIZE);
  425. table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
  426. if (!table) {
  427. M4UMSG("%s table kmalloc fail: va=0x%lx, size=0x%x, page_num=%d\n",
  428. __func__, va, size, page_num);
  429. return ERR_PTR(-ENOMEM);
  430. }
  431. ret = sg_alloc_table(table, page_num, GFP_KERNEL);
  432. if (ret) {
  433. kfree(table);
  434. M4UMSG("%s alloc_sgtable fail: va=0x%lx, size=0x%x, page_num=%d\n",
  435. __func__, va, size, page_num);
  436. return ERR_PTR(-ENOMEM);
  437. }
  438. M4ULOG_LOW("%s va=0x%lx, PAGE_OFFSET=0x%lx, VMALLOC_START=0x%lx, VMALLOC_END=0x%lx\n",
  439. __func__, va, PAGE_OFFSET, VMALLOC_START, VMALLOC_END);
  440. if (va < PAGE_OFFSET) { /* from user space */
  441. if (va >= VMALLOC_START && va <= VMALLOC_END) { /* vmalloc */
  442. M4ULOG_MID(" from user space vmalloc, va = 0x%lx", va);
  443. for_each_sg(table->sgl, sg, table->nents, i) {
  444. page = vmalloc_to_page((void *)(va_align + i * PAGE_SIZE));
  445. if (!page) {
  446. M4UMSG("vmalloc_to_page fail, va=0x%lx\n",
  447. va_align + i * PAGE_SIZE);
  448. goto err;
  449. }
  450. sg_set_page(sg, page, PAGE_SIZE, 0);
  451. }
  452. } else {
  453. ret = m4u_create_sgtable_user(va_align, table);
  454. if (ret) {
  455. M4UMSG("%s error va=0x%lx, size=%d\n", __func__, va, size);
  456. goto err;
  457. }
  458. }
  459. } else { /* from kernel space */
  460. if (va >= VMALLOC_START && va <= VMALLOC_END) { /* vmalloc */
  461. M4ULOG_MID(" from kernel space vmalloc, va = 0x%lx", va);
  462. for_each_sg(table->sgl, sg, table->nents, i) {
  463. page = vmalloc_to_page((void *)(va_align + i * PAGE_SIZE));
  464. if (!page) {
  465. M4UMSG("vmalloc_to_page fail, va=0x%lx\n",
  466. va_align + i * PAGE_SIZE);
  467. goto err;
  468. }
  469. sg_set_page(sg, page, PAGE_SIZE, 0);
  470. }
  471. } else { /* kmalloc to-do: use one entry sgtable. */
  472. for_each_sg(table->sgl, sg, table->nents, i) {
  473. pa = virt_to_phys((void *)(va_align + i * PAGE_SIZE));
  474. page = phys_to_page(pa);
  475. sg_set_page(sg, page, PAGE_SIZE, 0);
  476. }
  477. }
  478. }
  479. return table;
  480. err:
  481. sg_free_table(table);
  482. kfree(table);
  483. return ERR_PTR(-EFAULT);
  484. }
  485. int m4u_destroy_sgtable(struct sg_table *table)
  486. {
  487. if (!IS_ERR_OR_NULL(table)) {
  488. sg_free_table(table);
  489. kfree(table);
  490. }
  491. return 0;
  492. }
  493. /* #define __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__ */
  494. int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
  495. unsigned long va, struct sg_table *sg_table,
  496. unsigned int size, unsigned int prot, unsigned int flags, unsigned int *pMva)
  497. {
  498. int ret;
  499. m4u_buf_info_t *pMvaInfo;
  500. unsigned int mva = 0, mva_align, size_align;
  501. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagStart, va, size);
  502. if (va && sg_table) {
  503. M4UMSG("%s, va or sg_table are both valid: va=0x%lx, sg=0x%p\n", __func__,
  504. va, sg_table);
  505. }
  506. if (va) {
  507. sg_table = m4u_create_sgtable(va, size);
  508. if (IS_ERR_OR_NULL(sg_table)) {
  509. M4UMSG("%s, cannot create sg: larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x\n"
  510. , __func__, m4u_port_2_larb_id(port), m4u_get_port_name(port),
  511. va, sg_table, size, prot, flags);
  512. ret = -EFAULT;
  513. goto err;
  514. }
  515. }
  516. /* here we get correct sg_table for this buffer */
  517. pMvaInfo = m4u_alloc_buf_info();
  518. if (!pMvaInfo) {
  519. ret = -ENOMEM;
  520. goto err;
  521. }
  522. pMvaInfo->va = va;
  523. pMvaInfo->port = port;
  524. pMvaInfo->size = size;
  525. pMvaInfo->prot = prot;
  526. pMvaInfo->flags = flags;
  527. pMvaInfo->sg_table = sg_table;
  528. if (flags & M4U_FLAGS_FIX_MVA)
  529. mva = m4u_do_mva_alloc_fix(*pMva, size, pMvaInfo);
  530. else
  531. mva = m4u_do_mva_alloc(va, size, pMvaInfo);
  532. if (mva == 0) {
  533. m4u_aee_print("alloc mva fail: larb=%d,module=%s,size=%d\n",
  534. m4u_port_2_larb_id(port), m4u_get_port_name(port), size);
  535. m4u_dump_buf_info(NULL);
  536. ret = -EINVAL;
  537. goto err1;
  538. } else
  539. M4ULOG_LOW("%s,mva = 0x%x\n", __func__, mva);
  540. m4u_get_sgtable_pages(sg_table);
  541. mva_align = round_down(mva, PAGE_SIZE);
  542. size_align = PAGE_ALIGN(mva + size - mva_align);
  543. ret = m4u_map_sgtable(m4u_get_domain_by_port(port), mva_align, sg_table,
  544. size_align, pMvaInfo->prot);
  545. if (ret < 0) {
  546. M4UMSG("error to map sgtable\n");
  547. goto err2;
  548. }
  549. pMvaInfo->mva = mva;
  550. pMvaInfo->mva_align = mva_align;
  551. pMvaInfo->size_align = size_align;
  552. *pMva = mva;
  553. if (flags & M4U_FLAGS_SEQ_ACCESS)
  554. pMvaInfo->seq_id = m4u_insert_seq_range(port, mva, mva + size - 1);
  555. m4u_client_add_buf(client, pMvaInfo);
  556. M4ULOG_MID("%s: pMvaInfo=0x%p, larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x,mva=0x%x\n",
  557. __func__, pMvaInfo, m4u_port_2_larb_id(port), m4u_get_port_name(port), va, sg_table,
  558. size, prot, flags, mva);
  559. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, mva);
  560. #ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
  561. /* map this mva to kernel va just for debug */
  562. {
  563. unsigned long kernel_va;
  564. unsigned int kernel_size;
  565. int ret;
  566. ret = m4u_mva_map_kernel(mva, size, &kernel_va, &kernel_size);
  567. if (ret)
  568. M4UMSG("error to map kernel va: mva=0x%x, size=%d\n", mva, size);
  569. else {
  570. pMvaInfo->mapped_kernel_va_for_debug = kernel_va;
  571. M4ULOG_MID("[kernel_va_debug] map va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n",
  572. mva, kernel_va, size);
  573. }
  574. }
  575. #endif
  576. return 0;
  577. err2:
  578. m4u_do_mva_free(mva, size);
  579. err1:
  580. m4u_free_buf_info(pMvaInfo);
  581. err:
  582. if (va)
  583. m4u_destroy_sgtable(sg_table);
  584. *pMva = 0;
  585. M4UMSG("error: larb=%d,module=%s,va=0x%lx,size=%d,prot=0x%x,flags=0x%x, mva=0x%x\n",
  586. m4u_port_2_larb_id(port), m4u_get_port_name(port), va, size, prot, flags, mva);
  587. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, 0);
  588. return ret;
  589. }
  590. /* interface for ion */
  591. static m4u_client_t *ion_m4u_client;
  592. int m4u_alloc_mva_sg(int eModuleID,
  593. struct sg_table *sg_table,
  594. const unsigned int BufSize,
  595. int security, int cache_coherent, unsigned int *pRetMVABuf)
  596. {
  597. int prot;
  598. if (!ion_m4u_client) {
  599. ion_m4u_client = m4u_create_client();
  600. if (IS_ERR_OR_NULL(ion_m4u_client)) {
  601. ion_m4u_client = NULL;
  602. return -1;
  603. }
  604. }
  605. prot = M4U_PROT_READ | M4U_PROT_WRITE
  606. | (cache_coherent ? (M4U_PROT_SHARE | M4U_PROT_CACHE) : 0)
  607. | (security ? M4U_PROT_SEC : 0);
  608. return m4u_alloc_mva(ion_m4u_client, eModuleID, 0, sg_table, BufSize, prot, 0, pRetMVABuf);
  609. }
  610. #ifdef M4U_TEE_SERVICE_ENABLE
  611. static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size);
  612. int m4u_register_mva_share(int eModuleID, unsigned int mva)
  613. {
  614. m4u_buf_info_t *pMvaInfo;
  615. pMvaInfo = mva_get_priv(mva);
  616. if (!pMvaInfo) {
  617. M4UMSG("%s cannot find mva: module=%s, mva=0x%x\n", __func__,
  618. m4u_get_port_name(eModuleID), mva);
  619. return -1;
  620. }
  621. pMvaInfo->flags |= M4U_FLAGS_SEC_SHAREABLE;
  622. return 0;
  623. }
  624. #endif
  625. int m4u_dealloc_mva_sg(int eModuleID, struct sg_table *sg_table,
  626. const unsigned int BufSize, const unsigned int MVA)
  627. {
  628. if (!ion_m4u_client) {
  629. m4u_aee_print("ion_m4u_client==NULL !! oops oops~~~~\n");
  630. return -1;
  631. }
  632. return m4u_dealloc_mva(ion_m4u_client, eModuleID, MVA);
  633. }
  634. /* should not hold client->dataMutex here. */
  635. int m4u_dealloc_mva(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva)
  636. {
  637. m4u_buf_info_t *pMvaInfo;
  638. int ret, is_err = 0;
  639. unsigned int size;
  640. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, port, mva);
  641. pMvaInfo = m4u_client_find_buf(client, mva, 1);
  642. if (unlikely(!pMvaInfo)) {
  643. M4UMSG("error: m4u_dealloc_mva no mva found in client! module=%s, mva=0x%x\n",
  644. m4u_get_port_name(port), mva);
  645. m4u_dump_buf_info(NULL);
  646. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, 0x5a5a5a5a, mva);
  647. return -EINVAL;
  648. }
  649. pMvaInfo->flags |= M4U_FLAGS_MVA_IN_FREE;
  650. M4ULOG_MID("m4u_dealloc_mva: larb=%d,module=%s,mva=0x%x, size=%d\n",
  651. m4u_port_2_larb_id(port), m4u_get_port_name(port), mva, pMvaInfo->size);
  652. #ifdef M4U_TEE_SERVICE_ENABLE
  653. if (pMvaInfo->flags & M4U_FLAGS_SEC_SHAREABLE)
  654. m4u_unmap_nonsec_buffer(mva, pMvaInfo->size);
  655. #endif
  656. ret = m4u_unmap(m4u_get_domain_by_port(port), pMvaInfo->mva_align, pMvaInfo->size_align);
  657. if (ret) {
  658. is_err = 1;
  659. M4UMSG("m4u_unmap fail\n");
  660. }
  661. if (0 != pMvaInfo->va) {
  662. /* non ion buffer*/
  663. if (pMvaInfo->va < PAGE_OFFSET) { /* from user space */
  664. if (!(pMvaInfo->va >= VMALLOC_START && pMvaInfo->va <= VMALLOC_END)) { /* non vmalloc */
  665. m4u_put_sgtable_pages(pMvaInfo->sg_table);
  666. }
  667. }
  668. }
  669. ret = m4u_do_mva_free(mva, pMvaInfo->size);
  670. if (ret) {
  671. is_err = 1;
  672. M4UMSG("do_mva_free fail\n");
  673. }
  674. if (pMvaInfo->va) { /* buffer is allocated by va */
  675. m4u_destroy_sgtable(pMvaInfo->sg_table);
  676. }
  677. if (pMvaInfo->flags & M4U_FLAGS_SEQ_ACCESS) {
  678. if (pMvaInfo->seq_id > 0)
  679. m4u_invalid_seq_range_by_id(port, pMvaInfo->seq_id);
  680. }
  681. if (is_err) {
  682. m4u_aee_print("%s fail: port=%s, mva=0x%x, size=0x%x, va=0x%lx\n", __func__,
  683. m4u_get_port_name(port), mva, pMvaInfo->size, pMvaInfo->va);
  684. ret = -EINVAL;
  685. } else
  686. ret = 0;
  687. size = pMvaInfo->size;
  688. #ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
  689. /* unmap kernel va for debug */
  690. {
  691. if (pMvaInfo->mapped_kernel_va_for_debug) {
  692. M4ULOG_MID("[kernel_va_debug] unmap va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n",
  693. pMvaInfo->mva, pMvaInfo->mapped_kernel_va_for_debug, pMvaInfo->size);
  694. m4u_mva_unmap_kernel(pMvaInfo->mva, pMvaInfo->size,
  695. pMvaInfo->mapped_kernel_va_for_debug);
  696. }
  697. }
  698. #endif
  699. m4u_free_buf_info(pMvaInfo);
  700. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagEnd, size, mva);
  701. return ret;
  702. }
  703. int m4u_dma_cache_flush_all(void)
  704. {
  705. smp_inner_dcache_flush_all();
  706. outer_flush_all();
  707. return 0;
  708. }
  709. static struct vm_struct *cache_map_vm_struct;
  710. static int m4u_cache_sync_init(void)
  711. {
  712. cache_map_vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
  713. if (!cache_map_vm_struct)
  714. return -ENOMEM;
  715. return 0;
  716. }
  717. static void *m4u_cache_map_page_va(struct page *page)
  718. {
  719. int ret;
  720. struct page **ppPage = &page;
  721. ret = map_vm_area(cache_map_vm_struct, PAGE_KERNEL, ppPage);
  722. if (ret) {
  723. M4UMSG("error to map page\n");
  724. return NULL;
  725. }
  726. return cache_map_vm_struct->addr;
  727. }
  728. static void m4u_cache_unmap_page_va(unsigned int va)
  729. {
  730. unmap_kernel_range((unsigned long)cache_map_vm_struct->addr, PAGE_SIZE);
  731. }
  732. static int __m4u_cache_sync_kernel(const void *start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
  733. {
  734. if (sync_type == M4U_CACHE_CLEAN_BY_RANGE)
  735. dmac_map_area((void *)start, size, DMA_TO_DEVICE);
  736. else if (sync_type == M4U_CACHE_INVALID_BY_RANGE)
  737. dmac_unmap_area((void *)start, size, DMA_FROM_DEVICE);
  738. else if (sync_type == M4U_CACHE_FLUSH_BY_RANGE)
  739. dmac_flush_range((void *)start, (void *)(start + size));
  740. return 0;
  741. }
  742. static struct page *m4u_cache_get_page(unsigned long va)
  743. {
  744. unsigned long start;
  745. phys_addr_t pa;
  746. struct page *page;
  747. start = va & (~M4U_PAGE_MASK);
  748. pa = m4u_user_v2p(start);
  749. if ((pa == 0)) {
  750. M4UMSG("error m4u_get_phys user_v2p return 0 on va=0x%lx\n", start);
  751. /* dump_page(page); */
  752. m4u_dump_mmaps(start);
  753. show_pte(current->mm, va);
  754. return NULL;
  755. }
  756. page = phys_to_page(pa);
  757. return page;
  758. }
  759. /* lock to protect cache_map_vm_struct */
  760. static DEFINE_MUTEX(gM4u_cache_sync_user_lock);
  761. static int __m4u_cache_sync_user(unsigned long start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
  762. {
  763. unsigned long map_size, map_start, map_end;
  764. unsigned long end = start + size;
  765. struct page *page;
  766. unsigned long map_va, map_va_align;
  767. int ret = 0;
  768. mutex_lock(&gM4u_cache_sync_user_lock);
  769. if (!cache_map_vm_struct) {
  770. M4UMSG(" error: cache_map_vm_struct is NULL, retry\n");
  771. m4u_cache_sync_init();
  772. }
  773. if (!cache_map_vm_struct) {
  774. M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n");
  775. ret = -1;
  776. goto out;
  777. }
  778. map_start = start;
  779. while (map_start < end) {
  780. map_end = min(((map_start & (~M4U_PAGE_MASK)) + PAGE_SIZE), end);
  781. map_size = map_end - map_start;
  782. page = m4u_cache_get_page(map_start);
  783. if (!page) {
  784. ret = -1;
  785. goto out;
  786. }
  787. map_va = (unsigned long)m4u_cache_map_page_va(page);
  788. if (!map_va) {
  789. ret = -1;
  790. goto out;
  791. }
  792. map_va_align = map_va | (map_start & (PAGE_SIZE - 1));
  793. __m4u_cache_sync_kernel((void *)map_va_align, map_size, sync_type);
  794. m4u_cache_unmap_page_va(map_va);
  795. map_start = map_end;
  796. }
  797. out:
  798. mutex_unlock(&gM4u_cache_sync_user_lock);
  799. return ret;
  800. }
  801. int m4u_cache_sync_by_range(unsigned long va, unsigned int size,
  802. M4U_CACHE_SYNC_ENUM sync_type, struct sg_table *table)
  803. {
  804. int ret = 0;
  805. if (va < PAGE_OFFSET) { /* from user space */
  806. ret = __m4u_cache_sync_user(va, size, sync_type);
  807. } else {
  808. ret = __m4u_cache_sync_kernel((void *)va, size, sync_type);
  809. }
  810. #ifdef CONFIG_OUTER_CACHE
  811. {
  812. struct scatterlist *sg;
  813. int i;
  814. for_each_sg(table->sgl, sg, table->nents, i) {
  815. unsigned int len = sg_dma_len(sg);
  816. phys_addr_t phys_addr = get_sg_phys(sg);
  817. if (sync_type == M4U_CACHE_CLEAN_BY_RANGE)
  818. outer_clean_range(phys_addr, phys_addr + len);
  819. else if (sync_type == M4U_CACHE_INVALID_BY_RANGE)
  820. outer_inv_range(phys_addr, phys_addr + len);
  821. else if (sync_type == M4U_CACHE_FLUSH_BY_RANGE)
  822. outer_flush_range(phys_addr, phys_addr + len);
  823. }
  824. }
  825. #endif
  826. return ret;
  827. }
  828. /**
  829. notes: only mva allocated by m4u_alloc_mva can use this function.
  830. if buffer is allocated by ion, please use ion_cache_sync
  831. **/
  832. int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
  833. unsigned long va, unsigned int size, unsigned int mva,
  834. M4U_CACHE_SYNC_ENUM sync_type)
  835. {
  836. int ret = 0;
  837. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagStart, va, mva);
  838. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagPulse, size, ((sync_type)<<24) | port);
  839. M4ULOG_MID("cache_sync port=%s, va=0x%lx, size=0x%x, mva=0x%x, type=%d\n",
  840. m4u_get_port_name(port), va, size, mva, sync_type);
  841. if (sync_type < M4U_CACHE_CLEAN_ALL) {
  842. m4u_buf_info_t *pMvaInfo = NULL;
  843. if (client)
  844. pMvaInfo = m4u_client_find_buf(client, mva, 0);
  845. /* some user may sync mva from other client (eg. ovl may not know
  846. * who allocated this buffer, but he need to sync cache). */
  847. /* we make a workaround here by query mva from mva manager */
  848. if (!pMvaInfo)
  849. pMvaInfo = mva_get_priv(mva);
  850. if (!pMvaInfo) {
  851. M4UMSG("cache sync fail, cannot find buf: mva=0x%x, client=0x%p\n", mva,
  852. client);
  853. m4u_dump_buf_info(NULL);
  854. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, 0, 0);
  855. return -1;
  856. }
  857. if ((pMvaInfo->size != size) || (pMvaInfo->va != va)) {
  858. M4UMSG("cache_sync fail: expect mva=0x%x,size=0x%x,va=0x%lx, but mva=0x%x,size=0x%x,va=0x%lx\n",
  859. pMvaInfo->mva, pMvaInfo->size, pMvaInfo->va, mva, size, va);
  860. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd,
  861. pMvaInfo->va, pMvaInfo->mva);
  862. return -1;
  863. }
  864. if ((va | size) & (L1_CACHE_BYTES - 1)) { /* va size should be cache line align */
  865. M4UMSG("warning: cache_sync not align: va=0x%lx,size=0x%x,align=0x%x\n",
  866. va, size, L1_CACHE_BYTES);
  867. }
  868. ret = m4u_cache_sync_by_range(va, size, sync_type, pMvaInfo->sg_table);
  869. } else {
  870. /* All cache operation */
  871. if (sync_type == M4U_CACHE_CLEAN_ALL) {
  872. smp_inner_dcache_flush_all();
  873. outer_clean_all();
  874. } else if (sync_type == M4U_CACHE_INVALID_ALL) {
  875. M4UMSG("no one can use invalid all!\n");
  876. return -1;
  877. } else if (sync_type == M4U_CACHE_FLUSH_ALL) {
  878. smp_inner_dcache_flush_all();
  879. outer_flush_all();
  880. }
  881. }
  882. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, size, mva);
  883. return ret;
  884. }
  885. void m4u_dma_map_area(void *start, size_t size, M4U_DMA_DIR dir)
  886. {
  887. if (dir == M4U_DMA_FROM_DEVICE)
  888. dmac_map_area(start, size, DMA_FROM_DEVICE);
  889. else if (dir == M4U_DMA_TO_DEVICE)
  890. dmac_map_area(start, size, DMA_TO_DEVICE);
  891. else if (dir == M4U_DMA_BIDIRECTIONAL)
  892. dmac_map_area(start, size, DMA_BIDIRECTIONAL);
  893. }
  894. void m4u_dma_unmap_area(void *start, size_t size, M4U_DMA_DIR dir)
  895. {
  896. if (dir == M4U_DMA_FROM_DEVICE)
  897. dmac_unmap_area(start, size, DMA_FROM_DEVICE);
  898. else if (dir == M4U_DMA_TO_DEVICE)
  899. dmac_unmap_area(start, size, DMA_TO_DEVICE);
  900. else if (dir == M4U_DMA_BIDIRECTIONAL)
  901. dmac_unmap_area(start, size, DMA_BIDIRECTIONAL);
  902. }
  903. long m4u_dma_op(m4u_client_t *client, M4U_PORT_ID port,
  904. unsigned long va, unsigned int size, unsigned int mva,
  905. M4U_DMA_TYPE dma_type, M4U_DMA_DIR dma_dir)
  906. {
  907. struct scatterlist *sg;
  908. int i, j;
  909. struct sg_table *table = NULL;
  910. int npages = 0;
  911. unsigned long start = -1;
  912. m4u_buf_info_t *pMvaInfo = NULL;
  913. if (client)
  914. pMvaInfo = m4u_client_find_buf(client, mva, 0);
  915. /* some user may sync mva from other client
  916. (eg. ovl may not know who allocated this buffer,
  917. but he need to sync cache).
  918. we make a workaround here by query mva from mva manager */
  919. if (!pMvaInfo)
  920. pMvaInfo = mva_get_priv(mva);
  921. if (!pMvaInfo) {
  922. M4UMSG("m4u dma fail, cannot find buf: mva=0x%x, client=0x%p.\n", mva, client);
  923. return -1;
  924. }
  925. if ((pMvaInfo->size != size) || (pMvaInfo->va != va)) {
  926. M4UMSG("m4u dma fail: expect mva=0x%x,size=0x%x,va=0x%lx, but mva=0x%x,size=0x%x,va=0x%lx\n",
  927. pMvaInfo->mva, pMvaInfo->size, pMvaInfo->va, mva, size, va);
  928. return -1;
  929. }
  930. if ((va|size) & (L1_CACHE_BYTES-1)) /* va size should be cache line align */
  931. M4UMSG("warning: cache_sync not align: va=0x%lx,size=0x%x,align=0x%x\n",
  932. va, size, L1_CACHE_BYTES);
  933. table = pMvaInfo->sg_table;
  934. /* npages = PAGE_ALIGN(size) / PAGE_SIZE; */
  935. npages = M4U_GET_PAGE_NUM(va, size);
  936. mutex_lock(&gM4u_cache_sync_user_lock);
  937. if (!cache_map_vm_struct) {
  938. M4UMSG(" error: cache_map_vm_struct is NULL, retry\n");
  939. m4u_cache_sync_init();
  940. }
  941. if (!cache_map_vm_struct) {
  942. M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n");
  943. mutex_unlock(&gM4u_cache_sync_user_lock);
  944. return -ENOMEM;
  945. }
  946. for_each_sg(table->sgl, sg, table->nents, i) {
  947. int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
  948. struct page *page = sg_page(sg);
  949. if (!page) {
  950. phys_addr_t pa = sg_dma_address(sg);
  951. if (!pa) {
  952. M4UMSG("m4u_dma_op fail, VM_PFNMAP, no page.\n");
  953. return -EFAULT;
  954. }
  955. page = phys_to_page(pa);
  956. if (!pfn_valid(page_to_pfn(page))) {
  957. M4UMSG("m4u_dma_op fail, VM_PFNMAP, no page, va = 0x%lx, size = 0x%x, npages = 0x%x.\n",
  958. va, size, npages);
  959. return -EFAULT;
  960. }
  961. }
  962. BUG_ON(i >= npages);
  963. for (j = 0; j < npages_this_entry; j++) {
  964. start = (unsigned long) m4u_cache_map_page_va(page++);
  965. if (IS_ERR_OR_NULL((void *) start)) {
  966. M4UMSG("cannot do cache sync: ret=%lu\n", start);
  967. mutex_unlock(&gM4u_cache_sync_user_lock);
  968. return -EFAULT;
  969. }
  970. if (dma_type == M4U_DMA_MAP_AREA)
  971. m4u_dma_map_area((void *)start, PAGE_SIZE, dma_dir);
  972. else if (dma_type == M4U_DMA_UNMAP_AREA)
  973. m4u_dma_unmap_area((void *)start, PAGE_SIZE, dma_dir);
  974. m4u_cache_unmap_page_va(start);
  975. }
  976. }
  977. mutex_unlock(&gM4u_cache_sync_user_lock);
  978. return 0;
  979. }
  980. int m4u_dump_info(int m4u_index)
  981. {
  982. return 0;
  983. }
  984. void m4u_get_pgd(m4u_client_t *client, M4U_PORT_ID port, void **pgd_va, void **pgd_pa,
  985. unsigned int *size)
  986. {
  987. m4u_domain_t *pDomain;
  988. pDomain = m4u_get_domain_by_port(port);
  989. *pgd_va = pDomain->pgd;
  990. *pgd_pa = (void *)(uintptr_t)pDomain->pgd_pa;
  991. *size = M4U_PGD_SIZE;
  992. }
  993. unsigned long m4u_mva_to_pa(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva)
  994. {
  995. unsigned long pa;
  996. m4u_domain_t *pDomain;
  997. pDomain = m4u_get_domain_by_port(port);
  998. pa = m4u_get_pte(pDomain, mva);
  999. return pa;
  1000. }
  1001. int m4u_query_mva_info(unsigned int mva, unsigned int size, unsigned int *real_mva,
  1002. unsigned int *real_size)
  1003. {
  1004. m4u_buf_info_t *pMvaInfo;
  1005. if ((!real_mva) || (!real_size))
  1006. return -1;
  1007. pMvaInfo = mva_get_priv(mva);
  1008. if (!pMvaInfo) {
  1009. M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __func__, mva, size);
  1010. *real_mva = 0;
  1011. *real_size = 0;
  1012. return -2;
  1013. }
  1014. *real_mva = pMvaInfo->mva;
  1015. *real_size = pMvaInfo->size;
  1016. return 0;
  1017. }
  1018. EXPORT_SYMBOL(m4u_query_mva_info);
  1019. /***********************************************************/
  1020. /** map mva buffer to kernel va buffer
  1021. * this function should ONLY used for DEBUG
  1022. ************************************************************/
  1023. int m4u_mva_map_kernel(unsigned int mva, unsigned int size, unsigned long *map_va,
  1024. unsigned int *map_size)
  1025. {
  1026. m4u_buf_info_t *pMvaInfo;
  1027. struct sg_table *table;
  1028. struct scatterlist *sg;
  1029. int i, j, k, ret = 0;
  1030. struct page **pages;
  1031. unsigned int page_num;
  1032. void *kernel_va;
  1033. unsigned int kernel_size;
  1034. pMvaInfo = mva_get_priv(mva);
  1035. if (!pMvaInfo || pMvaInfo->size < size) {
  1036. M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __func__, mva, size);
  1037. if (pMvaInfo)
  1038. M4UMSG("pMvaInfo: mva=0x%x, size=0x%x\n", pMvaInfo->mva, pMvaInfo->size);
  1039. return -1;
  1040. }
  1041. table = pMvaInfo->sg_table;
  1042. page_num = M4U_GET_PAGE_NUM(mva, size);
  1043. pages = vmalloc(sizeof(struct page *) * page_num);
  1044. if (pages == NULL) {
  1045. M4UMSG("mva_map_kernel:error to vmalloc for %d\n",
  1046. (unsigned int)sizeof(struct page *) * page_num);
  1047. return -1;
  1048. }
  1049. k = 0;
  1050. for_each_sg(table->sgl, sg, table->nents, i) {
  1051. struct page *page_start;
  1052. int pages_in_this_sg = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
  1053. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  1054. if (0 == sg_dma_address(sg))
  1055. pages_in_this_sg = PAGE_ALIGN(sg->length) / PAGE_SIZE;
  1056. #endif
  1057. page_start = sg_page(sg);
  1058. for (j = 0; j < pages_in_this_sg; j++) {
  1059. pages[k++] = page_start++;
  1060. if (k >= page_num)
  1061. goto get_pages_done;
  1062. }
  1063. }
  1064. get_pages_done:
  1065. if (k < page_num) {
  1066. /* this should not happen, because we have checked the size before. */
  1067. M4UMSG("mva_map_kernel:only get %d pages: mva=0x%x, size=0x%x, pg_num=%d\n", k, mva,
  1068. size, page_num);
  1069. ret = -1;
  1070. goto error_out;
  1071. }
  1072. kernel_va = 0;
  1073. kernel_size = 0;
  1074. kernel_va = vmap(pages, page_num, VM_MAP, PAGE_KERNEL);
  1075. if (kernel_va == 0 || (unsigned long)kernel_va & M4U_PAGE_MASK) {
  1076. M4UMSG("mva_map_kernel:vmap fail: page_num=%d, kernel_va=0x%p\n", page_num,
  1077. kernel_va);
  1078. ret = -2;
  1079. goto error_out;
  1080. }
  1081. kernel_va += ((unsigned long)mva & (M4U_PAGE_MASK));
  1082. *map_va = (unsigned long)kernel_va;
  1083. *map_size = size;
  1084. error_out:
  1085. vfree(pages);
  1086. M4ULOG_LOW("mva_map_kernel:mva=0x%x,size=0x%x,map_va=0x%lx,map_size=0x%x\n",
  1087. mva, size, *map_va, *map_size);
  1088. return ret;
  1089. }
  1090. EXPORT_SYMBOL(m4u_mva_map_kernel);
  1091. int m4u_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned long map_va)
  1092. {
  1093. M4ULOG_LOW("mva_unmap_kernel:mva=0x%x,size=0x%x,va=0x%lx\n", mva, size, map_va);
  1094. vunmap((void *)(map_va & (~M4U_PAGE_MASK)));
  1095. return 0;
  1096. }
  1097. EXPORT_SYMBOL(m4u_mva_unmap_kernel);
  1098. static int MTK_M4U_open(struct inode *inode, struct file *file)
  1099. {
  1100. m4u_client_t *client;
  1101. client = m4u_create_client();
  1102. if (IS_ERR_OR_NULL(client)) {
  1103. M4UMSG("createclientfail\n");
  1104. return -ENOMEM;
  1105. }
  1106. file->private_data = client;
  1107. return 0;
  1108. }
  1109. static int MTK_M4U_release(struct inode *inode, struct file *file)
  1110. {
  1111. m4u_client_t *client = file->private_data;
  1112. m4u_destroy_client(client);
  1113. return 0;
  1114. }
  1115. static int MTK_M4U_flush(struct file *filp, fl_owner_t a_id)
  1116. {
  1117. return 0;
  1118. }
  1119. #ifdef M4U_TEE_SERVICE_ENABLE
  1120. #define TPLAY_DEV_NAME "tz_m4u"
  1121. #define M4U_DRV_UUID {{0x90, 0x73, 0xF0, 0x3A, 0x96, 0x18, 0x38, 0x3B, 0xB1, 0x85, 0x6E, 0xB3, 0xF9, 0x90, 0xBA, 0xBD} }
  1122. static const struct mc_uuid_t m4u_drv_uuid = M4U_DRV_UUID;
  1123. static struct mc_session_handle m4u_dci_session;
  1124. static m4u_msg_t *m4u_dci_msg;
  1125. static DEFINE_MUTEX(m4u_dci_mutex);
  1126. #define M4U_TL_UUID {{0x98, 0xfb, 0x95, 0xbc, 0xb4, 0xbf, 0x42, 0xd2, 0x64, 0x73, 0xea, 0xe4, 0x86, 0x90, 0xd7, 0xea} }
  1127. static const struct mc_uuid_t m4u_tl_uuid = M4U_TL_UUID;
  1128. static struct mc_session_handle m4u_tci_session;
  1129. static m4u_msg_t *m4u_tci_msg;
  1130. static DEFINE_MUTEX(m4u_tci_mutex);
  1131. static int m4u_open_trustlet(uint32_t deviceId)
  1132. {
  1133. enum mc_result mcRet;
  1134. /* Initialize session handle data */
  1135. memset(&m4u_tci_session, 0, sizeof(m4u_tci_session));
  1136. mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_tci_msg, 0);
  1137. if (MC_DRV_OK != mcRet) {
  1138. M4UMSG("tz_m4u: mc_malloc_wsm tci fail: %d\n", mcRet);
  1139. return -1;
  1140. }
  1141. /* Open session the trustlet */
  1142. m4u_tci_session.device_id = deviceId;
  1143. mcRet = mc_open_session(&m4u_tci_session,
  1144. &m4u_tl_uuid,
  1145. (uint8_t *) m4u_tci_msg,
  1146. (uint32_t) sizeof(m4u_msg_t));
  1147. if (MC_DRV_OK != mcRet) {
  1148. M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
  1149. return -1;
  1150. }
  1151. M4UMSG("tz_m4u: open TCI session success\n");
  1152. return 0;
  1153. }
  1154. int m4u_close_trustlet(uint32_t deviceId)
  1155. {
  1156. enum mc_result mcRet;
  1157. mcRet = mc_free_wsm(deviceId, (uint8_t *) m4u_tci_msg);
  1158. if (mcRet) {
  1159. M4UMSG("tz_m4u: free tci struct fail: %d\n", mcRet);
  1160. return -1;
  1161. }
  1162. /* Close session */
  1163. mcRet = mc_close_session(&m4u_tci_session);
  1164. if (MC_DRV_OK != mcRet) {
  1165. M4UMSG("tz_m4u: mc_close_session returned: %d\n", mcRet);
  1166. return -1;
  1167. }
  1168. return 0;
  1169. }
  1170. static int m4u_exec_cmd(struct mc_session_handle *m4u_session, m4u_msg_t *m4u_msg)
  1171. {
  1172. enum mc_result ret;
  1173. if (NULL == m4u_msg) {
  1174. M4UMSG("%s TCI/DCI error\n", __func__);
  1175. return -1;
  1176. }
  1177. M4UMSG("Notify %x\n", m4u_msg->cmd);
  1178. ret = mc_notify(m4u_session);
  1179. if (MC_DRV_OK != ret) {
  1180. m4u_aee_print("tz_m4u Notify failed: %d\n", ret);
  1181. goto exit;
  1182. }
  1183. ret = mc_wait_notification(m4u_session, MC_INFINITE_TIMEOUT);
  1184. if (MC_DRV_OK != ret) {
  1185. m4u_aee_print("Wait for response notification failed: 0x%x\n", ret);
  1186. goto exit;
  1187. }
  1188. M4UMSG("get_resp %x\n", m4u_msg->cmd);
  1189. exit:
  1190. return ret;
  1191. }
  1192. static int __m4u_sec_init(void)
  1193. {
  1194. int ret;
  1195. void *pgd_va;
  1196. unsigned long pt_pa_nonsec;
  1197. unsigned int size;
  1198. mutex_lock(&m4u_tci_mutex);
  1199. if (NULL == m4u_tci_msg) {
  1200. M4UMSG("%s TCI/DCI error\n", __func__);
  1201. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1202. goto out;
  1203. }
  1204. m4u_get_pgd(NULL, 0, &pgd_va, (void *)&pt_pa_nonsec, &size);
  1205. m4u_tci_msg->cmd = CMD_M4UTL_INIT;
  1206. m4u_tci_msg->init_param.nonsec_pt_pa = pt_pa_nonsec;
  1207. m4u_tci_msg->init_param.l2_en = gM4U_L2_enable;
  1208. m4u_tci_msg->init_param.sec_pt_pa = 0; /* m4u_alloc_sec_pt_for_debug(); */
  1209. M4UMSG("%s call m4u_exec_cmd CMD_M4UTL_INIT, nonsec_pt_pa: 0x%lx\n", __func__,
  1210. pt_pa_nonsec);
  1211. ret = m4u_exec_cmd(&m4u_tci_session, m4u_tci_msg);
  1212. if (ret) {
  1213. M4UMSG("m4u exec command fail\n");
  1214. ret = -1;
  1215. goto out;
  1216. }
  1217. ret = m4u_tci_msg->rsp;
  1218. out:
  1219. mutex_unlock(&m4u_tci_mutex);
  1220. return ret;
  1221. }
  1222. /* ------------------------------------------------------------- */
  1223. #ifdef __M4U_SECURE_SYSTRACE_ENABLE__
  1224. static int dr_map(unsigned long pa, size_t size)
  1225. {
  1226. int ret;
  1227. mutex_lock(&m4u_dci_mutex);
  1228. if (!m4u_dci_msg) {
  1229. M4UMSG("error: m4u_dci_msg==null\n");
  1230. ret = -1;
  1231. goto out;
  1232. }
  1233. memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
  1234. m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_MAP;
  1235. m4u_dci_msg->systrace_param.pa = pa;
  1236. m4u_dci_msg->systrace_param.size = size;
  1237. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1238. if (ret) {
  1239. M4UMSG("m4u exec command fail\n");
  1240. ret = -1;
  1241. goto out;
  1242. }
  1243. ret = m4u_dci_msg->rsp;
  1244. out:
  1245. mutex_unlock(&m4u_dci_mutex);
  1246. return ret;
  1247. }
  1248. static int dr_unmap(unsigned long pa, size_t size)
  1249. {
  1250. int ret;
  1251. mutex_lock(&m4u_dci_mutex);
  1252. if (!m4u_dci_msg) {
  1253. M4UMSG("error: m4u_dci_msg==null\n");
  1254. ret = -1;
  1255. goto out;
  1256. }
  1257. memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
  1258. m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_UNMAP;
  1259. m4u_dci_msg->systrace_param.pa = pa;
  1260. m4u_dci_msg->systrace_param.size = size;
  1261. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1262. if (ret) {
  1263. M4UMSG("m4u exec command fail\n");
  1264. ret = -1;
  1265. goto out;
  1266. }
  1267. ret = m4u_dci_msg->rsp;
  1268. out:
  1269. mutex_unlock(&m4u_dci_mutex);
  1270. return ret;
  1271. }
  1272. static int dr_transact(void)
  1273. {
  1274. int ret;
  1275. mutex_lock(&m4u_dci_mutex);
  1276. if (!m4u_dci_msg) {
  1277. M4UMSG("error: m4u_dci_msg==null\n");
  1278. ret = -1;
  1279. goto out;
  1280. }
  1281. memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
  1282. m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_TRANSACT;
  1283. m4u_dci_msg->systrace_param.pa = 0;
  1284. m4u_dci_msg->systrace_param.size = 0;
  1285. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1286. if (ret) {
  1287. M4UMSG("m4u exec command fail\n");
  1288. ret = -1;
  1289. goto out;
  1290. }
  1291. ret = m4u_dci_msg->rsp;
  1292. out:
  1293. mutex_unlock(&m4u_dci_mutex);
  1294. return ret;
  1295. }
  1296. #endif
  1297. /* ------------------------------------------------------------- */
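/*
 * m4u_sec_init: one-time secure world setup.  Opens the MobiCore device,
 * allocates world-shared memory for the DCI message, opens a DCI session to
 * the m4u secure driver, briefly opens the m4u trustlet session around
 * __m4u_sec_init() to hand over the non-secure page table, and finally marks
 * the TEE path as enabled (m4u_tee_en = 1).
 */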
  1298. int m4u_sec_init(void)
  1299. {
  1300. uint32_t deviceId = MC_DEVICE_ID_DEFAULT;
  1301. enum mc_result mcRet;
if (m4u_tee_en) {
M4UMSG("warning: m4u secure world has already been initialized, %d\n", m4u_tee_en);
return 0;
}
M4UMSG("call m4u_sec_init in normal m4u driver\n");
  1307. /* Initialize session handle data */
  1308. memset(&m4u_dci_session, 0, sizeof(m4u_dci_session));
  1309. /* Open MobiCore device */
  1310. mcRet = mc_open_device(deviceId);
  1311. if (MC_DRV_OK != mcRet) {
  1312. M4UMSG("tz_m4u: error mc_open_device returned: %d\n", mcRet);
  1313. if (mcRet != MC_DRV_ERR_INVALID_OPERATION)
  1314. return -1;
  1315. }
  1316. /* Allocating WSM for DCI */
  1317. mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_dci_msg, 0);
  1318. if (MC_DRV_OK != mcRet) {
  1319. M4UMSG("tz_m4u: mc_malloc_wsm returned: %d\n", mcRet);
  1320. return -1;
  1321. }
  1322. /* Open session the trustlet */
  1323. m4u_dci_session.device_id = deviceId;
  1324. mcRet = mc_open_session(&m4u_dci_session,
  1325. &m4u_drv_uuid,
  1326. (uint8_t *) m4u_dci_msg,
  1327. (uint32_t) sizeof(m4u_msg_t));
  1328. if (MC_DRV_OK != mcRet) {
  1329. M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
  1330. return -1;
  1331. }
  1332. M4UMSG("tz_m4u: open DCI session returned: %d\n", mcRet);
{
/* crude busy-wait delay before talking to the trustlet */
volatile int i, j = 0;
for (i = 0; i < 10000000; i++)
j++;
}
  1338. m4u_open_trustlet(deviceId);
  1339. __m4u_sec_init();
  1340. #ifdef __M4U_SECURE_SYSTRACE_ENABLE__
  1341. {
  1342. union callback_func callback;
  1343. callback.dr.map = dr_map;
  1344. callback.dr.unmap = dr_unmap;
  1345. callback.dr.transact = dr_transact;
  1346. init_sectrace("M4U", if_dci, usage_dr, 64, &callback);
  1347. }
  1348. #endif
  1349. m4u_close_trustlet(deviceId);
  1350. m4u_tee_en = 1;
  1351. return 0;
  1352. }
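/*
 * m4u_config_port_tee / m4u_config_port_array_tee: forward the port
 * configuration (virtuality, direction, distance) to the secure world over
 * the DCI session via CMD_M4U_CFG_PORT / CMD_M4U_CFG_PORT_ARRAY.
 */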
  1353. int m4u_config_port_tee(M4U_PORT_STRUCT *pM4uPort) /* native */
  1354. {
  1355. int ret;
  1356. mutex_lock(&m4u_dci_mutex);
  1357. if (!m4u_dci_msg) {
  1358. M4UMSG("error: m4u_dci_msg==null\n");
  1359. ret = -1;
  1360. goto out;
  1361. }
  1362. m4u_dci_msg->cmd = CMD_M4U_CFG_PORT;
  1363. m4u_dci_msg->port_param.port = pM4uPort->ePortID;
  1364. m4u_dci_msg->port_param.virt = pM4uPort->Virtuality;
  1365. m4u_dci_msg->port_param.direction = pM4uPort->Direction;
  1366. m4u_dci_msg->port_param.distance = pM4uPort->Distance;
  1367. m4u_dci_msg->port_param.sec = 0;
  1368. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1369. if (ret) {
  1370. M4UMSG("m4u exec command fail\n");
  1371. ret = -1;
  1372. goto out;
  1373. }
  1374. ret = m4u_dci_msg->rsp;
  1375. out:
  1376. mutex_unlock(&m4u_dci_mutex);
  1377. return ret;
  1378. }
  1379. int m4u_config_port_array_tee(unsigned char *port_array) /* native */
  1380. {
  1381. int ret;
  1382. mutex_lock(&m4u_dci_mutex);
  1383. if (!m4u_dci_msg) {
  1384. M4UMSG("error: m4u_dci_msg==null\n");
  1385. ret = -1;
  1386. goto out;
  1387. }
  1388. memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
  1389. memcpy(m4u_dci_msg->port_array_param.m4u_port_array, port_array,
  1390. sizeof(m4u_dci_msg->port_array_param.m4u_port_array));
  1391. m4u_dci_msg->cmd = CMD_M4U_CFG_PORT_ARRAY;
  1392. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1393. if (ret) {
  1394. M4UMSG("m4u exec command fail\n");
  1395. ret = -1;
  1396. goto out;
  1397. }
  1398. ret = m4u_dci_msg->rsp;
  1399. out:
  1400. mutex_unlock(&m4u_dci_mutex);
  1401. return ret;
  1402. }
  1403. static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size)
  1404. {
  1405. int ret;
  1406. mutex_lock(&m4u_dci_mutex);
  1407. if (NULL == m4u_dci_msg) {
  1408. M4UMSG("%s TCI/DCI error\n", __func__);
  1409. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1410. goto out;
  1411. }
  1412. m4u_dci_msg->cmd = CMD_M4U_UNMAP_NONSEC_BUFFER;
  1413. m4u_dci_msg->buf_param.mva = mva;
  1414. m4u_dci_msg->buf_param.size = size;
  1415. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1416. if (ret) {
  1417. M4UMSG("m4u exec command fail\n");
  1418. ret = -1;
  1419. goto out;
  1420. }
  1421. ret = m4u_dci_msg->rsp;
  1422. out:
  1423. mutex_unlock(&m4u_dci_mutex);
  1424. return ret;
  1425. }
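/*
 * m4u_larb_backup_sec / m4u_larb_restore_sec: ask the secure world to back up
 * or restore the state of one local arbiter (LARB), identified by larb_idx,
 * via CMD_M4U_LARB_BACKUP / CMD_M4U_LARB_RESTORE.
 */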
  1426. int m4u_larb_backup_sec(unsigned int larb_idx)
  1427. {
  1428. int ret;
  1429. mutex_lock(&m4u_dci_mutex);
  1430. if (NULL == m4u_dci_msg) {
  1431. M4UMSG("%s TCI/DCI error\n", __func__);
  1432. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1433. goto out;
  1434. }
  1435. m4u_dci_msg->cmd = CMD_M4U_LARB_BACKUP;
  1436. m4u_dci_msg->larb_param.larb_idx = larb_idx;
  1437. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1438. if (ret) {
  1439. M4UMSG("m4u exec command fail\n");
  1440. ret = -1;
  1441. goto out;
  1442. }
  1443. ret = m4u_dci_msg->rsp;
  1444. out:
  1445. mutex_unlock(&m4u_dci_mutex);
  1446. return ret;
  1447. }
  1448. int m4u_larb_restore_sec(unsigned int larb_idx)
  1449. {
  1450. int ret;
  1451. mutex_lock(&m4u_dci_mutex);
  1452. if (NULL == m4u_dci_msg) {
  1453. M4UMSG("%s TCI/DCI error\n", __func__);
  1454. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1455. goto out;
  1456. }
  1457. m4u_dci_msg->cmd = CMD_M4U_LARB_RESTORE;
  1458. m4u_dci_msg->larb_param.larb_idx = larb_idx;
  1459. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1460. if (ret) {
  1461. M4UMSG("m4u exec command fail\n");
  1462. ret = -1;
  1463. goto out;
  1464. }
  1465. ret = m4u_dci_msg->rsp;
  1466. out:
  1467. mutex_unlock(&m4u_dci_mutex);
  1468. return ret;
  1469. }
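/*
 * m4u_reg_backup_sec / m4u_reg_restore_sec: ask the secure world to back up or
 * restore its M4U register settings (CMD_M4U_REG_BACKUP / CMD_M4U_REG_RESTORE).
 * They are driven by the framebuffer blank notifier further below.
 */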
  1470. static int m4u_reg_backup_sec(void)
  1471. {
  1472. int ret;
  1473. mutex_lock(&m4u_dci_mutex);
  1474. if (NULL == m4u_dci_msg) {
  1475. M4UMSG("%s TCI/DCI error\n", __func__);
  1476. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1477. goto out;
  1478. }
  1479. m4u_dci_msg->cmd = CMD_M4U_REG_BACKUP;
  1480. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1481. if (ret) {
  1482. M4UMSG("m4u exec command fail\n");
  1483. ret = -1;
  1484. goto out;
  1485. }
  1486. ret = m4u_dci_msg->rsp;
  1487. out:
  1488. mutex_unlock(&m4u_dci_mutex);
  1489. return ret;
  1490. }
  1491. static int m4u_reg_restore_sec(void)
  1492. {
  1493. int ret;
  1494. mutex_lock(&m4u_dci_mutex);
  1495. if (NULL == m4u_dci_msg) {
  1496. M4UMSG("%s TCI/DCI error\n", __func__);
  1497. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1498. goto out;
  1499. }
  1500. m4u_dci_msg->cmd = CMD_M4U_REG_RESTORE;
  1501. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1502. if (ret) {
  1503. M4UMSG("m4u exec command fail\n");
  1504. ret = -1;
  1505. goto out;
  1506. }
  1507. ret = m4u_dci_msg->rsp;
  1508. out:
  1509. mutex_unlock(&m4u_dci_mutex);
  1510. return ret;
  1511. }
  1512. /* static void m4u_early_suspend(struct early_suspend *h)
  1513. {
  1514. M4UMSG("m4u_early_suspend +, %d\n", m4u_tee_en);
  1515. if (m4u_tee_en)
  1516. m4u_reg_backup_sec();
  1517. M4UMSG("m4u_early_suspend -\n");
  1518. }
  1519. static void m4u_late_resume(struct early_suspend *h)
  1520. {
  1521. M4UMSG("m4u_late_resume +, %d\n", m4u_tee_en);
  1522. if (m4u_tee_en)
  1523. m4u_reg_restore_sec();
  1524. M4UMSG("m4u_late_resume -\n");
  1525. }
  1526. static struct early_suspend mtk_m4u_early_suspend_driver = {
  1527. .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 251,
  1528. .suspend = m4u_early_suspend,
  1529. .resume = m4u_late_resume,
  1530. }; */
  1531. static void m4u_early_suspend(void)
  1532. {
  1533. M4UMSG("m4u_early_suspend +, %d\n", m4u_tee_en);
  1534. if (m4u_tee_en)
  1535. m4u_reg_backup_sec();
  1536. M4UMSG("m4u_early_suspend -\n");
  1537. }
  1538. static void m4u_late_resume(void)
  1539. {
  1540. M4UMSG("m4u_late_resume +, %d\n", m4u_tee_en);
  1541. if (m4u_tee_en)
  1542. m4u_reg_restore_sec();
  1543. M4UMSG("m4u_late_resume -\n");
  1544. }
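/*
 * The legacy early_suspend hooks (commented out above) were replaced by a
 * framebuffer blank notifier: FB_BLANK_POWERDOWN triggers the secure register
 * backup and FB_BLANK_UNBLANK/NORMAL triggers the restore.
 */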
  1545. static struct notifier_block m4u_fb_notifier;
  1546. static int m4u_fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data)
  1547. {
  1548. struct fb_event *evdata = data;
  1549. int blank;
M4UMSG("m4u_fb_notifier_callback %ld, %d\n", event, FB_EVENT_BLANK);
  1551. if (event != FB_EVENT_BLANK)
  1552. return 0;
  1553. blank = *(int *)evdata->data;
  1554. switch (blank) {
  1555. case FB_BLANK_UNBLANK:
  1556. case FB_BLANK_NORMAL:
  1557. m4u_late_resume();
  1558. break;
  1559. case FB_BLANK_VSYNC_SUSPEND:
  1560. case FB_BLANK_HSYNC_SUSPEND:
  1561. break;
  1562. case FB_BLANK_POWERDOWN:
  1563. m4u_early_suspend();
  1564. break;
  1565. default:
  1566. return -EINVAL;
  1567. }
  1568. return 0;
  1569. }
  1570. #if 1
  1571. int m4u_map_nonsec_buf(int port, unsigned int mva, unsigned int size)
  1572. {
  1573. int ret;
  1574. mutex_lock(&m4u_dci_mutex);
  1575. if (NULL == m4u_dci_msg) {
  1576. M4UMSG("%s TCI/DCI error\n", __func__);
  1577. ret = MC_DRV_ERR_NO_FREE_MEMORY;
  1578. goto out;
  1579. }
  1580. m4u_dci_msg->cmd = CMD_M4U_MAP_NONSEC_BUFFER;
  1581. m4u_dci_msg->buf_param.mva = mva;
  1582. m4u_dci_msg->buf_param.size = size;
  1583. ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
  1584. if (ret) {
  1585. M4UMSG("m4u exec command fail\n");
  1586. ret = -1;
  1587. goto out;
  1588. }
  1589. ret = m4u_dci_msg->rsp;
  1590. out:
  1591. mutex_unlock(&m4u_dci_mutex);
  1592. return ret;
  1593. }
  1594. #endif
  1595. #endif
  1596. #ifdef M4U_TEE_SERVICE_ENABLE
  1597. static DEFINE_MUTEX(gM4u_sec_init);
  1598. #endif
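/*
 * MTK_M4U_ioctl: user-space entry point.  Each command copies its argument
 * structure in from user space, calls the corresponding m4u_* helper, and for
 * MTK_M4U_T_ALLOC_MVA copies the resulting MVA back out.  Port (array)
 * configuration and the secure init path are serialized by gM4u_sec_init when
 * the TEE service is enabled.
 */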
  1599. static long MTK_M4U_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1600. {
  1601. int ret = 0;
  1602. M4U_MOUDLE_STRUCT m4u_module;
  1603. M4U_PORT_STRUCT m4u_port;
  1604. M4U_PORT_ID PortID;
  1605. M4U_PORT_ID ModuleID;
  1606. M4U_CACHE_STRUCT m4u_cache_data;
  1607. M4U_DMA_STRUCT m4u_dma_data;
  1608. m4u_client_t *client = filp->private_data;
  1609. switch (cmd) {
  1610. case MTK_M4U_T_POWER_ON:
  1611. ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
  1612. if (ret) {
  1613. M4UMSG("MTK_M4U_T_POWER_ON,copy_from_user failed,%d\n", ret);
  1614. return -EFAULT;
  1615. }
  1616. ret = m4u_power_on(ModuleID);
  1617. break;
  1618. case MTK_M4U_T_POWER_OFF:
  1619. ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
  1620. if (ret) {
  1621. M4UMSG("MTK_M4U_T_POWER_OFF,copy_from_user failed,%d\n", ret);
  1622. return -EFAULT;
  1623. }
  1624. ret = m4u_power_off(ModuleID);
  1625. break;
  1626. case MTK_M4U_T_ALLOC_MVA:
  1627. ret = copy_from_user(&m4u_module, (void *)arg, sizeof(M4U_MOUDLE_STRUCT));
  1628. if (ret) {
  1629. M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_from_user failed:%d\n", ret);
  1630. return -EFAULT;
  1631. }
  1632. ret = m4u_alloc_mva(client, m4u_module.port, m4u_module.BufAddr, NULL,
  1633. m4u_module.BufSize, m4u_module.prot, m4u_module.flags,
  1634. &(m4u_module.MVAStart));
  1635. if (ret)
  1636. return ret;
  1637. ret = copy_to_user(&(((M4U_MOUDLE_STRUCT *) arg)->MVAStart),
  1638. &(m4u_module.MVAStart), sizeof(unsigned int));
  1639. if (ret) {
M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_to_user failed:%d\n", ret);
  1641. return -EFAULT;
  1642. }
  1643. break;
  1644. case MTK_M4U_T_DEALLOC_MVA:
  1645. {
  1646. ret = copy_from_user(&m4u_module, (void *)arg, sizeof(M4U_MOUDLE_STRUCT));
  1647. if (ret) {
  1648. M4UMSG("MTK_M4U_T_DEALLOC_MVA,copy_from_user failed:%d\n", ret);
  1649. return -EFAULT;
  1650. }
  1651. ret = m4u_dealloc_mva(client, m4u_module.port, m4u_module.MVAStart);
  1652. if (ret)
  1653. return ret;
  1654. }
  1655. break;
  1656. case MTK_M4U_T_DUMP_INFO:
  1657. ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
  1658. if (ret) {
M4UMSG("MTK_M4U_T_DUMP_INFO,copy_from_user failed,%d\n", ret);
  1660. return -EFAULT;
  1661. }
  1662. break;
  1663. case MTK_M4U_T_CACHE_SYNC:
  1664. ret = copy_from_user(&m4u_cache_data, (void *)arg, sizeof(M4U_CACHE_STRUCT));
  1665. if (ret) {
  1666. M4UMSG("m4u_cache_sync,copy_from_user failed:%d\n", ret);
  1667. return -EFAULT;
  1668. }
  1669. ret = m4u_cache_sync(client, m4u_cache_data.port, m4u_cache_data.va,
  1670. m4u_cache_data.size, m4u_cache_data.mva,
  1671. m4u_cache_data.eCacheSync);
  1672. break;
  1673. case MTK_M4U_T_DMA_OP:
  1674. ret = copy_from_user(&m4u_dma_data, (void *) arg,
  1675. sizeof(M4U_DMA_STRUCT));
  1676. if (ret) {
  1677. M4UMSG("m4u dma map/unmap area,copy_from_user failed:%d\n", ret);
  1678. return -EFAULT;
  1679. }
  1680. ret = m4u_dma_op(client, m4u_dma_data.port, m4u_dma_data.va,
  1681. m4u_dma_data.size, m4u_dma_data.mva,
  1682. m4u_dma_data.eDMAType, m4u_dma_data.eDMADir);
  1683. break;
  1684. case MTK_M4U_T_CONFIG_PORT:
  1685. ret = copy_from_user(&m4u_port, (void *)arg, sizeof(M4U_PORT_STRUCT));
  1686. if (ret) {
  1687. M4UMSG("MTK_M4U_T_CONFIG_PORT,copy_from_user failed:%d\n", ret);
  1688. return -EFAULT;
  1689. }
  1690. #ifdef M4U_TEE_SERVICE_ENABLE
  1691. mutex_lock(&gM4u_sec_init);
  1692. #endif
  1693. ret = m4u_config_port(&m4u_port);
  1694. #ifdef M4U_TEE_SERVICE_ENABLE
  1695. mutex_unlock(&gM4u_sec_init);
  1696. #endif
  1697. break;
  1698. case MTK_M4U_T_MONITOR_START:
  1699. ret = copy_from_user(&PortID, (void *)arg, sizeof(unsigned int));
  1700. if (ret) {
  1701. M4UMSG("MTK_M4U_T_MONITOR_START,copy_from_user failed,%d\n", ret);
  1702. return -EFAULT;
  1703. }
  1704. ret = m4u_monitor_start(m4u_port_2_m4u_id(PortID));
  1705. break;
  1706. case MTK_M4U_T_MONITOR_STOP:
  1707. ret = copy_from_user(&PortID, (void *)arg, sizeof(unsigned int));
  1708. if (ret) {
  1709. M4UMSG("MTK_M4U_T_MONITOR_STOP,copy_from_user failed,%d\n", ret);
  1710. return -EFAULT;
  1711. }
  1712. ret = m4u_monitor_stop(m4u_port_2_m4u_id(PortID));
  1713. break;
  1714. case MTK_M4U_T_CACHE_FLUSH_ALL:
  1715. m4u_dma_cache_flush_all();
  1716. break;
  1717. case MTK_M4U_T_CONFIG_PORT_ARRAY:
  1718. {
  1719. struct m4u_port_array port_array;
  1720. ret = copy_from_user(&port_array, (void *)arg, sizeof(struct m4u_port_array));
  1721. if (ret) {
M4UMSG("MTK_M4U_T_CONFIG_PORT_ARRAY,copy_from_user failed:%d\n", ret);
  1723. return -EFAULT;
  1724. }
  1725. #ifdef M4U_TEE_SERVICE_ENABLE
  1726. mutex_lock(&gM4u_sec_init);
  1727. #endif
  1728. ret = m4u_config_port_array(&port_array);
  1729. #ifdef M4U_TEE_SERVICE_ENABLE
  1730. mutex_unlock(&gM4u_sec_init);
  1731. #endif
  1732. }
  1733. break;
  1734. case MTK_M4U_T_CONFIG_MAU:
  1735. {
  1736. M4U_MAU_STRUCT rMAU;
  1737. ret = copy_from_user(&rMAU, (void *)arg, sizeof(M4U_MAU_STRUCT));
  1738. if (ret) {
  1739. M4UMSG("MTK_M4U_T_CONFIG_MAU,copy_from_user failed:%d\n", ret);
  1740. return -EFAULT;
  1741. }
  1742. ret = config_mau(rMAU);
  1743. }
  1744. break;
  1745. case MTK_M4U_T_CONFIG_TF:
  1746. {
  1747. M4U_TF_STRUCT rM4UTF;
  1748. ret = copy_from_user(&rM4UTF, (void *)arg, sizeof(M4U_TF_STRUCT));
  1749. if (ret) {
  1750. M4UMSG("MTK_M4U_T_CONFIG_TF,copy_from_user failed:%d\n", ret);
  1751. return -EFAULT;
  1752. }
  1753. ret = m4u_enable_tf(rM4UTF.port, rM4UTF.fgEnable);
  1754. }
  1755. break;
  1756. #ifdef M4U_TEE_SERVICE_ENABLE
  1757. case MTK_M4U_T_SEC_INIT:
  1758. {
  1759. M4UMSG("MTK M4U ioctl : MTK_M4U_T_SEC_INIT command!! 0x%x\n", cmd);
  1760. mutex_lock(&gM4u_sec_init);
  1761. ret = m4u_sec_init();
  1762. mutex_unlock(&gM4u_sec_init);
  1763. }
  1764. break;
  1765. #endif
  1766. default:
  1767. /* M4UMSG("MTK M4U ioctl:No such command!!\n"); */
  1768. ret = -EINVAL;
  1769. break;
  1770. }
  1771. return ret;
  1772. }
  1773. #if IS_ENABLED(CONFIG_COMPAT)
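/*
 * 32-bit compat layer: the COMPAT_* structures mirror the native ioctl
 * structures but use compat_uint_t/compat_ulong_t, and the helpers below copy
 * them field by field between the 32-bit and native user-space layouts.
 */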
  1774. typedef struct {
  1775. compat_uint_t port;
  1776. compat_ulong_t BufAddr;
  1777. compat_uint_t BufSize;
  1778. compat_uint_t prot;
  1779. compat_uint_t MVAStart;
  1780. compat_uint_t MVAEnd;
  1781. compat_uint_t flags;
  1782. } COMPAT_M4U_MOUDLE_STRUCT;
  1783. typedef struct {
  1784. compat_uint_t port;
  1785. compat_uint_t eCacheSync;
  1786. compat_ulong_t va;
  1787. compat_uint_t size;
  1788. compat_uint_t mva;
  1789. } COMPAT_M4U_CACHE_STRUCT;
  1790. typedef struct {
  1791. compat_uint_t port;
  1792. compat_uint_t eDMAType;
  1793. compat_uint_t eDMADir;
  1794. compat_ulong_t va;
  1795. compat_uint_t size;
  1796. compat_uint_t mva;
  1797. } COMPAT_M4U_DMA_STRUCT;
  1798. #define COMPAT_MTK_M4U_T_ALLOC_MVA _IOWR(MTK_M4U_MAGICNO, 4, int)
  1799. #define COMPAT_MTK_M4U_T_DEALLOC_MVA _IOW(MTK_M4U_MAGICNO, 5, int)
  1800. #define COMPAT_MTK_M4U_T_CACHE_SYNC _IOW(MTK_M4U_MAGICNO, 10, int)
  1801. #define COMPAT_MTK_M4U_T_DMA_OP _IOW(MTK_M4U_MAGICNO, 29, int)
  1802. static int compat_get_m4u_module_struct(COMPAT_M4U_MOUDLE_STRUCT __user *data32,
  1803. M4U_MOUDLE_STRUCT __user *data)
  1804. {
  1805. compat_uint_t u;
  1806. compat_ulong_t l;
  1807. int err;
  1808. err = get_user(u, &(data32->port));
  1809. err |= put_user(u, &(data->port));
  1810. err |= get_user(l, &(data32->BufAddr));
  1811. err |= put_user(l, &(data->BufAddr));
  1812. err |= get_user(u, &(data32->BufSize));
  1813. err |= put_user(u, &(data->BufSize));
  1814. err |= get_user(u, &(data32->prot));
  1815. err |= put_user(u, &(data->prot));
  1816. err |= get_user(u, &(data32->MVAStart));
  1817. err |= put_user(u, &(data->MVAStart));
  1818. err |= get_user(u, &(data32->MVAEnd));
  1819. err |= put_user(u, &(data->MVAEnd));
  1820. err |= get_user(u, &(data32->flags));
  1821. err |= put_user(u, &(data->flags));
  1822. return err;
  1823. }
  1824. static int compat_put_m4u_module_struct(COMPAT_M4U_MOUDLE_STRUCT __user *data32,
  1825. M4U_MOUDLE_STRUCT __user *data)
  1826. {
  1827. compat_uint_t u;
  1828. compat_ulong_t l;
  1829. int err;
  1830. err = get_user(u, &(data->port));
  1831. err |= put_user(u, &(data32->port));
  1832. err |= get_user(l, &(data->BufAddr));
  1833. err |= put_user(l, &(data32->BufAddr));
  1834. err |= get_user(u, &(data->BufSize));
  1835. err |= put_user(u, &(data32->BufSize));
  1836. err |= get_user(u, &(data->prot));
  1837. err |= put_user(u, &(data32->prot));
  1838. err |= get_user(u, &(data->MVAStart));
  1839. err |= put_user(u, &(data32->MVAStart));
  1840. err |= get_user(u, &(data->MVAEnd));
  1841. err |= put_user(u, &(data32->MVAEnd));
  1842. err |= get_user(u, &(data->flags));
  1843. err |= put_user(u, &(data32->flags));
  1844. return err;
  1845. }
  1846. static int compat_get_m4u_cache_struct(COMPAT_M4U_CACHE_STRUCT __user *data32,
  1847. M4U_CACHE_STRUCT __user *data)
  1848. {
  1849. compat_uint_t u;
  1850. compat_ulong_t l;
  1851. int err;
  1852. err = get_user(u, &(data32->port));
  1853. err |= put_user(u, &(data->port));
  1854. err |= get_user(u, &(data32->eCacheSync));
  1855. err |= put_user(u, &(data->eCacheSync));
  1856. err |= get_user(l, &(data32->va));
  1857. err |= put_user(l, &(data->va));
  1858. err |= get_user(u, &(data32->size));
  1859. err |= put_user(u, &(data->size));
  1860. err |= get_user(u, &(data32->mva));
  1861. err |= put_user(u, &(data->mva));
  1862. return err;
  1863. }
  1864. static int compat_get_m4u_dma_struct(
  1865. COMPAT_M4U_DMA_STRUCT __user *data32,
  1866. M4U_DMA_STRUCT __user *data)
  1867. {
  1868. compat_uint_t u;
  1869. compat_ulong_t l;
  1870. int err;
  1871. err = get_user(u, &(data32->port));
  1872. err |= put_user(u, &(data->port));
  1873. err |= get_user(u, &(data32->eDMAType));
  1874. err |= put_user(u, &(data->eDMAType));
  1875. err |= get_user(u, &(data32->eDMADir));
  1876. err |= put_user(u, &(data->eDMADir));
  1877. err |= get_user(l, &(data32->va));
  1878. err |= put_user(l, &(data->va));
  1879. err |= get_user(u, &(data32->size));
  1880. err |= put_user(u, &(data->size));
  1881. err |= get_user(u, &(data32->mva));
  1882. err |= put_user(u, &(data->mva));
  1883. return err;
  1884. }
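/*
 * MTK_M4U_COMPAT_ioctl: translates 32-bit requests by staging a native struct
 * with compat_alloc_user_space(), converting the fields, and then re-entering
 * the regular unlocked_ioctl handler.  Commands whose layout is identical in
 * both ABIs are passed through unchanged.
 */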
  1885. long MTK_M4U_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  1886. {
  1887. long ret;
  1888. if (!filp->f_op || !filp->f_op->unlocked_ioctl)
  1889. return -ENOTTY;
  1890. switch (cmd) {
  1891. case COMPAT_MTK_M4U_T_ALLOC_MVA:
  1892. {
  1893. COMPAT_M4U_MOUDLE_STRUCT __user *data32;
  1894. M4U_MOUDLE_STRUCT __user *data;
  1895. int err;
  1896. data32 = compat_ptr(arg);
  1897. data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
  1898. if (data == NULL)
  1899. return -EFAULT;
  1900. err = compat_get_m4u_module_struct(data32, data);
  1901. if (err)
  1902. return err;
  1903. ret = filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_ALLOC_MVA, (unsigned long)data);
  1904. err = compat_put_m4u_module_struct(data32, data);
  1905. if (err)
  1906. return err;
  1907. return ret;
  1908. }
  1909. case COMPAT_MTK_M4U_T_DEALLOC_MVA:
  1910. {
  1911. COMPAT_M4U_MOUDLE_STRUCT __user *data32;
  1912. M4U_MOUDLE_STRUCT __user *data;
  1913. int err;
  1914. data32 = compat_ptr(arg);
  1915. data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
  1916. if (data == NULL)
  1917. return -EFAULT;
  1918. err = compat_get_m4u_module_struct(data32, data);
  1919. if (err)
  1920. return err;
  1921. return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_DEALLOC_MVA,
  1922. (unsigned long)data);
  1923. }
  1924. case COMPAT_MTK_M4U_T_CACHE_SYNC:
  1925. {
  1926. COMPAT_M4U_CACHE_STRUCT __user *data32;
  1927. M4U_CACHE_STRUCT __user *data;
  1928. int err;
  1929. data32 = compat_ptr(arg);
  1930. data = compat_alloc_user_space(sizeof(M4U_CACHE_STRUCT));
  1931. if (data == NULL)
  1932. return -EFAULT;
  1933. err = compat_get_m4u_cache_struct(data32, data);
  1934. if (err)
  1935. return err;
  1936. return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_CACHE_SYNC,
  1937. (unsigned long)data);
  1938. }
  1939. case COMPAT_MTK_M4U_T_DMA_OP:
  1940. {
  1941. COMPAT_M4U_DMA_STRUCT __user *data32;
  1942. M4U_DMA_STRUCT __user *data;
  1943. int err;
  1944. data32 = compat_ptr(arg);
  1945. data = compat_alloc_user_space(sizeof(M4U_DMA_STRUCT));
  1946. if (data == NULL)
  1947. return -EFAULT;
  1948. err = compat_get_m4u_dma_struct(data32, data);
  1949. if (err)
  1950. return err;
  1951. return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_DMA_OP,
  1952. (unsigned long)data);
  1953. }
  1954. case MTK_M4U_T_POWER_ON:
  1955. case MTK_M4U_T_POWER_OFF:
  1956. case MTK_M4U_T_DUMP_INFO:
  1957. case MTK_M4U_T_CONFIG_PORT:
  1958. case MTK_M4U_T_MONITOR_START:
  1959. case MTK_M4U_T_MONITOR_STOP:
  1960. case MTK_M4U_T_CACHE_FLUSH_ALL:
  1961. case MTK_M4U_T_CONFIG_PORT_ARRAY:
  1962. case MTK_M4U_T_SEC_INIT:
  1963. return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
  1964. default:
  1965. return -ENOIOCTLCMD;
  1966. }
  1967. }
  1968. #else
  1969. #define MTK_M4U_COMPAT_ioctl NULL
  1970. #endif
  1971. static const struct file_operations m4u_fops = {
  1972. .owner = THIS_MODULE,
  1973. .open = MTK_M4U_open,
  1974. .release = MTK_M4U_release,
  1975. .flush = MTK_M4U_flush,
  1976. .unlocked_ioctl = MTK_M4U_ioctl,
  1977. .compat_ioctl = MTK_M4U_COMPAT_ioctl,
  1978. /* .mmap = NULL; */
  1979. };
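/*
 * m4u_probe: per-instance setup.  Reads the "cell-index" property to get the
 * instance id, ioremaps the register base, parses the IRQ, initializes the MVA
 * domain for instance 0 (reserving the secure MVA range when the TEE service
 * is enabled), and finally calls m4u_hw_init().
 */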
  1980. static int m4u_probe(struct platform_device *pdev)
  1981. {
  1982. struct device_node *node = pdev->dev.of_node;
  1983. M4UINFO("m4u_probe 0\n");
  1984. if (pdev->dev.of_node) {
  1985. int err;
  1986. err = of_property_read_u32(node, "cell-index", &pdev->id);
  1987. if (err)
  1988. M4UMSG("[DTS] get m4u platform_device id fail!!\n");
  1989. }
  1990. M4UINFO("m4u_probe 1, pdev id = %d name = %s\n", pdev->id, pdev->name);
  1991. gM4uDev->pDev[pdev->id] = &pdev->dev;
  1992. gM4uDev->m4u_base[pdev->id] = (unsigned long)of_iomap(node, 0);
  1993. gM4uDev->irq_num[pdev->id] = irq_of_parse_and_map(node, 0);
  1994. M4UMSG("m4u_probe 2, of_iomap: 0x%lx, irq_num: %d, pDev: %p\n",
  1995. gM4uDev->m4u_base[pdev->id], gM4uDev->irq_num[pdev->id], gM4uDev->pDev[pdev->id]);
  1996. if (0 == pdev->id) {
  1997. m4u_domain_init(gM4uDev, &gMvaNode_unknown);
  1998. #ifdef M4U_TEE_SERVICE_ENABLE
  1999. {
  2000. m4u_buf_info_t *pMvaInfo;
  2001. unsigned int mva;
pMvaInfo = m4u_alloc_buf_info();
/* only fill in the descriptor when the allocation succeeded */
if (pMvaInfo) {
pMvaInfo->port = M4U_PORT_UNKNOWN;
pMvaInfo->size = M4U_NONSEC_MVA_START - 0x100000;
}
  2007. mva = m4u_do_mva_alloc(0, M4U_NONSEC_MVA_START - 0x100000, pMvaInfo);
  2008. M4UINFO("reserve sec mva: 0x%x\n", mva);
  2009. }
  2010. #endif
  2011. }
  2012. m4u_hw_init(gM4uDev, pdev->id);
  2013. M4UINFO("m4u_probe 3 finish...\n");
  2014. return 0;
  2015. }
  2016. static int m4u_remove(struct platform_device *pdev)
  2017. {
  2018. m4u_hw_deinit(gM4uDev, pdev->id);
  2019. #ifndef __M4U_USE_PROC_NODE
  2020. misc_deregister(&(gM4uDev->dev));
  2021. #else
  2022. if (gM4uDev->m4u_dev_proc_entry)
  2023. proc_remove(gM4uDev->m4u_dev_proc_entry);
  2024. #endif
  2025. return 0;
  2026. }
  2027. static int m4u_suspend(struct platform_device *pdev, pm_message_t mesg)
  2028. {
  2029. m4u_reg_backup();
  2030. M4UINFO("M4U backup in suspend\n");
  2031. return 0;
  2032. }
  2033. static int m4u_resume(struct platform_device *pdev)
  2034. {
  2035. m4u_reg_restore();
  2036. M4UINFO("M4U restore in resume\n");
  2037. return 0;
  2038. }
  2039. /*---------------------------------------------------------------------------*/
  2040. #ifdef CONFIG_PM
  2041. /*---------------------------------------------------------------------------*/
  2042. static int m4u_pm_suspend(struct device *device)
  2043. {
  2044. struct platform_device *pdev = to_platform_device(device);
  2045. BUG_ON(pdev == NULL);
  2046. return m4u_suspend(pdev, PMSG_SUSPEND);
  2047. }
  2048. static int m4u_pm_resume(struct device *device)
  2049. {
  2050. struct platform_device *pdev = to_platform_device(device);
  2051. BUG_ON(pdev == NULL);
  2052. return m4u_resume(pdev);
  2053. }
  2054. static int m4u_pm_restore_noirq(struct device *device)
  2055. {
  2056. int i;
  2057. for (i = 0; i < TOTAL_M4U_NUM; i++) {
  2058. irq_set_irq_type(gM4uDev->irq_num[i], IRQF_TRIGGER_LOW);
  2059. }
  2060. return 0;
  2061. }
  2062. /*---------------------------------------------------------------------------*/
  2063. #else /*CONFIG_PM */
  2064. /*---------------------------------------------------------------------------*/
  2065. #define m4u_pm_suspend NULL
  2066. #define m4u_pm_resume NULL
  2067. #define m4u_pm_restore_noirq NULL
  2068. /*---------------------------------------------------------------------------*/
  2069. #endif /*CONFIG_PM */
  2070. /*---------------------------------------------------------------------------*/
  2071. static const struct of_device_id iommu_of_ids[] = {
  2072. {.compatible = "mediatek,m4u",},
  2073. {.compatible = "mediatek,perisys_iommu",},
  2074. {}
  2075. };
  2076. const struct dev_pm_ops m4u_pm_ops = {
  2077. .suspend = m4u_pm_suspend,
  2078. .resume = m4u_pm_resume,
  2079. .freeze = m4u_pm_suspend,
  2080. .thaw = m4u_pm_resume,
  2081. .poweroff = m4u_pm_suspend,
  2082. .restore = m4u_pm_resume,
  2083. .restore_noirq = m4u_pm_restore_noirq,
  2084. };
  2085. static struct platform_driver m4uDrv = {
  2086. .probe = m4u_probe,
  2087. .remove = m4u_remove,
  2088. .suspend = m4u_suspend,
  2089. .resume = m4u_resume,
  2090. .driver = {
  2091. .name = "m4u",
  2092. .of_match_table = iommu_of_ids,
  2093. #ifdef CONFIG_PM
  2094. .pm = &m4u_pm_ops,
  2095. #endif
  2096. .owner = THIS_MODULE,
  2097. }
  2098. };
  2099. #if 0
  2100. static u64 m4u_dmamask = ~(u32) 0;
  2101. static struct platform_device mtk_m4u_dev = {
  2102. .name = M4U_DEV_NAME,
  2103. .id = 0,
  2104. .dev = {
  2105. .dma_mask = &m4u_dmamask,
  2106. .coherent_dma_mask = 0xffffffffUL}
  2107. };
  2108. #endif
  2109. #define __M4U_USE_PROC_NODE
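/*
 * MTK_M4U_Init: module entry.  Allocates the global m4u_device, exposes the
 * user interface either as a misc device or as /proc/m4u (selected by
 * __M4U_USE_PROC_NODE), registers the platform driver, and, when the TEE
 * service is enabled, hooks the framebuffer blank notifier used for secure
 * register backup/restore.
 */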
  2110. static int __init MTK_M4U_Init(void)
  2111. {
  2112. int ret = 0;
  2113. gM4uDev = kzalloc(sizeof(struct m4u_device), GFP_KERNEL);
  2114. M4UINFO("MTK_M4U_Init kzalloc: %p\n", gM4uDev);
  2115. if (!gM4uDev) {
M4UMSG("kzalloc for m4u_device failed\n");
  2117. return -ENOMEM;
  2118. }
  2119. #ifndef __M4U_USE_PROC_NODE
  2120. gM4uDev->dev.minor = MISC_DYNAMIC_MINOR;
  2121. gM4uDev->dev.name = M4U_DEV_NAME;
  2122. gM4uDev->dev.fops = &m4u_fops;
  2123. gM4uDev->dev.parent = NULL;
  2124. ret = misc_register(&(gM4uDev->dev));
  2125. M4UINFO("misc_register, minor: %d\n", gM4uDev->dev.minor);
  2126. if (ret) {
  2127. M4UMSG("failed to register misc device.\n");
  2128. return ret;
  2129. }
  2130. #else
gM4uDev->m4u_dev_proc_entry = proc_create("m4u", 0, NULL, &m4u_fops);
if (!gM4uDev->m4u_dev_proc_entry) {
M4UMSG("m4u: failed to register m4u in proc/m4u_device.\n");
return -ENOMEM;
}
  2136. #endif
  2137. m4u_debug_init(gM4uDev);
M4UINFO("M4U platform_driver_register start\n");
if (platform_driver_register(&m4uDrv)) {
M4UMSG("failed to register M4U driver\n");
return -ENODEV;
}
M4UINFO("M4U platform_driver_register finish\n");
  2144. #if 0
  2145. retval = platform_device_register(&mtk_m4u_dev);
  2146. if (retval != 0)
  2147. return retval;
  2148. #endif
  2149. #ifdef M4U_PROFILE
  2150. m4u_profile_init();
  2151. #endif
  2152. #ifdef M4U_TEE_SERVICE_ENABLE
  2153. m4u_fb_notifier.notifier_call = m4u_fb_notifier_callback;
  2154. ret = fb_register_client(&m4u_fb_notifier);
  2155. if (ret)
  2156. M4UMSG("m4u register fb_notifier failed! ret(%d)\n", ret);
  2157. else
  2158. M4UMSG("m4u register fb_notifier OK!\n");
  2159. #endif
  2160. return 0;
  2161. }
  2162. static int __init mtk_m4u_late_init(void)
  2163. {
  2164. #if !defined(CONFIG_MTK_CLKMGR)
  2165. smi_common_clock_off();
  2166. smi_larb0_clock_off();
  2167. #endif
  2168. return 0;
  2169. }
  2170. static void __exit MTK_M4U_Exit(void)
  2171. {
  2172. platform_driver_unregister(&m4uDrv);
  2173. }
  2174. subsys_initcall(MTK_M4U_Init);
  2175. late_initcall(mtk_m4u_late_init);
  2176. module_exit(MTK_M4U_Exit);
MODULE_DESCRIPTION("MTK M4U driver");
  2178. MODULE_AUTHOR("MTK80347 <Xiang.Xu@mediatek.com>");
  2179. MODULE_LICENSE("GPL");