/* m4u_hw.c -- MediaTek M4U (IOMMU) hardware access layer */
  1. #include <linux/slab.h>
  2. #include <linux/interrupt.h>
  3. #include "m4u_priv.h"
  4. #include "m4u_hw.h"
  5. #include <linux/of.h>
  6. #include <linux/of_address.h>
  7. static m4u_domain_t gM4uDomain;
  8. static unsigned long gM4UBaseAddr[TOTAL_M4U_NUM];
  9. static unsigned long gLarbBaseAddr[SMI_LARB_NR];
  10. static unsigned long gPericfgBaseAddr;
  11. static M4U_MAU_STATUS_T gM4u0_mau[M4U0_MAU_NR] = {{0} };
  12. static unsigned int gMAU_candidate_id = M4U0_MAU_NR - 1;
  13. static DEFINE_MUTEX(gM4u_seq_mutex);
  14. int gM4U_L2_enable = 1;
  15. int gM4U_4G_DRAM_Mode = 0;
  16. static spinlock_t gM4u_reg_lock;
  17. int gM4u_port_num = M4U_PORT_UNKNOWN;
  18. int m4u_invalid_tlb(int m4u_id, int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
  19. {
  20. unsigned int reg = 0;
  21. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  22. if (mva_start >= mva_end)
  23. isInvAll = 1;
  24. if (!isInvAll) {
  25. mva_start = round_down(mva_start, SZ_4K);
  26. mva_end = round_up(mva_end, SZ_4K);
  27. }
  28. if (L2_en)
  29. reg = F_MMU_INV_EN_L2;
  30. reg |= F_MMU_INV_EN_L1;
  31. M4U_WriteReg32(m4u_base, REG_INVLID_SEL, reg);
  32. if (isInvAll)
  33. M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_ALL);
  34. else {
  35. /*
  36. unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
  37. unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
  38. unsigned int type = max(type_start, type_end);
  39. unsigned int alignment;
  40. if(type > MMU_PT_TYPE_SUPERSECTION)
  41. type = MMU_PT_TYPE_SUPERSECTION;
  42. alignment = m4u_get_pt_type_size(type) - 1;
  43. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
  44. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
  45. M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
  46. */
  47. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA , mva_start);
  48. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end);
  49. M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
  50. }
  51. if (!isInvAll) {
  52. while (!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE))
  53. ;
  54. M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE, 0);
  55. }
  56. return 0;
  57. }
  58. static void m4u_invalid_tlb_all(int m4u_id)
  59. {
  60. m4u_invalid_tlb(m4u_id, gM4U_L2_enable, 1, 0, 0);
  61. }
  62. void m4u_invalid_tlb_by_range(m4u_domain_t *m4u_domain, unsigned int mva_start, unsigned int mva_end)
  63. {
  64. int i;
  65. /* to-do: should get m4u connected to domain here */
  66. for (i = 0; i < TOTAL_M4U_NUM; i++)
  67. m4u_invalid_tlb(i, gM4U_L2_enable, 0, mva_start, mva_end);
  68. /* m4u_invalid_tlb_all(0); */
  69. /* m4u_invalid_tlb_all(1); */
  70. }
  71. void m4u_invalid_tlb_sec(int m4u_id, int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
  72. {
  73. unsigned int reg = 0;
  74. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  75. if (mva_start >= mva_end)
  76. isInvAll = 1;
  77. if (!isInvAll) {
  78. mva_start = round_down(mva_start, SZ_4K);
  79. mva_end = round_up(mva_end, SZ_4K);
  80. }
  81. reg = F_MMU_INV_SEC_EN_L2;
  82. reg |= F_MMU_INV_SEC_EN_L1;
  83. M4U_WriteReg32(m4u_base, REG_INVLID_SEL_SEC, reg);
  84. if (isInvAll)
  85. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_ALL);
  86. else {
  87. /*
  88. unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
  89. unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
  90. unsigned int type = max(type_start, type_end);
  91. unsigned int alignment;
  92. if(type > MMU_PT_TYPE_SUPERSECTION)
  93. type = MMU_PT_TYPE_SUPERSECTION;
  94. alignment = m4u_get_pt_type_size(type) - 1;
  95. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
  96. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
  97. M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
  98. */
  99. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA_SEC , mva_start);
  100. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA_SEC, mva_end);
  101. M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_RANGE);
  102. }
  103. if (!isInvAll) {
  104. while (!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE_SEC))
  105. ;
  106. M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE_SEC, 0);
  107. }
  108. }
  109. void m4u_invalid_tlb_sec_by_range(int m4u_id,
  110. unsigned int mva_start,
  111. unsigned int mva_end)
  112. {
  113. m4u_invalid_tlb_sec(m4u_id, gM4U_L2_enable, 0, mva_start, mva_end);
  114. }
  115. static int __m4u_dump_rs_info(unsigned int va[], unsigned int pa[], unsigned int st[], unsigned int pte[])
  116. {
  117. int i;
  118. M4ULOG_MID("m4u dump RS information =====>\n");
  119. M4ULOG_MID("id mva valid port-id pa pte larb w/r other-status\n");
  120. for (i = 0; i < MMU_TOTAL_RS_NR; i++) {
  121. M4ULOG_MID("%d: 0x%8x %5d 0x%3x 0x%8x 0x%8x %d %d 0x%3x\n", i,
  122. F_MMU_RSx_VA_GET(va[i]), F_MMU_RSx_VA_VALID(va[i]),
  123. F_MMU_RSx_VA_PID(va[i]), pa[i], pte[i], F_MMU_RSx_ST_LID(st[i]),
  124. F_MMU_RSx_ST_WRT(st[i]), F_MMU_RSx_ST_OTHER(st[i]));
  125. }
  126. M4ULOG_MID("m4u dump RS information done =====>\n");
  127. return 0;
  128. }
  129. static int m4u_dump_rs_info(int m4u_index, int m4u_slave_id)
  130. {
  131. unsigned long m4u_base = gM4UBaseAddr[m4u_index];
  132. int i;
  133. unsigned int va[MMU_TOTAL_RS_NR], pa[MMU_TOTAL_RS_NR], st[MMU_TOTAL_RS_NR], pte[MMU_TOTAL_RS_NR];
  134. for (i = 0; i < MMU_TOTAL_RS_NR; i++) {
  135. va[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_VA(m4u_slave_id, i)));
  136. pa[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_PA(m4u_slave_id, i)));
  137. st[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_ST(m4u_slave_id, i)));
  138. pte[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_2ND_BASE(m4u_slave_id, i)));
  139. }
  140. M4ULOG_MID("m4u dump RS information index: %d=====>\n", m4u_slave_id);
  141. __m4u_dump_rs_info(va, pa, st, pte);
  142. M4ULOG_MID("m4u dump RS information done =====>\n");
  143. return 0;
  144. }
  145. static inline void m4u_clear_intr(unsigned int m4u_id)
  146. {
  147. m4uHw_set_field_by_mask(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, F_INT_L2_CLR_BIT, F_INT_L2_CLR_BIT);
  148. }
  149. static inline void m4u_enable_intr(unsigned int m4u_id)
  150. {
  151. M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0x6f);
  152. M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0xffffffff);
  153. }
  154. static inline void m4u_disable_intr(unsigned int m4u_id)
  155. {
  156. M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0);
  157. M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0);
  158. }
  159. static inline void m4u_intr_modify_all(unsigned long enable)
  160. {
  161. int i;
  162. for (i = 0; i < TOTAL_M4U_NUM; i++)
  163. if (enable)
  164. m4u_enable_intr(i);
  165. else
  166. m4u_disable_intr(i);
  167. }
  168. struct mau_config_info {
  169. int m4u_id;
  170. int m4u_slave_id;
  171. int mau_set;
  172. unsigned int start;
  173. unsigned int end;
  174. unsigned int port_mask;
  175. unsigned int larb_mask;
  176. unsigned int write_monitor;/* :1; */
  177. unsigned int virt;/* :1; */
  178. unsigned int io;/* :1; */
  179. unsigned int start_bit32;/* :1; */
  180. unsigned int end_bit32;/* :1; */
  181. };
/***********************************************************/
/**
 * @param m4u_id       IOMMU main id
 * @param m4u_slave_id IOMMU slave id
 * @param mau_set      MAU set/entry (3 MAU sets per IOMMU)
 * @param wr           write monitor enable: 0 for read, 1 for write.
 *                     NOTE: cannot monitor read and write using one MAU set!
 * @param vir          virtual monitor enable? (if enabled we monitor MVA, else PA)
 * @param io           MAU at input or output of RS. 0 for input, 1 for output.
 *                     input:  MAU @ RS input, can monitor MVA or PA (bypass m4u);
 *                     output: MAU @ RS output, can monitor PA to EMI
 *                             (bypass m4u, or after MVA translation)
 * @param bit32        enable bit-32 monitor?
 * @param start        start address of monitor (any address, no alignment required)
 * @param end          end address of monitor (any address, no alignment required)
 * @param port_mask    port mask or AXI_ID[4:0] mask
 * @param larb_mask    larb[0..7] mask or AXI_ID[7:5] mask
 *
 * @return 0 on success, -1 when the IOMMU base address is not yet mapped
 * @remark monitor range is [start, end)
 * @author K Zhang @date 2013/11/13
 ************************************************************/
  205. int mau_start_monitor(int m4u_id, int m4u_slave_id, int mau_set,
  206. int wr, int vir, int io, int bit32,
  207. unsigned int start, unsigned int end, unsigned int port_mask, unsigned int larb_mask)
  208. {
  209. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  210. if (0 == m4u_base)
  211. return -1;
  212. M4U_WriteReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set), start);
  213. M4U_WriteReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set), !!(bit32));
  214. M4U_WriteReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set), end);
  215. M4U_WriteReg32(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave_id, mau_set), !!(bit32));
  216. M4U_WriteReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set), port_mask);
  217. m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id),
  218. F_MAU_LARB_MSK(mau_set), F_MAU_LARB_VAL(mau_set, larb_mask));
  219. m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id),
  220. F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(io, mau_set));
  221. m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id),
  222. F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(wr, mau_set));
  223. m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id),
  224. F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(vir, mau_set));
  225. return 0;
  226. }
  227. int config_mau(M4U_MAU_STRUCT mau)
  228. {
  229. int i;
  230. int free_id = -1;
  231. int m4u_id = m4u_port_2_m4u_id(mau.port);
  232. int larb = m4u_port_2_larb_id(mau.port);
  233. unsigned int MVAStart = mau.mva;
  234. unsigned int MVAEnd = mau.mva + mau.size;
  235. if (0 != m4u_id)
  236. return -1;
  237. for (i = 0; i < M4U0_MAU_NR; i++) {
  238. if (0 != gM4u0_mau[i].Enabled) {
  239. if (MVAStart >= gM4u0_mau[i].MVAStart && MVAEnd <= gM4u0_mau[i].MVAEnd) { /* no overlap */
  240. if (mau.enable == 0) {
  241. gM4u0_mau[i].Enabled = 0;
  242. mau_start_monitor(0, 0, i, 0, 0, 0, 0, 0, 0, 0, 0);
  243. continue;
  244. }
  245. }
  246. } else {
  247. free_id = i;
  248. }
  249. }
  250. if (mau.enable == 0)
  251. return 0;
  252. if (free_id == -1) {
  253. if (mau.force == 0)
  254. return -1;
  255. }
  256. else {
  257. free_id = gMAU_candidate_id;
  258. if (0 == gMAU_candidate_id)
  259. gMAU_candidate_id = M4U0_MAU_NR - 1;
  260. else
  261. gMAU_candidate_id--;
  262. }
  263. gM4u0_mau[free_id].Enabled = 1;
  264. gM4u0_mau[free_id].MVAStart = MVAStart;
  265. gM4u0_mau[free_id].MVAEnd = MVAEnd;
  266. gM4u0_mau[free_id].port = mau.port;
  267. mau_start_monitor(m4u_id, larb_2_m4u_slave_id(larb), free_id, (int)mau.write,
  268. 1, 0, 0, MVAStart, MVAEnd, 1 << m4u_port_2_larb_port(mau.port), 1 << larb);
  269. return free_id;
  270. }
  271. /* notes: you must fill cfg->m4u_id/m4u_slave_id/mau_set before call this func. */
  272. int mau_get_config_info(struct mau_config_info *cfg)
  273. {
  274. int m4u_id = cfg->m4u_id;
  275. int m4u_slave_id = cfg->m4u_slave_id;
  276. int mau_set = cfg->mau_set;
  277. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  278. cfg->start = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set));
  279. cfg->end = M4U_ReadReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set));
  280. cfg->start_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
  281. cfg->end_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
  282. cfg->port_mask = M4U_ReadReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set));
  283. cfg->larb_mask = m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id), F_MAU_LARB_MSK(mau_set));
  284. cfg->io = !!(m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set)));
  285. cfg->write_monitor =
  286. !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set));
  287. cfg->virt = !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set));
  288. return 0;
  289. }
  290. int __mau_dump_status(int m4u_id, int m4u_slave_id, int mau)
  291. {
  292. unsigned long m4u_base;
  293. unsigned int status;
  294. unsigned int assert_id, assert_addr, assert_b32;
  295. int larb, port;
  296. struct mau_config_info mau_cfg;
  297. m4u_base = gM4UBaseAddr[m4u_id];
  298. status = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ST(m4u_slave_id));
  299. if (status & (1 << mau)) {
  300. M4ULOG_HIGH("mau_assert in set %d\n", mau);
  301. assert_id = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ID(m4u_slave_id, mau));
  302. assert_addr = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR(m4u_slave_id, mau));
  303. assert_b32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR_BIT32(m4u_slave_id, mau));
  304. larb = F_MMU_MAU_ASSERT_ID_LARB(assert_id);
  305. port = F_MMU_MAU_ASSERT_ID_PORT(assert_id);
  306. M4ULOG_HIGH("id=0x%x(%s),addr=0x%x,b32=0x%x\n", assert_id,
  307. m4u_get_port_name(larb_port_2_m4u_port(larb, port)), assert_addr, assert_b32);
  308. M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), (1 << mau));
  309. M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), 0);
  310. mau_cfg.m4u_id = m4u_id;
  311. mau_cfg.m4u_slave_id = m4u_slave_id;
  312. mau_cfg.mau_set = mau;
  313. mau_get_config_info(&mau_cfg);
  314. M4ULOG_HIGH("mau_cfg: start=0x%x,end=0x%x,virt(%d),io(%d),wr(%d),s_b32(%d),e_b32(%d)\n",
  315. mau_cfg.start, mau_cfg.end, mau_cfg.virt, mau_cfg.io,
  316. mau_cfg.write_monitor, mau_cfg.start_bit32, mau_cfg.end_bit32);
  317. } else
  318. M4ULOG_MID("mau no assert in set %d\n", mau);
  319. return 0;
  320. }
  321. int mau_dump_status(int m4u_id, int m4u_slave_id)
  322. {
  323. int i;
  324. for (i = 0; i < MAU_NR_PER_M4U_SLAVE; i++)
  325. __mau_dump_status(m4u_id, m4u_slave_id, i);
  326. return 0;
  327. }
  328. int m4u_dump_reg(int m4u_index, unsigned int start)
  329. {
  330. int i;
  331. M4UINFO("Register Start =======\n");
  332. for (i = 0; i < 368 / 8; i += 4) {
  333. M4UINFO("+0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", start + 8 * i,
  334. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 0),
  335. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 1),
  336. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 2),
  337. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 3),
  338. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 4),
  339. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 5),
  340. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 6),
  341. M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 7));
  342. }
  343. M4UINFO("Register End ==========\n");
  344. return 0;
  345. }
  346. unsigned int m4u_get_main_descriptor(int m4u_id, int m4u_slave_id, int idx)
  347. {
  348. unsigned int regValue = 0;
  349. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  350. regValue = F_READ_ENTRY_EN
  351. | F_READ_ENTRY_MMx_MAIN(m4u_slave_id)
  352. | F_READ_ENTRY_MAIN_IDX(idx);
  353. M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
  354. while (M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN)
  355. ;
  356. return M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
  357. }
  358. unsigned int m4u_get_main_tag(int m4u_id, int m4u_slave_id, int idx)
  359. {
  360. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  361. return M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(m4u_slave_id, idx));
  362. }
  363. void m4u_get_main_tlb(int m4u_id, int m4u_slave_id, int idx, mmu_tlb_t *pTlb)
  364. {
  365. pTlb->tag = m4u_get_main_tag(m4u_id, m4u_slave_id, idx);
  366. pTlb->desc = m4u_get_main_descriptor(m4u_id, m4u_slave_id, idx);
  367. }
  368. unsigned int m4u_get_pfh_tlb(int m4u_id, int set, int page, int way, mmu_tlb_t *pTlb)
  369. {
  370. unsigned int regValue = 0;
  371. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  372. regValue = F_READ_ENTRY_EN
  373. | F_READ_ENTRY_PFH
  374. | F_READ_ENTRY_PFH_IDX(set)
  375. | F_READ_ENTRY_PFH_PAGE_IDX(page)
  376. | F_READ_ENTRY_PFH_WAY(way);
  377. M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
  378. while (M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN)
  379. ;
  380. pTlb->desc = M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
  381. pTlb->tag = M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG_RDATA);
  382. return 0;
  383. }
  384. unsigned int m4u_get_pfh_tag(int m4u_id, int set, int page, int way)
  385. {
  386. mmu_tlb_t tlb;
  387. m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
  388. return tlb.tag;
  389. }
  390. unsigned int m4u_get_pfh_descriptor(int m4u_id, int set, int page, int way)
  391. {
  392. mmu_tlb_t tlb;
  393. m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
  394. return tlb.desc;
  395. }
  396. int m4u_dump_main_tlb(int m4u_id, int m4u_slave_id)
  397. {
  398. /* M4U related */
  399. unsigned int i = 0;
  400. mmu_tlb_t tlb;
  401. M4ULOG_HIGH("dump main tlb: m4u %d ====>\n", m4u_id);
  402. for (i = 0; i < gM4UTagCount[m4u_id]; i++) {
  403. m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
  404. M4ULOG_HIGH("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
  405. if ((i+1)%8 == 0)
  406. M4ULOG_HIGH("===\n");
  407. }
  408. return 0;
  409. }
  410. int m4u_dump_invalid_main_tlb(int m4u_id, int m4u_slave_id)
  411. {
  412. unsigned int i = 0;
  413. mmu_tlb_t tlb;
  414. M4UMSG("dump inv main tlb=>\n");
  415. for (i = 0; i < gM4UTagCount[m4u_id]; i++) {
  416. m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
  417. if ((tlb.tag&(F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
  418. == (F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
  419. M4ULOG_HIGH("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
  420. }
  421. M4ULOG_HIGH("\n");
  422. return 0;
  423. }
  424. static unsigned int imu_pfh_tag_to_va(int mmu, int set, int way, unsigned int tag)
  425. {
  426. unsigned int tmp;
  427. if (tag&F_PFH_TAG_LAYER_BIT)
  428. return (F_PFH_TAG_VA_GET(mmu, tag)|((set)<<15));
  429. tmp = F_PFH_TAG_VA_GET(mmu, tag);
  430. tmp &= F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu);
  431. tmp |= (set)<<23;
  432. return tmp;
  433. }
  434. int m4u_dump_pfh_tlb(int m4u_id)
  435. {
  436. unsigned int regval;
  437. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  438. int result = 0;
  439. int set_nr, way_nr, set, way;
  440. int valid;
  441. set_nr = MMU_SET_NR(m4u_id);
  442. way_nr = MMU_WAY_NR;
  443. M4ULOG_HIGH("dump pfh_tlb: m4u %d ====>\n", m4u_id);
  444. for (way = 0; way < way_nr; way++) {
  445. for (set = 0; set < set_nr; set++) {
  446. int page;
  447. mmu_tlb_t tlb;
  448. regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
  449. valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
  450. m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
  451. M4ULOG_HIGH("va(0x%x) lay(%d) 16x(%d) sec(%d) pfh(%d) v(%d),set(%d),way(%d), 0x%x:",
  452. imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag),
  453. !!(tlb.tag & F_PFH_TAG_LAYER_BIT),
  454. !!(tlb.tag & F_PFH_TAG_16X_BIT),
  455. !!(tlb.tag & F_PFH_TAG_SEC_BIT),
  456. !!(tlb.tag & F_PFH_TAG_AUTO_PFH),
  457. valid,
  458. set, way,
  459. tlb.desc);
  460. for (page = 1; page < 8; page++) {
  461. m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
  462. M4ULOG_HIGH("0x%x:", tlb.desc);
  463. }
  464. M4ULOG_HIGH("\n");
  465. }
  466. }
  467. return result;
  468. }
  469. int m4u_get_pfh_tlb_all(int m4u_id, mmu_pfh_tlb_t *pfh_buf)
  470. {
  471. unsigned int regval;
  472. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  473. int set_nr, way_nr, set, way;
  474. int valid;
  475. int pfh_id = 0;
  476. set_nr = MMU_SET_NR(m4u_id);
  477. way_nr = MMU_WAY_NR;
  478. for (way = 0; way < way_nr; way++) {
  479. for (set = 0; set < set_nr; set++) {
  480. int page;
  481. mmu_tlb_t tlb;
  482. regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
  483. valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
  484. m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
  485. pfh_buf[pfh_id].tag = tlb.tag;
  486. pfh_buf[pfh_id].va = imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag);
  487. pfh_buf[pfh_id].layer = !!(tlb.tag & F_PFH_TAG_LAYER_BIT);
  488. pfh_buf[pfh_id].x16 = !!(tlb.tag & F_PFH_TAG_16X_BIT);
  489. pfh_buf[pfh_id].sec = !!(tlb.tag & F_PFH_TAG_SEC_BIT);
  490. pfh_buf[pfh_id].pfh = !!(tlb.tag & F_PFH_TAG_AUTO_PFH);
  491. pfh_buf[pfh_id].set = set;
  492. pfh_buf[pfh_id].way = way;
  493. pfh_buf[pfh_id].valid = valid;
  494. pfh_buf[pfh_id].desc[0] = tlb.desc;
  495. pfh_buf[pfh_id].page_size = pfh_buf[pfh_id].layer ? MMU_SMALL_PAGE_SIZE : MMU_SECTION_SIZE;
  496. for (page = 1; page < MMU_PAGE_PER_LINE; page++) {
  497. m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
  498. pfh_buf[pfh_id].desc[page] = tlb.desc;
  499. }
  500. pfh_id++;
  501. }
  502. }
  503. return 0;
  504. }
  505. int m4u_confirm_main_range_invalidated(int m4u_index, int m4u_slave_id, unsigned int MVAStart, unsigned int MVAEnd)
  506. {
  507. unsigned int i;
  508. unsigned int regval;
  509. /* /> check Main TLB part */
  510. for (i = 0; i < gM4UTagCount[m4u_index]; i++) {
  511. regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
  512. if (regval & (F_MAIN_TLB_VALID_BIT)) {
  513. unsigned int tag_s, tag_e, sa, ea;
  514. int layer = regval&F_MAIN_TLB_LAYER_BIT;
  515. int large = regval&F_MAIN_TLB_16X_BIT;
  516. tag_s = regval & F_MAIN_TLB_VA_MSK;
  517. sa = MVAStart & (~(PAGE_SIZE-1));
  518. ea = MVAEnd | (PAGE_SIZE-1);
  519. if (layer) { /* pte */
  520. if (large)
  521. tag_e = tag_s + MMU_LARGE_PAGE_SIZE - 1;
  522. else
  523. tag_e = tag_s + PAGE_SIZE - 1;
  524. if (!((tag_e < sa) || (tag_s > ea))) {
  525. M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
  526. i, m4u_index, MVAStart, MVAEnd, regval);
  527. return -1;
  528. }
  529. } else {
  530. if (large)
  531. tag_e = tag_s + MMU_SUPERSECTION_SIZE - 1;
  532. else
  533. tag_e = tag_s + MMU_SECTION_SIZE - 1;
  534. if ((tag_s >= sa) && (tag_e <= ea)) {
  535. M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
  536. i, m4u_index, MVAStart, MVAEnd, regval);
  537. return -1;
  538. }
  539. }
  540. }
  541. }
  542. return 0;
  543. }
/*
 * Verify that no TLB entry (main TLB or prefetch TLB) still covers any
 * address in [MVAStart, MVAEnd] on M4U @m4u_index.
 *
 * Returns 0 when the range is fully invalidated, -1 (after logging the
 * offending entry) when a still-valid entry overlaps the range.
 */
int m4u_confirm_range_invalidated(int m4u_index, unsigned int MVAStart, unsigned int MVAEnd)
{
	/* NOTE(review): i is never updated; it always prints 0 in the logs below. */
	unsigned int i = 0;
	unsigned int regval;
	unsigned long m4u_base = gM4UBaseAddr[m4u_index];
	int result = 0;
	int set_nr, way_nr, set, way;
	/* /> check Main TLB part */
	result = m4u_confirm_main_range_invalidated(m4u_index, 0, MVAStart, MVAEnd);
	if (result < 0)
		return -1;
	/* m4u0 has a second slave MMU whose main TLB must also be clean. */
	if (m4u_index == 0) {
		result = m4u_confirm_main_range_invalidated(m4u_index, 1, MVAStart, MVAEnd);
		if (result < 0)
			return -1;
	}
	/* Walk the whole prefetch TLB (set x way) and decode each valid tag. */
	set_nr = MMU_SET_NR(m4u_index);
	way_nr = MMU_WAY_NR;
	for (way = 0; way < way_nr; way++) {
		for (set = 0; set < set_nr; set++) {
			regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
			if (regval & F_MMU_PFH_VLD_BIT(set, way)) {
				unsigned int tag = m4u_get_pfh_tag(m4u_index, set, 0, way);
				unsigned int tag_s, tag_e, sa, ea;
				/* layer set => entry caches PTEs; clear => section entries. */
				int layer = tag&F_PFH_TAG_LAYER_BIT;
				/* 16X bit selects the large variant of the entry granularity. */
				int large = tag&F_PFH_TAG_16X_BIT;
				tag_s = imu_pfh_tag_to_va(m4u_index, set, way, tag);
				/* Expand the query range to whole pages before comparing. */
				sa = MVAStart & (~(PAGE_SIZE-1));
				ea = MVAEnd | (PAGE_SIZE-1);
				if (layer) { /* pte */
					/* One entry caches 8 consecutive (large) pages. */
					if (large)
						tag_e = tag_s + MMU_LARGE_PAGE_SIZE*8 - 1;
					else
						tag_e = tag_s + PAGE_SIZE*8 - 1;
					/* Fail on any overlap between entry span and range. */
					if (!((tag_e < sa) || (tag_s > ea))) {
						M4UERR(
							"main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
							i, m4u_index, MVAStart, MVAEnd, regval);
						return -1;
					}
				} else {
					/* Section-level entry: 8 (super)sections per entry. */
					if (large)
						tag_e = tag_s + MMU_SUPERSECTION_SIZE*8 - 1;
					else
						tag_e = tag_s + MMU_SECTION_SIZE*8 - 1;
					/* if((tag_s>=sa)&&(tag_e<=ea)) */
					if (!((tag_e < sa) || (tag_s > ea))) {
						M4UERR(
							"main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
							i, m4u_index, MVAStart, MVAEnd, regval);
						return -1;
					}
				}
			}
		}
	}
	return result;
}
  602. int m4u_confirm_main_all_invalid(int m4u_index, int m4u_slave_id)
  603. {
  604. unsigned int i;
  605. unsigned int regval;
  606. for (i = 0; i < gM4UTagCount[m4u_index]; i++) {
  607. regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
  608. if (regval & (F_MAIN_TLB_VALID_BIT)) {
  609. M4UERR("main: i=%d, idx=0x%x, RegValue=0x%x\n", i, m4u_index, regval);
  610. return -1;
  611. }
  612. }
  613. return 0;
  614. }
  615. int m4u_confirm_pfh_all_invalid(int m4u_index)
  616. {
  617. unsigned int regval;
  618. unsigned long m4u_base = gM4UBaseAddr[m4u_index];
  619. int set_nr, way_nr, set, way;
  620. set_nr = MMU_SET_NR(m4u_index);
  621. way_nr = MMU_WAY_NR;
  622. for (way = 0; way < way_nr; way++)
  623. for (set = 0; set < set_nr; set++)
  624. regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
  625. if (regval & F_MMU_PFH_VLD_BIT(set, way))
  626. return -1;
  627. return 0;
  628. }
  629. int m4u_confirm_all_invalidated(int m4u_index)
  630. {
  631. if (m4u_confirm_main_all_invalid(m4u_index, 0))
  632. return -1;
  633. if (m4u_index == 0)
  634. if (m4u_confirm_main_all_invalid(m4u_index, 1))
  635. return -1;
  636. if (m4u_confirm_pfh_all_invalid(m4u_index))
  637. return -1;
  638. return 0;
  639. }
  640. int m4u_power_on(int m4u_index)
  641. {
  642. return 0;
  643. }
  644. int m4u_power_off(int m4u_index)
  645. {
  646. return 0;
  647. }
/*
 * Ungate the infra M4U clock.  Always returns 0; on the common-clock
 * path a failure is only logged.
 */
static int m4u_clock_on(void)
{
#if defined(CONFIG_MTK_CLKMGR)
	/* no m4u, smi CG */
	/* Legacy MTK clkmgr API. */
	enable_clock(MT_CG_INFRA_M4U, "infra_m4u");
	/* enable_clock(MT_CG_INFRA_SMI, "infra_smi"); */
#else
	/* Common clock framework path. */
	int ret;

	ret = clk_prepare_enable(gM4uDev->infra_m4u);
	if (ret)
		M4UMSG("error: prepare clk infra m4u fail!.\n");
#endif
	return 0;
}
  662. /*
  663. static int m4u_clock_off(void)
  664. {
  665. #if defined(CONFIG_MTK_CLKMGR)
  666. disable_clock(MT_CG_INFRA_M4U, "infra_m4u");
  667. disable_clock(MT_CG_INFRA_SMI, "infra_smi");
  668. #else
  669. clk_disable_unprepare(gM4uDev->infra_m4u);
  670. #endif
  671. return 0;
  672. }
  673. */
#if !defined(CONFIG_MTK_CLKMGR)
/*
 * Clock names used in error logs, indexed by the same constants
 * (SMI_COMMON_CLK, DISP0_SMI_LARB0_CLK, ...) used for gM4uDev->smi_clk[].
 * NOTE(review): ordering assumed to match those index constants — confirm
 * against their declaration.
 */
const char *smi_clk_name[] = {
	"smi_common", "m4u_disp0_smi_larb0", "m4u_vdec0_vdec", "m4u_vdec1_larb",
	"m4u_img_image_larb2_smi", "m4u_venc_venc", "m4u_venc_larb"
};
#endif
/*
 * Enable the clock(s) feeding SMI larb @larb.  Larb 1 (vdec) and larb 3
 * (venc, MT6735/MT6753 only) need two clocks each.  Unknown larb ids are
 * only logged.  Always returns 0; common-clock failures are logged only.
 */
static int larb_clock_on(int larb)
{
#if defined(CONFIG_MTK_CLKMGR)
	/* Legacy MTK clkmgr API. */
	switch (larb) {
	case 0:
		enable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
		break;
	case 1:
		enable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
		enable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
		break;
	case 2:
		enable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
		break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
	case 3:
		enable_clock(MT_CG_VENC_VENC, "m4u_larb3");
		enable_clock(MT_CG_VENC_LARB, "m4u_larb3");
		break;
#endif
	default:
		M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
		break;
	}
#else
	/* Common clock framework path. */
	int ret;

	switch (larb) {
	case 0:
		ret = clk_prepare_enable(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[DISP0_SMI_LARB0_CLK]);
		break;
	case 1:
		ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC0_VDEC_CLK]);
		ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC1_LARB_CLK]);
		break;
	case 2:
		ret = clk_prepare_enable(gM4uDev->smi_clk[LARB2_SMI_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[LARB2_SMI_CLK]);
		break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
	case 3:
		ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_VENC_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_VENC_CLK]);
		ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_LARB_CLK]);
		if (ret)
			M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_LARB_CLK]);
		break;
#endif
	default:
		M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
		break;
	}
#endif
	return 0;
}
/*
 * Disable the clock(s) feeding SMI larb @larb; the mirror of
 * larb_clock_on().  Unknown larb ids are only logged.  Always returns 0.
 */
static int larb_clock_off(int larb)
{
#if defined(CONFIG_MTK_CLKMGR)
	/* Legacy MTK clkmgr API. */
	switch (larb) {
	case 0:
		disable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
		break;
	case 1:
		disable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
		disable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
		break;
	case 2:
		disable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
		break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
	case 3:
		disable_clock(MT_CG_VENC_VENC, "m4u_larb3");
		disable_clock(MT_CG_VENC_LARB, "m4u_larb3");
		break;
#endif
	default:
		M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
		break;
	}
#else
	/* Common clock framework path. */
	switch (larb) {
	case 0:
		clk_disable_unprepare(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
		break;
	case 1:
		clk_disable_unprepare(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
		clk_disable_unprepare(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
		break;
	case 2:
		clk_disable_unprepare(gM4uDev->smi_clk[LARB2_SMI_CLK]);
		break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
	case 3:
		clk_disable_unprepare(gM4uDev->smi_clk[VENC_VENC_CLK]);
		clk_disable_unprepare(gM4uDev->smi_clk[VENC_LARB_CLK]);
		break;
#endif
	default:
		M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
		break;
	}
#endif
	return 0;
}
  791. static int larb_clock_all_on(void)
  792. {
  793. int i;
  794. for (i = 0 ; i < SMI_LARB_NR ; i++)
  795. larb_clock_on(i);
  796. return 0;
  797. }
  798. static int larb_clock_all_off(void)
  799. {
  800. int i;
  801. for (i = 0 ; i < SMI_LARB_NR ; i++)
  802. larb_clock_off(i);
  803. return 0;
  804. }
/* Enable the SMI common fabric clock (exported for other drivers). */
void smi_common_clock_on(void)
{
#if defined(CONFIG_MTK_CLKMGR)
	enable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
	/* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x1); */
#else
	/* Common clock framework path; failure is logged only. */
	int ret = clk_prepare_enable(gM4uDev->smi_clk[SMI_COMMON_CLK]);

	if (ret)
		M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[SMI_COMMON_CLK]);
#endif
}
/* Enable the display larb0 clock. */
void smi_larb0_clock_on(void)
{
#if defined(CONFIG_MTK_CLKMGR)
	enable_clock(MT_CG_DISP0_SMI_LARB0, "smi_larb0");
	/* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x1); */
#else
	/* Common clock framework path; failure is logged only. */
	int ret = clk_prepare_enable(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);

	if (ret)
		M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[DISP0_SMI_LARB0_CLK]);
#endif
}
  827. EXPORT_SYMBOL(smi_common_clock_on);
/* Disable the SMI common fabric clock (exported for other drivers). */
void smi_common_clock_off(void)
{
#if defined(CONFIG_MTK_CLKMGR)
	disable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
	/* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x0); */
#else
	clk_disable_unprepare(gM4uDev->smi_clk[SMI_COMMON_CLK]);
#endif
}
/* Disable the display larb0 clock. */
void smi_larb0_clock_off(void)
{
#if defined(CONFIG_MTK_CLKMGR)
	disable_clock(MT_CG_DISP0_SMI_LARB0, "smi_larb0");
	/* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x0); */
#else
	clk_disable_unprepare(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
#endif
}
  846. EXPORT_SYMBOL(smi_common_clock_off);
/*
 * Claim a free "sequential range" slot for @port covering
 * [MVAStart, MVAEnd] and program the matching SQ_START/SQ_END hardware
 * registers.
 *
 * Returns the slot id on success, -1 when the (1MB-aligned) range
 * overlaps an existing enabled range or no free slot exists.  Ranges
 * smaller than a page are skipped and -1 is returned without error.
 *
 * Bookkeeping in gM4USeq[] is protected by gM4u_seq_mutex; register
 * writes by gM4u_reg_lock.
 */
int m4u_insert_seq_range(M4U_PORT_ID port, unsigned int MVAStart, unsigned int MVAEnd)
{
	int i, free_id = -1;
	unsigned int m4u_index = m4u_port_2_m4u_id(port);
	unsigned int m4u_slave_id = m4u_port_2_m4u_slave_id(port);
	/* Per-slave slice of the range table for this M4U. */
	M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;

	M4ULOG_MID("m4u_insert_seq_range , module:%s, MVAStart:0x%x, MVAEnd:0x%x\n",
		m4u_get_port_name(port), MVAStart, MVAEnd);
	/* Too small to be worth a hardware range slot. */
	if (MVAEnd - MVAStart < PAGE_SIZE) {
		M4ULOG_MID("too small size, skip to insert! module:%s, MVAStart:0x%x, size:%d\n",
			m4u_get_port_name(port), MVAStart, MVAEnd - MVAStart + 1);
		return free_id;
	}
	/* =============================================== */
	/* every seq range has to align to 1M Bytes */
	MVAStart &= ~M4U_SEQ_ALIGN_MSK;
	MVAEnd |= M4U_SEQ_ALIGN_MSK;
	mutex_lock(&gM4u_seq_mutex);
	/* ================================================================== */
	/* check if the range is overlap with previous ones */
	for (i = 0; i < M4U_SEQ_NUM(m4u_index); i++) {
		if (1 == pSeq[i].Enabled) {
			if (MVAEnd < pSeq[i].MVAStart || MVAStart > pSeq[i].MVAEnd)
				continue;
			else {
				M4ULOG_HIGH("insert range overlap!: larb=%d,module=%s\n",
					m4u_port_2_larb_id(port), m4u_get_port_name(port));
				M4ULOG_HIGH(
					"warning: insert tlb range is overlapped with previous ranges, current process=%s,!\n",
					current->comm);
				M4ULOG_HIGH("module=%s, mva_start=0x%x, mva_end=0x%x\n",
					m4u_get_port_name(port), MVAStart, MVAEnd);
				M4ULOG_HIGH("overlapped range id=%d, module=%s, mva_start=0x%x, mva_end=0x%x\n",
					i, m4u_get_port_name(pSeq[i].port), pSeq[i].MVAStart, pSeq[i].MVAEnd);
				mutex_unlock(&gM4u_seq_mutex);
				return -1;
			}
		} else
			/* Remember the last disabled slot seen. */
			free_id = i;
	}
	if (free_id == -1) {
		M4ULOG_MID("warning: can not find available range\n");
		mutex_unlock(&gM4u_seq_mutex);
		return -1;
	}
	/* /> record range information in array */
	pSeq[free_id].Enabled = 1;
	pSeq[free_id].port = port;
	pSeq[free_id].MVAStart = MVAStart;
	pSeq[free_id].MVAEnd = MVAEnd;
	mutex_unlock(&gM4u_seq_mutex);
	/* /> set the range register */
	/* Start register carries the enable bit alongside the VA field. */
	MVAStart &= F_SQ_VA_MASK;
	MVAStart |= F_SQ_EN_BIT;
	/* align mvaend to 1M */
	MVAEnd |= ~F_SQ_VA_MASK;
	spin_lock(&gM4u_reg_lock);
	{
		M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_START(m4u_slave_id, free_id), MVAStart);
		M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_END(m4u_slave_id, free_id), MVAEnd);
	}
	spin_unlock(&gM4u_reg_lock);
	return free_id;
}
  911. int m4u_invalid_seq_range_by_id(int port, int seq_id)
  912. {
  913. int m4u_index = m4u_port_2_m4u_id(port);
  914. int m4u_slave_id = m4u_port_2_m4u_slave_id(port);
  915. unsigned long m4u_base = gM4UBaseAddr[m4u_index];
  916. M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;
  917. int ret = 0;
  918. mutex_lock(&gM4u_seq_mutex);
  919. {
  920. pSeq[seq_id].Enabled = 0;
  921. }
  922. mutex_unlock(&gM4u_seq_mutex);
  923. spin_lock(&gM4u_reg_lock);
  924. M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id, seq_id), 0);
  925. M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id, seq_id), 0);
  926. spin_unlock(&gM4u_reg_lock);
  927. return ret;
  928. }
  929. /*
  930. static int m4u_invalid_seq_range_by_mva(int m4u_index, int m4u_slave_id, unsigned int MVAStart, unsigned int MVAEnd)
  931. {
  932. unsigned int i;
  933. unsigned int m4u_base = gM4UBaseAddr[m4u_index];
  934. M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + SEQ_NR_PER_M4U_SLAVE*m4u_slave_id;
  935. int ret=-1;
  936. MVAStart &= ~M4U_SEQ_ALIGN_MSK;
  937. MVAEnd |= M4U_SEQ_ALIGN_MSK;
  938. mutex_lock(&gM4u_seq_mutex);
  939. for(i=0; i<SEQ_NR_PER_M4U_SLAVE; i++) {
  940. if(pSeq[i].Enabled == 1 &&
  941. pSeq[i].MVAStart>=MVAStart &&
  942. pSeq[i].MVAEnd<=MVAEnd) {
  943. pSeq[i].Enabled = 0;
  944. spin_lock(&gM4u_reg_lock);
  945. M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id,i), 0);
  946. M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id,i), 0);
  947. spin_unlock(&gM4u_reg_lock);
  948. break;
  949. }
  950. }
  951. mutex_unlock(&gM4u_seq_mutex);
  952. return ret;
  953. }
  954. */
/*
 * Program one port's translation settings: prefetch direction/distance
 * on the M4U itself, then the per-port MMU-enable and secure bits —
 * in the larb's SMI registers for m4u0 ports, in the pericfg register
 * for the other M4U.  All register writes happen under gM4u_reg_lock.
 * Always returns 0.
 */
static int _m4u_config_port(int port, int virt, int sec, int dis, int dir)
{
	int m4u_index = m4u_port_2_m4u_id(port);
	unsigned long m4u_base = gM4UBaseAddr[m4u_index];
	unsigned long larb_base;
	unsigned int larb, larb_port;
	int ret = 0;

	M4ULOG_HIGH("config_port:%s,v%d,s%d\n",
		m4u_get_port_name(port), virt, sec);
	/* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagStart, port, virt); */
	spin_lock(&gM4u_reg_lock);
	/* Direction, one bit for each port, 1:-, 0:+ */
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIR(port),
		F_MMU_PFH_DIR(port, 1), F_MMU_PFH_DIR(port, dir));
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIST(port),
		F_MMU_PFH_DIST_MASK(port), F_MMU_PFH_DIST_VAL(port, dis));
	if (m4u_index == 0) {
		int mmu_en = 0;

		larb = m4u_port_2_larb_id(port);
		larb_port = m4u_port_2_larb_port(port);
		larb_base = gLarbBaseAddr[larb];
		/* Per-port virtualization and secure bits live in the larb. */
		m4uHw_set_field_by_mask(larb_base, SMI_LARB_MMU_EN,
			F_SMI_MMU_EN(larb_port, 1), F_SMI_MMU_EN(larb_port, !!(virt)));
		m4uHw_set_field_by_mask(larb_base, SMI_LARB_SEC_EN,
			F_SMI_SEC_EN(larb_port, 1), F_SMI_SEC_EN(larb_port, !!(sec)));
		/* multimedia engines will should set domain as 3. */
		/* m4uHw_set_field_by_mask(larb_base, REG_SMI_LARB_DOMN_OF_PORT(larb_port), */
		/* F_SMI_DOMN(larb_port, 0x3), F_SMI_DOMN(larb_port, pM4uPort->domain)); */
		/* debug use */
		/* Read back the bit to confirm the write took effect. */
		mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
		if (!!(mmu_en) != virt)
			M4ULOG_HIGH(
				"m4u_config_port error, port=%s, Virtuality=%d, mmu_en=%x (%x, %x)\n",
				m4u_get_port_name(port), virt, mmu_en,
				M4U_ReadReg32(larb_base, SMI_LARB_MMU_EN),
				F_SMI_MMU_EN(larb_port, 1));
	} else {
		/* Non-larb (peripheral) ports are enabled via pericfg. */
		larb_port = m4u_port_2_larb_port(port);
		m4uHw_set_field_by_mask(gPericfgBaseAddr, REG_PERIAXI_BUS_CTL3,
			F_PERI_MMU_EN(larb_port, 1), F_PERI_MMU_EN(larb_port, !!(virt)));
	}
	spin_unlock(&gM4u_reg_lock);
	/* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagEnd, dis, dir); */
	return ret;
}
  1000. static inline void _m4u_port_clock_toggle(int m4u_index, int larb, int on)
  1001. {
  1002. unsigned long long start, end;
  1003. /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagStart, larb, on); */
  1004. if (m4u_index == 0) {
  1005. start = sched_clock();
  1006. if (on) {
  1007. smi_common_clock_on();
  1008. larb_clock_on(larb);
  1009. } else {
  1010. larb_clock_off(larb);
  1011. smi_common_clock_off();
  1012. }
  1013. end = sched_clock();
  1014. if (end-start > 50000000ULL) /* unit is ns */
  1015. M4ULOG_HIGH("warn: larb%d clock %d time: %lld ns\n", larb, on, end-start);
  1016. }
  1017. /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagEnd, 0, 0); */
  1018. }
/*
 * Public entry point to configure one port (native, non-array path).
 * Validates the port id, toggles the needed clocks on, then either
 * delegates to the TEE service (when built with M4U_TEE_SERVICE_ENABLE
 * and m4u_tee_en) or programs the registers via _m4u_config_port().
 * Returns 0 on success, -1 for an invalid port id.
 * NOTE(review): ret from _m4u_config_port() is ignored; 0 is always
 * returned on the configure path.
 */
int m4u_config_port(M4U_PORT_STRUCT *pM4uPort) /* native */
{
	int m4u_index;
	M4U_PORT_ID PortID;
	int larb;
	int ret;
#ifdef M4U_TEE_SERVICE_ENABLE
	unsigned int larb_port, mmu_en = 0, sec_en = 0;
#endif
	if (pM4uPort->ePortID < 0 || pM4uPort->ePortID > M4U_PORT_UNKNOWN) {
		M4UERR("port is unknown,error port is %d\n", pM4uPort->ePortID);
		return -1;
	}
	PortID = (pM4uPort->ePortID);
	m4u_index = m4u_port_2_m4u_id(PortID);
	larb = m4u_port_2_larb_id(PortID);
	/* Clocks must be running while the larb registers are touched. */
	_m4u_port_clock_toggle(m4u_index, larb, 1);
#ifdef M4U_TEE_SERVICE_ENABLE
	larb_port = m4u_port_2_larb_port(PortID);
	/* mmu_en =
	 * !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1))); */
	/* sec_en =
	 * !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1))); */
	M4ULOG_HIGH("m4u_config_port: %s, m4u_tee_en:%d, mmu_en: %d -> %d, sec_en:%d -> %d\n",
		m4u_get_port_name(PortID), m4u_tee_en, mmu_en,
		pM4uPort->Virtuality, sec_en, pM4uPort->Security);
#if 0
	if (mmu_en == pM4uPort->Virtuality && sec_en == pM4uPort->Security) {
		_m4u_port_clock_toggle(m4u_index, larb, 0);
		return 0;
	}
#endif
	/* Secure world owns the registers when the TEE service is active. */
	if (m4u_tee_en)
		m4u_config_port_tee(pM4uPort);
	else
#endif
	{
		ret = _m4u_config_port(PortID, pM4uPort->Virtuality,
			pM4uPort->Security, pM4uPort->Distance, pM4uPort->Direction);
	}
	_m4u_port_clock_toggle(m4u_index, larb, 0);
	return 0;
}
  1062. void m4u_port_array_init(struct m4u_port_array *port_array)
  1063. {
  1064. memset(port_array, 0, sizeof(struct m4u_port_array));
  1065. }
  1066. int m4u_port_array_add(struct m4u_port_array *port_array,
  1067. int port, int m4u_en, int secure)
  1068. {
  1069. if (port >= M4U_PORT_NR) {
  1070. M4UMSG("error: port_array_add, port=%d, v(%d), s(%d)\n", port, m4u_en, secure);
  1071. return -1;
  1072. }
  1073. port_array->ports[port] = M4U_PORT_ATTR_EN;
  1074. if (m4u_en)
  1075. port_array->ports[port] |= M4U_PORT_ATTR_VIRTUAL;
  1076. if (secure)
  1077. port_array->ports[port] |= M4U_PORT_ATTR_SEC;
  1078. return 0;
  1079. }
  1080. int m4u_config_port_array(struct m4u_port_array *port_array)
  1081. {
  1082. int port, larb, larb_port;
  1083. int ret = 0;
  1084. unsigned int config_larb[SMI_LARB_NR];
  1085. unsigned int regOri[SMI_LARB_NR];
  1086. unsigned int regNew[SMI_LARB_NR];
  1087. unsigned int change = 0;
  1088. unsigned char m4u_port_array[(M4U_PORT_NR+1)/2];
  1089. memset(config_larb, 0, SMI_LARB_NR * sizeof(unsigned int));
  1090. memset(regOri, 0, SMI_LARB_NR * sizeof(unsigned int));
  1091. memset(regNew, 0, SMI_LARB_NR * sizeof(unsigned int));
  1092. memset(m4u_port_array, 0, (M4U_PORT_NR+1)/2 * sizeof(unsigned char));
  1093. for (port = 0; port < M4U_PORT_NR; port++) {
  1094. if (port_array->ports[port] && M4U_PORT_ATTR_EN != 0) {
  1095. unsigned int value;
  1096. larb = m4u_port_2_larb_id(port);
  1097. larb_port = m4u_port_2_larb_port(port);
  1098. config_larb[larb] |= (1 << larb_port);
  1099. value = (!!(port_array->ports[port] && M4U_PORT_ATTR_VIRTUAL))<<larb_port;
  1100. regOri[larb] = M4U_ReadReg32(gLarbBaseAddr[larb], SMI_LARB_MMU_EN);
  1101. regNew[larb] = (regOri[larb] & (~(1 << larb_port)))
  1102. | (regNew[larb] & (~(1 << larb_port))) | value;
  1103. #ifdef M4U_TEE_SERVICE_ENABLE
  1104. {
  1105. unsigned char attr = ((!!value)<<1)|0x1;
  1106. if (port%2)
  1107. m4u_port_array[port/2] |= (attr<<4);
  1108. else
  1109. m4u_port_array[port/2] |= attr;
  1110. }
  1111. #endif
  1112. M4ULOG_LOW("m4u_config_port_array 0, 0x%x, 0x%x, 0x%x, port_array: 0x%x\n",
  1113. port_array->ports[port], value, regNew[larb], m4u_port_array[port/2]);
  1114. }
  1115. }
  1116. for (larb = 0; larb < SMI_LARB_NR; larb++) {
  1117. if (0 != config_larb[larb]) {
  1118. _m4u_port_clock_toggle(0, larb, 1);
  1119. #ifdef M4U_TEE_SERVICE_ENABLE
  1120. if (m4u_tee_en)
  1121. change = 1;
  1122. else
  1123. #endif
  1124. {
  1125. regOri[larb] = M4U_ReadReg32(gLarbBaseAddr[larb], SMI_LARB_MMU_EN);
  1126. M4ULOG_LOW("m4u_config_port_array 2 larb: %d ori reg: 0x%x, new reg: 0x%x\n",
  1127. larb, regOri[larb], regNew[larb]);
  1128. if (regOri[larb] != regNew[larb])
  1129. change = 1;
  1130. }
  1131. }
  1132. M4ULOG_MID("m4u_config_port_array 1: larb: %d, [0x%x], %d\n", larb, config_larb[larb], change);
  1133. }
  1134. #ifdef M4U_TEE_SERVICE_ENABLE
  1135. if (m4u_tee_en && 1 == change) {
  1136. m4u_config_port_array_tee(m4u_port_array);
  1137. for (larb = 0; larb < SMI_LARB_NR; larb++)
  1138. if (0 != config_larb[larb])
  1139. _m4u_port_clock_toggle(0, larb, 0);
  1140. return ret;
  1141. }
  1142. #endif
  1143. for (larb = 0; larb < SMI_LARB_NR; larb++) {
  1144. if (0 != config_larb[larb] && 1 == change) {
  1145. M4ULOG_MID("m4u_config_port_array larb: %d ori reg: 0x%x, new reg: 0x%x\n",
  1146. larb, regOri[larb], regNew[larb]);
  1147. spin_lock(&gM4u_reg_lock);
  1148. m4uHw_set_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, config_larb[larb], regNew[larb]);
  1149. spin_unlock(&gM4u_reg_lock);
  1150. }
  1151. if (0 != config_larb[larb])
  1152. _m4u_port_clock_toggle(0, larb, 0);
  1153. }
  1154. return ret;
  1155. }
  1156. void m4u_get_perf_counter(int m4u_index, int m4u_slave_id, M4U_PERF_COUNT *pM4U_perf_count)
  1157. {
  1158. unsigned long m4u_base = gM4UBaseAddr[m4u_index];
  1159. pM4U_perf_count->transaction_cnt = M4U_ReadReg32(m4u_base, REG_MMU_ACC_CNT(m4u_slave_id));
  1160. pM4U_perf_count->main_tlb_miss_cnt = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_MSCNT(m4u_slave_id));
  1161. pM4U_perf_count->pfh_tlb_miss_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_MSCNT);
  1162. pM4U_perf_count->pfh_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_CNT); /* /> Prefetch count */
  1163. pM4U_perf_count->rs_perf_cnt = M4U_ReadReg32(m4u_base, REG_MMU_RS_PERF_CNT(m4u_slave_id));
  1164. }
/*
 * Start the M4U performance monitor: pulse the counter-clear bit, then
 * set the monitor-enable bit.  Returns 0 on success, -1 for a negative
 * m4u_id.
 */
int m4u_monitor_start(int m4u_id)
{
	unsigned long m4u_base;

	if (m4u_id < 0) {
		M4UERR("ERROR m4u id ,error id is %d\n", m4u_id);
		return -1;
	}
	m4u_base = gM4UBaseAddr[m4u_id];
	M4UINFO("====m4u_monitor_start: %d======\n", m4u_id);
	/* clear GMC performance counter */
	/* Write-1-then-0 pulses the clear bit. */
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
		F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(1));
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
		F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(0));
	/* enable GMC performance monitor */
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
		F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(1));
	return 0;
}
  1184. /**
  1185. * @brief ,
  1186. * @param
  1187. * @return
  1188. */
int m4u_monitor_stop(int m4u_id)
{
	M4U_PERF_COUNT cnt;
	int m4u_index = m4u_id;
	unsigned long m4u_base;

	if (m4u_id < 0) {
		M4UERR("ERROR m4u id ,error id is %d\n", m4u_id);
		return -1;
	}
	m4u_base = gM4UBaseAddr[m4u_id];
	/* disable GMC performance monitor */
	m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
		F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(0));
	/* Only slave 0's counters are reported here. */
	m4u_get_perf_counter(m4u_index, 0, &cnt);
	/* read register get the count */
	M4ULOG_MID("[M4U%d-%d] total:%u, main miss:%u, pfh miss(walk):%u, auto pfh:%u\n",
		m4u_id, 0,
		cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt);
	return 0;
}
  1209. void m4u_print_perf_counter(int m4u_index, int m4u_slave_id, const char *msg)
  1210. {
  1211. M4U_PERF_COUNT cnt;
  1212. M4UINFO("====m4u performance count for %s m4u%d_%d======\n", msg, m4u_index, m4u_slave_id);
  1213. m4u_get_perf_counter(m4u_index, m4u_slave_id, &cnt);
  1214. M4UINFO("total trans=%u, main_miss=%u, pfh_miss=%u, pfh_cnt=%u, rs_perf_cnt=%u\n",
  1215. cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt, cnt.rs_perf_cnt);
  1216. }
/* Capacity of the register backup buffer, in BYTES (100 32-bit words). */
#define M4U_REG_BACKUP_SIZE (100*sizeof(unsigned int))
/* Backup buffer; allocated elsewhere — NOTE(review): presumably
 * M4U_REG_BACKUP_SIZE bytes, confirm at the allocation site. */
static unsigned int *pM4URegBackUp;
/* Number of 32-bit words actually saved by the last m4u_reg_backup(). */
static unsigned int gM4u_reg_backup_real_size;
/* Save one register value into 'back'. */
#define __M4U_BACKUP(base, reg, back) ((back) = M4U_ReadReg32(base, reg))
/* Write a previously saved value back into a register. */
void __M4U_RESTORE(unsigned long base, unsigned int reg, unsigned int back) {M4U_WriteReg32(base, reg, back); }
  1222. int m4u_reg_backup(void)
  1223. {
  1224. unsigned int *pReg = pM4URegBackUp;
  1225. unsigned long m4u_base;
  1226. int m4u_id, m4u_slave;
  1227. int seq, mau;
  1228. unsigned int real_size;
  1229. int pfh_dist, pfh_dir;
  1230. for (m4u_id = 0; m4u_id < TOTAL_M4U_NUM; m4u_id++) {
  1231. m4u_base = gM4UBaseAddr[m4u_id];
  1232. __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE , *(pReg++));
  1233. __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++));
  1234. __M4U_BACKUP(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++));
  1235. __M4U_BACKUP(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++));
  1236. __M4U_BACKUP(m4u_base, REG_MMU_PRIORITY , *(pReg++));
  1237. __M4U_BACKUP(m4u_base, REG_MMU_DCM_DIS , *(pReg++));
  1238. __M4U_BACKUP(m4u_base, REG_MMU_WR_LEN , *(pReg++));
  1239. __M4U_BACKUP(m4u_base, REG_MMU_HW_DEBUG , *(pReg++));
  1240. __M4U_BACKUP(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++));
  1241. __M4U_BACKUP(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++));
  1242. for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++)
  1243. __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++));
  1244. for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++)
  1245. __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++));
  1246. __M4U_BACKUP(m4u_base, REG_MMU_CTRL_REG , *(pReg++));
  1247. __M4U_BACKUP(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++));
  1248. __M4U_BACKUP(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++));
  1249. __M4U_BACKUP(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++));
  1250. for (m4u_slave = 0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++) {
  1251. for (seq = 0; seq < M4U_SEQ_NUM(m4u_id); seq++) {
  1252. __M4U_BACKUP(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++));
  1253. __M4U_BACKUP(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++));
  1254. }
  1255. for (mau = 0; mau < MAU_NR_PER_M4U_SLAVE; mau++) {
  1256. __M4U_BACKUP(m4u_base, REG_MMU_MAU_START(m4u_slave, mau) , *(pReg++));
  1257. __M4U_BACKUP(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave, mau) , *(pReg++));
  1258. __M4U_BACKUP(m4u_base, REG_MMU_MAU_END(m4u_slave, mau) , *(pReg++));
  1259. __M4U_BACKUP(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave, mau) , *(pReg++));
  1260. __M4U_BACKUP(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave, mau) , *(pReg++));
  1261. }
  1262. __M4U_BACKUP(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++));
  1263. __M4U_BACKUP(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++));
  1264. __M4U_BACKUP(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++));
  1265. __M4U_BACKUP(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++));
  1266. }
  1267. }
  1268. /* check register size (to prevent overflow) */
  1269. real_size = (pReg - pM4URegBackUp);
  1270. if (real_size > M4U_REG_BACKUP_SIZE)
  1271. m4u_aee_print("m4u_reg overflow! %d>%d\n", real_size, (int)M4U_REG_BACKUP_SIZE);
  1272. gM4u_reg_backup_real_size = real_size;
  1273. return 0;
  1274. }
/*
 * Write back every register saved by m4u_reg_backup(), in the exact same
 * order, and cross-check that the word count matches the one recorded at
 * backup time.  Returns 0.
 */
int m4u_reg_restore(void)
{
	unsigned int *pReg = pM4URegBackUp;
	unsigned long m4u_base;
	int m4u_id, m4u_slave;
	int seq, mau;
	unsigned int real_size;
	int pfh_dist, pfh_dir;

	for (m4u_id = 0; m4u_id < TOTAL_M4U_NUM; m4u_id++) {
		m4u_base = gM4UBaseAddr[m4u_id];
		__M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_PRIORITY , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_DCM_DIS , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_WR_LEN , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_HW_DEBUG , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++));
		for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++)
			__M4U_RESTORE(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++));
		for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++)
			__M4U_RESTORE(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_CTRL_REG , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++));
		__M4U_RESTORE(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++));
		for (m4u_slave = 0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++) {
			for (seq = 0; seq < M4U_SEQ_NUM(m4u_id); seq++) {
				__M4U_RESTORE(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++));
				__M4U_RESTORE(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++));
			}
			for (mau = 0; mau < MAU_NR_PER_M4U_SLAVE; mau++) {
				__M4U_RESTORE(m4u_base, REG_MMU_MAU_START(m4u_slave, mau) , *(pReg++));
				__M4U_RESTORE(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave, mau) , *(pReg++));
				__M4U_RESTORE(m4u_base, REG_MMU_MAU_END(m4u_slave, mau) , *(pReg++));
				__M4U_RESTORE(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave, mau) , *(pReg++));
				__M4U_RESTORE(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave, mau) , *(pReg++));
			}
			__M4U_RESTORE(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++));
			__M4U_RESTORE(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++));
			__M4U_RESTORE(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++));
			__M4U_RESTORE(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++));
		}
	}
	/* check register size (to prevent overflow) */
	real_size = (pReg - pM4URegBackUp);
	/* NOTE(review): "retore" typo kept — this string is runtime output. */
	if (real_size != gM4u_reg_backup_real_size)
		m4u_aee_print("m4u_reg_retore %d!=%d\n", real_size, gM4u_reg_backup_real_size);
	return 0;
}
/* Per-larb snapshot of the 6 SMI registers saved by m4u_larb_backup(). */
static unsigned int larb_reg_backup_buf[SMI_LARB_NR][6];
  1328. void m4u_larb_backup(int larb_idx)
  1329. {
  1330. unsigned long larb_base;
  1331. if (larb_idx >= SMI_LARB_NR) {
  1332. M4UMSG("error: %s larb_idx = %d\n", __func__, larb_idx);
  1333. return;
  1334. }
  1335. larb_base = gLarbBaseAddr[larb_idx];
  1336. M4ULOG_MID("larb(%d) backup\n", larb_idx);
  1337. #ifdef M4U_TEE_SERVICE_ENABLE
  1338. if (m4u_tee_en)
  1339. /* m4u_larb_backup_sec(larb_idx); */
  1340. #endif
  1341. {
  1342. __M4U_BACKUP(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
  1343. __M4U_BACKUP(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
  1344. __M4U_BACKUP(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
  1345. __M4U_BACKUP(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
  1346. __M4U_BACKUP(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
  1347. __M4U_BACKUP(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
  1348. }
  1349. }
/*
 * Write back the registers saved by m4u_larb_backup() for larb
 * @larb_idx.  Skipped when the TEE service owns the registers.
 */
void m4u_larb_restore(int larb_idx)
{
	unsigned long larb_base;

	if (larb_idx >= SMI_LARB_NR) {
		M4UMSG("error: %s larb_idx = %d\n", __func__, larb_idx);
		return;
	}
	larb_base = gLarbBaseAddr[larb_idx];
	M4ULOG_MID("larb(%d) restore\n", larb_idx);
#ifdef M4U_TEE_SERVICE_ENABLE
	if (m4u_tee_en) {
		/* m4u_larb_restore_sec(larb_idx); */
	} else
#endif
	{
		__M4U_RESTORE(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
		__M4U_RESTORE(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
		__M4U_RESTORE(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
		__M4U_RESTORE(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
		__M4U_RESTORE(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
		__M4U_RESTORE(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
	}
}
  1373. void m4u_print_port_status(struct seq_file *seq, int only_print_active)
  1374. {
  1375. int port, mmu_en, sec;
  1376. int m4u_index, larb, larb_port;
  1377. unsigned long larb_base;
  1378. M4U_PRINT_LOG_OR_SEQ(seq, "m4u_print_port_status ========>\n");
  1379. smi_common_clock_on();
  1380. larb_clock_all_on();
  1381. for (port = 0; port < gM4u_port_num; port++) {
  1382. m4u_index = m4u_port_2_m4u_id(port);
  1383. if (m4u_index == 0) {
  1384. larb = m4u_port_2_larb_id(port);
  1385. larb_port = m4u_port_2_larb_port(port);
  1386. larb_base = gLarbBaseAddr[larb];
  1387. mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
  1388. sec = m4uHw_get_field_by_mask(larb_base, SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1));
  1389. } else {
  1390. larb_port = m4u_port_2_larb_port(port);
  1391. mmu_en = m4uHw_get_field_by_mask(gPericfgBaseAddr,
  1392. REG_PERIAXI_BUS_CTL3, F_PERI_MMU_EN(larb_port, 1));
  1393. }
  1394. if (only_print_active && !mmu_en)
  1395. continue;
  1396. M4U_PRINT_LOG_OR_SEQ(seq, "%s(%d),", m4u_get_port_name(port), !!mmu_en);
  1397. }
  1398. larb_clock_all_off();
  1399. smi_common_clock_off();
  1400. M4U_PRINT_LOG_OR_SEQ(seq, "\n");
  1401. }
  1402. /*
  1403. static int m4u_enable_prefetch(M4U_PORT_ID PortID)
  1404. {
  1405. unsigned long m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)];
  1406. m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_PFH_DIS(1), F_MMU_CTRL_PFH_DIS(0));
  1407. return 0;
  1408. }
  1409. static int m4u_disable_prefetch(M4U_PORT_ID PortID)
  1410. {
  1411. unsigned long m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)];
  1412. m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_PFH_DIS(1), F_MMU_CTRL_PFH_DIS(1));
  1413. return 0;
  1414. }
  1415. static int m4u_enable_error_hang(int m4u_id)
  1416. {
  1417. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  1418. m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_INT_HANG_en(1), F_MMU_CTRL_INT_HANG_en(1));
  1419. return 0;
  1420. }
  1421. static int m4u_disable_error_hang(int m4u_id)
  1422. {
  1423. unsigned long m4u_base = gM4UBaseAddr[m4u_id];
  1424. m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_INT_HANG_en(1), F_MMU_CTRL_INT_HANG_en(0));
  1425. return 0;
  1426. }
  1427. */
  1428. int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void *data)
  1429. {
  1430. if (port > M4U_PORT_UNKNOWN) {
  1431. M4UMSG("%s fail, port=%d\n", __func__, port);
  1432. return -1;
  1433. }
  1434. gM4uPort[port].reclaim_fn = fn;
  1435. gM4uPort[port].reclaim_data = data;
  1436. return 0;
  1437. }
  1438. int m4u_unregister_reclaim_callback(int port)
  1439. {
  1440. if (port > M4U_PORT_UNKNOWN) {
  1441. M4UMSG("%s fail, port=%d\n", __func__, port);
  1442. return -1;
  1443. }
  1444. gM4uPort[port].reclaim_fn = NULL;
  1445. gM4uPort[port].reclaim_data = NULL;
  1446. return 0;
  1447. }
  1448. int m4u_reclaim_notify(int port, unsigned int mva, unsigned int size)
  1449. {
  1450. int i;
  1451. for (i = 0; i < M4U_PORT_UNKNOWN; i++)
  1452. if (gM4uPort[i].reclaim_fn)
  1453. gM4uPort[i].reclaim_fn(port, mva, size, gM4uPort[i].reclaim_data);
  1454. return 0;
  1455. }
  1456. int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void *data)
  1457. {
  1458. if (port > M4U_PORT_UNKNOWN) {
  1459. M4UMSG("%s fail, port=%d\n", __func__, port);
  1460. return -1;
  1461. }
  1462. gM4uPort[port].fault_fn = fn;
  1463. gM4uPort[port].fault_data = data;
  1464. return 0;
  1465. }
  1466. int m4u_unregister_fault_callback(int port)
  1467. {
  1468. if (port > M4U_PORT_UNKNOWN) {
  1469. M4UMSG("%s fail, port=%d\n", __func__, port);
  1470. return -1;
  1471. }
  1472. gM4uPort[port].fault_fn = NULL;
  1473. gM4uPort[port].fault_data = NULL;
  1474. return 0;
  1475. }
  1476. int m4u_enable_tf(int port, bool fgenable)
  1477. {
  1478. gM4uPort[port].enable_tf = fgenable;
  1479. return 0;
  1480. }
  1481. /* ============================================================================== */
/* One-shot timer used by m4u_isr_pause()/m4u_isr_restart() to throttle
 * interrupt storms. */
static struct timer_list m4u_isr_pause_timer;

/* Timer callback: the pause window elapsed, unmask all m4u interrupts. */
static void m4u_isr_restart(unsigned long unused)
{
	M4UMSG("restart m4u irq\n");
	m4u_intr_modify_all(1);
}
/* Prepare the pause timer (called once from m4u_hw_init). Returns 0. */
static int m4u_isr_pause_timer_init(void)
{
	init_timer(&m4u_isr_pause_timer);
	/* callback set after init_timer so it is not clobbered */
	m4u_isr_pause_timer.function = m4u_isr_restart;
	return 0;
}
  1494. static int m4u_isr_pause(int delay)
  1495. {
  1496. m4u_intr_modify_all(0); /* disable all intr */
  1497. m4u_isr_pause_timer.expires = jiffies + delay*HZ; /* delay seconds */
  1498. add_timer(&m4u_isr_pause_timer);
  1499. M4UMSG("warning: stop m4u irq for %ds\n", delay);
  1500. return 0;
  1501. }
  1502. static void m4u_isr_record(void)
  1503. {
  1504. static int m4u_isr_cnt;
  1505. static unsigned long first_jiffies;
  1506. /* we allow one irq in 1s, or we will disable them after 5s. */
  1507. if (!m4u_isr_cnt || time_after(jiffies, first_jiffies + m4u_isr_cnt*HZ)) {
  1508. m4u_isr_cnt = 1;
  1509. first_jiffies = jiffies;
  1510. } else {
  1511. m4u_isr_cnt++;
  1512. if (m4u_isr_cnt >= 5) {
  1513. /* 5 irqs come in 5s, too many ! */
  1514. /* disable irq for a while, to avoid HWT timeout */
  1515. m4u_isr_pause(10);
  1516. m4u_isr_cnt = 0;
  1517. }
  1518. }
  1519. }
  1520. #define MMU_INT_REPORT(mmu, mmu_2nd_id, id) M4UMSG("iommu%d_%d " #id "(0x%x) int happens!!\n", mmu, mmu_2nd_id, id)
  1521. irqreturn_t MTK_M4U_isr(int irq, void *dev_id)
  1522. {
  1523. unsigned long m4u_base;
  1524. unsigned int m4u_index;
  1525. if (irq == gM4uDev->irq_num[0]) {
  1526. m4u_base = gM4UBaseAddr[0];
  1527. m4u_index = 0;
  1528. } else {
  1529. M4UMSG("MTK_M4U_isr(), Invalid irq number %d\n", irq);
  1530. return -1;
  1531. }
  1532. {
  1533. /* L2 interrupt */
  1534. unsigned int regval = M4U_ReadReg32(m4u_base, REG_MMU_L2_FAULT_ST);
  1535. M4UMSG("m4u L2 interrupt sta=0x%x\n", regval);
  1536. if (regval&F_INT_L2_MULTI_HIT_FAULT)
  1537. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MULTI_HIT_FAULT);
  1538. if (regval&F_INT_L2_TABLE_WALK_FAULT) {
  1539. unsigned int fault_va, layer;
  1540. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_TABLE_WALK_FAULT);
  1541. fault_va = M4U_ReadReg32(m4u_base, REG_MMU_TBWALK_FAULT_VA);
  1542. layer = fault_va&1;
  1543. fault_va &= (~1);
  1544. m4u_aee_print("L2 table walk fault: mva=0x%x, layer=%d\n", fault_va, layer);
  1545. }
  1546. if (regval&F_INT_L2_PFH_DMA_FIFO_OVERFLOW)
  1547. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_DMA_FIFO_OVERFLOW);
  1548. if (regval&F_INT_L2_MISS_DMA_FIFO_OVERFLOW)
  1549. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_DMA_FIFO_OVERFLOW);
  1550. if (regval&F_INT_L2_INVALD_DONE)
  1551. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_INVALD_DONE);
  1552. if (regval&F_INT_L2_PFH_OUT_FIFO_ERROR)
  1553. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_OUT_FIFO_ERROR);
  1554. if (regval&F_INT_L2_PFH_IN_FIFO_ERROR)
  1555. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_IN_FIFO_ERROR);
  1556. if (regval&F_INT_L2_MISS_OUT_FIFO_ERROR)
  1557. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_OUT_FIFO_ERROR);
  1558. if (regval&F_INT_L2_MISS_IN_FIFO_ERR)
  1559. MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_IN_FIFO_ERR);
  1560. }
  1561. {
  1562. unsigned int IntrSrc = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_FAULT_ST);
  1563. int m4u_slave_id;
  1564. unsigned int regval;
  1565. int layer, write, m4u_port;
  1566. unsigned int fault_mva, fault_pa;
  1567. M4UMSG("m4u main interrupt happened: sta=0x%x\n", IntrSrc);
  1568. if (IntrSrc & (F_INT_MMU0_MAIN_MSK | F_INT_MMU0_MAU_MSK))
  1569. m4u_slave_id = 0;
  1570. else {
  1571. m4u_clear_intr(m4u_index);
  1572. return 0;
  1573. }
  1574. /* read error info from registers */
  1575. fault_mva = M4U_ReadReg32(m4u_base, REG_MMU_FAULT_VA(m4u_slave_id));
  1576. layer = !!(fault_mva & F_MMU_FAULT_VA_LAYER_BIT);
  1577. write = !!(fault_mva & F_MMU_FAULT_VA_WRITE_BIT);
  1578. fault_mva &= F_MMU_FAULT_VA_MSK;
  1579. fault_pa = M4U_ReadReg32(m4u_base, REG_MMU_INVLD_PA(m4u_slave_id));
  1580. regval = M4U_ReadReg32(m4u_base, REG_MMU_INT_ID(m4u_slave_id));
  1581. m4u_port = m4u_get_port_by_tf_id(m4u_index, regval);
  1582. /* dump something quickly */
  1583. /* m4u_dump_rs_info(m4u_index, m4u_slave_id); */
  1584. m4u_dump_invalid_main_tlb(m4u_index, m4u_slave_id);
  1585. /* m4u_dump_main_tlb(m4u_index, 0); */
  1586. /* m4u_dump_pfh_tlb(m4u_index); */
  1587. if (IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id)) {
  1588. int bypass_DISP_TF = 0;
  1589. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TRANSLATION_FAULT(m4u_slave_id));
  1590. M4UMSG("fault: port=%s, mva=0x%x, pa=0x%x, layer=%d, wr=%d, 0x%x\n",
  1591. m4u_get_port_name(m4u_port), fault_mva, fault_pa, layer, write, regval);
  1592. if (M4U_PORT_DISP_OVL0 == m4u_port
  1593. #if defined(CONFIG_ARCH_MT6753)
  1594. || M4U_PORT_DISP_OVL1 == m4u_port || M4U_PORT_DISP_OD_W == m4u_port
  1595. #endif
  1596. ) {
  1597. unsigned int valid_mva = 0;
  1598. unsigned int valid_size = 0;
  1599. unsigned int valid_mva_end = 0;
  1600. m4u_query_mva_info(fault_mva-1, 0, &valid_mva, &valid_size);
  1601. if (0 != valid_mva && 0 != valid_size)
  1602. valid_mva_end = valid_mva+valid_size-1;
  1603. if ((0 != valid_mva_end && fault_mva < valid_mva_end+SZ_4K)
  1604. || m4u_pte_invalid(m4u_get_domain_by_port(m4u_port), fault_mva)) {
  1605. M4UMSG("bypass disp TF, valid mva=0x%x, size=0x%x, mva_end=0x%x\n",
  1606. valid_mva, valid_size, valid_mva_end);
  1607. bypass_DISP_TF = 1;
  1608. }
  1609. }
  1610. if (gM4uPort[m4u_port].enable_tf == 1 && bypass_DISP_TF == 0) {
  1611. m4u_dump_pte_nolock(m4u_get_domain_by_port(m4u_port), fault_mva);
  1612. /* m4u_print_port_status(NULL, 1); */
  1613. /* call user's callback to dump user registers */
  1614. if (m4u_port < M4U_PORT_UNKNOWN && gM4uPort[m4u_port].fault_fn)
  1615. gM4uPort[m4u_port].fault_fn(m4u_port, fault_mva, gM4uPort[m4u_port].fault_data);
  1616. m4u_dump_buf_info(NULL);
  1617. m4u_aee_print(
  1618. "\nCRDISPATCH_KEY:M4U_%s\ntranslation fault: port=%s, mva=0x%x, pa=0x%x\n",
  1619. m4u_get_port_name(m4u_port), m4u_get_port_name(m4u_port),
  1620. fault_mva, fault_pa);
  1621. }
  1622. MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, m4u_port, fault_mva);
  1623. }
  1624. if (IntrSrc & F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id))
  1625. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id));
  1626. if (IntrSrc & F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id))
  1627. if (!(IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id)))
  1628. MMU_INT_REPORT(m4u_index, m4u_slave_id,
  1629. F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id));
  1630. if (IntrSrc & F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id))
  1631. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id));
  1632. if (IntrSrc & F_INT_TLB_MISS_FAULT(m4u_slave_id))
  1633. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TLB_MISS_FAULT(m4u_slave_id));
  1634. if (IntrSrc & F_INT_MISS_FIFO_ERR(m4u_slave_id))
  1635. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MISS_FIFO_ERR(m4u_slave_id));
  1636. if (IntrSrc & F_INT_PFH_FIFO_ERR(m4u_slave_id))
  1637. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_PFH_FIFO_ERR(m4u_slave_id));
  1638. if (IntrSrc & F_INT_MAU(m4u_slave_id, 0)) {
  1639. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 0));
  1640. __mau_dump_status(m4u_index, m4u_slave_id, 0);
  1641. }
  1642. if (IntrSrc & F_INT_MAU(m4u_slave_id, 1)) {
  1643. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 1));
  1644. __mau_dump_status(m4u_index, m4u_slave_id, 1);
  1645. }
  1646. if (IntrSrc & F_INT_MAU(m4u_slave_id, 2)) {
  1647. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 2));
  1648. __mau_dump_status(m4u_index, m4u_slave_id, 2);
  1649. }
  1650. if (IntrSrc & F_INT_MAU(m4u_slave_id, 3)) {
  1651. MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 3));
  1652. __mau_dump_status(m4u_index, m4u_slave_id, 3);
  1653. }
  1654. m4u_clear_intr(m4u_index);
  1655. m4u_isr_record();
  1656. }
  1657. return IRQ_HANDLED;
  1658. }
/* Single-domain configuration: every port maps to the one global domain.
 * @port is accepted for API symmetry and ignored. */
m4u_domain_t *m4u_get_domain_by_port(M4U_PORT_ID port)
{
	return &gM4uDomain;
}
/* Single-domain configuration: @id is accepted for API symmetry and
 * ignored; the one global domain is always returned. */
m4u_domain_t *m4u_get_domain_by_id(int id)
{
	return &gM4uDomain;
}
/* Number of page-table domains on this hardware: always one. */
int m4u_get_domain_nr(void)
{
	return 1;
}
/*
 * m4u_reg_init - program one m4u hardware instance after power-on.
 * @m4u_domain: domain whose pgd physical address is loaded into both the
 *              normal and secure page-table base registers.
 * @ProtectPA:  physical address of the translation-fault protect buffer,
 *              written to REG_MMU_IVRP_PADDR.
 * @m4u_id:     0 = multimedia iommu (also maps/initializes every SMI larb
 *              and applies mmu0-specific settings), 1 = perisys iommu
 *              (maps PERICFG).
 *
 * Always returns 0.
 */
int m4u_reg_init(m4u_domain_t *m4u_domain, unsigned long ProtectPA, int m4u_id)
{
	unsigned int regval;
	int i;

	M4UINFO("m4u_reg_init, ProtectPA = 0x%lx\n", ProtectPA);
	/* m4u clock is in infra domain, we never close this clock. */
	m4u_clock_on();
#ifdef M4U_FPGAPORTING
#if 0
	/* FPGA bring-up helper (disabled): force mmsys config register */
	if (0 == m4u_id) {
		unsigned long MMconfigBaseAddr;
		struct device_node *node = NULL;

		node = of_find_compatible_node(NULL, NULL, "mediatek,mmsys_config");
		MMconfigBaseAddr = (unsigned long)of_iomap(node, 0);
		M4UINFO("MMconfigBaseAddr: 0x%lx\n", MMconfigBaseAddr);
		M4U_WriteReg32(MMconfigBaseAddr, 0x108, 0xffffffff);
	}
#endif
#endif
	/* ============================================= */
	/* SMI registers */
	/* ============================================= */
	/*
	 * bus selection: controls which m4u_slave each larb routes to.
	 * This register is in the smi_common domain. There is only one AXI
	 * channel in K2, so it does not need to be set.
	 */
	/* ========================================= */
	/* larb init */
	/* ========================================= */
	if (0 == m4u_id) {
		struct device_node *node = NULL;

		/* map every SMI larb and put its engines into the MM domain */
		for (i = 0; i < SMI_LARB_NR; i++) {
			node = of_find_compatible_node(NULL, NULL, gM4U_SMILARB[i]);
			if (NULL == node)
				M4UINFO("init larb %d error\n", i);
			else {
				gLarbBaseAddr[i] = (unsigned long)of_iomap(node, 0);
				/* set mm engine domain */
				larb_clock_on(i);
				M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_0, DOMAIN_VALUE);
				M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_1, DOMAIN_VALUE);
				M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_2, DOMAIN_VALUE);
				M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_3, DOMAIN_VALUE);
				larb_clock_off(i);
				M4UINFO("init larb %d, 0x%lx\n", i, gLarbBaseAddr[i]);
			}
		}
	}
	/* ========================================= */
	/* perisys init */
	/* ========================================= */
	if (1 == m4u_id) {
		struct device_node *node = NULL;

		node = of_find_compatible_node(NULL, NULL, "mediatek,PERICFG");
		gPericfgBaseAddr = (unsigned long)of_iomap(node, 0);
		M4UINFO("gPericfgBaseAddr: 0x%lx\n", gPericfgBaseAddr);
	}
	/* ============================================= */
	/* m4u registers */
	/* ============================================= */
	M4UINFO("m4u hw init id = %d, base address: 0x%lx, pgd_pa: 0x%x\n",
		m4u_id, gM4UBaseAddr[m4u_id], (unsigned int)m4u_domain->pgd_pa);
	{
		/* load the page-table base for both normal and secure worlds */
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE, (unsigned int)m4u_domain->pgd_pa);
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE_SEC, (unsigned int)m4u_domain->pgd_pa);
		regval = M4U_ReadReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG);
		if (0 == m4u_id) { /* mm_iommu */
			/* NOTE(review): the F_*(0) terms OR in zero, so only
			 * TF_PROTECT_SEL(2) changes regval here -- looks like
			 * self-documenting intent; confirm against the TRM */
			regval = regval|F_MMU_CTRL_PFH_DIS(0)
				|F_MMU_CTRL_MONITOR_EN(0)
				|F_MMU_CTRL_MONITOR_CLR(0)
				|F_MMU_CTRL_TF_PROTECT_SEL(2)
				|F_MMU_CTRL_INT_HANG_EN(0);
		}
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG, regval);
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_COHERENCE_EN, 1);
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_TABLE_WALK_DIS, 0);
		/* enable all interrupts */
		m4u_enable_intr(m4u_id);
		/* set translation fault proctection buffer address; the 4G-DRAM
		 * variant applies the high-address encoding */
		if (!gM4U_4G_DRAM_Mode)
			M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR,
				(unsigned int)F_MMU_IVRP_PA_SET(ProtectPA));
		else
			M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR,
				(unsigned int)F_MMU_IVRP_4G_DRAM_PA_SET(ProtectPA));
		/* enable DCM */
		M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_DCM_DIS, 0);
		/* start from a clean TLB */
		m4u_invalid_tlb_all(m4u_id);
	}
	/* special settings for mmu0 (multimedia iommu) */
	if (0 == m4u_id) {
		unsigned long m4u_base = gM4UBaseAddr[0];

		/* 2 disable in-order-write */
		M4U_WriteReg32(m4u_base, REG_MMU_IN_ORDER_WR_EN, 0);
		/* 3 non-standard AXI mode */
		M4U_WriteReg32(m4u_base, REG_MMU_STANDARD_AXI_MODE, 0);
		/* 4 write command throttling mode */
		m4uHw_set_field_by_mask(m4u_base, REG_MMU_WR_LEN, F_BIT_SET(5), 0);
	}
	return 0;
}
  1773. int m4u_domain_init(struct m4u_device *m4u_dev, void *priv_reserve)
  1774. {
  1775. M4UINFO("m4u_domain_init\n");
  1776. memset(&gM4uDomain, 0, sizeof(gM4uDomain));
  1777. gM4uDomain.pgsize_bitmap = M4U_PGSIZES;
  1778. mutex_init(&gM4uDomain.pgtable_mutex);
  1779. m4u_pgtable_init(m4u_dev, &gM4uDomain);
  1780. m4u_mvaGraph_init(priv_reserve);
  1781. return 0;
  1782. }
/* Soft-reset the m4u state: flush the whole TLB, then clear any latched
 * interrupt status. Always returns 0. */
int m4u_reset(int m4u_id)
{
	m4u_invalid_tlb_all(m4u_id);
	m4u_clear_intr(m4u_id);
	return 0;
}
  1789. int m4u_hw_init(struct m4u_device *m4u_dev, int m4u_id)
  1790. {
  1791. unsigned long pProtectVA;
  1792. phys_addr_t ProtectPA;
  1793. #if !defined(CONFIG_MTK_CLKMGR)
  1794. int i;
  1795. gM4uDev->infra_m4u = devm_clk_get(gM4uDev->pDev[m4u_id], "infra_m4u");
  1796. if (IS_ERR(gM4uDev->infra_m4u)) {
  1797. M4UMSG("cannot get infra m4u clock\n");
  1798. return PTR_ERR(gM4uDev->infra_m4u);
  1799. }
  1800. for (i = SMI_COMMON_CLK; i < SMI_CLK_NUM; i++) {
  1801. gM4uDev->smi_clk[i] = devm_clk_get(gM4uDev->pDev[m4u_id], smi_clk_name[i]);
  1802. if (IS_ERR(gM4uDev->smi_clk[i])) {
  1803. M4UMSG("cannot get %s clock\n", smi_clk_name[i]);
  1804. return PTR_ERR(gM4uDev->smi_clk[i]);
  1805. }
  1806. }
  1807. smi_common_clock_on();
  1808. smi_larb0_clock_on();
  1809. #endif
  1810. #ifdef M4U_4GBDRAM
  1811. gM4U_4G_DRAM_Mode = enable_4G();
  1812. #endif
  1813. M4UMSG("4G DRAM Mode is: %d\n", gM4U_4G_DRAM_Mode);
  1814. gM4UBaseAddr[m4u_id] = m4u_dev->m4u_base[m4u_id];
  1815. pProtectVA = (unsigned long) kmalloc(TF_PROTECT_BUFFER_SIZE*2, GFP_KERNEL|__GFP_ZERO);
  1816. if (NULL == (void *)pProtectVA) {
  1817. M4UMSG("Physical memory not available.\n");
  1818. return -1;
  1819. }
  1820. pProtectVA = (pProtectVA+(TF_PROTECT_BUFFER_SIZE-1))&(~(TF_PROTECT_BUFFER_SIZE-1));
  1821. ProtectPA = virt_to_phys((void *)pProtectVA);
  1822. if (ProtectPA & (TF_PROTECT_BUFFER_SIZE-1)) {
  1823. M4UMSG("protect buffer (0x%pa) not align.\n", &ProtectPA);
  1824. return -1;
  1825. }
  1826. M4UINFO("protect memory va=0x%pa, pa=0x%pa.\n", &pProtectVA, &ProtectPA);
  1827. pM4URegBackUp = kmalloc(M4U_REG_BACKUP_SIZE, GFP_KERNEL|__GFP_ZERO);
  1828. if (pM4URegBackUp == NULL) {
  1829. M4UMSG("Physical memory not available size=%d.\n", (int)M4U_REG_BACKUP_SIZE);
  1830. return -1;
  1831. }
  1832. spin_lock_init(&gM4u_reg_lock);
  1833. m4u_reg_init(&gM4uDomain, ProtectPA, m4u_id);
  1834. if (request_irq(m4u_dev->irq_num[m4u_id], MTK_M4U_isr, IRQF_TRIGGER_LOW, "m4u", NULL)) {
  1835. M4UMSG("request M4U%d IRQ line failed\n", m4u_id);
  1836. return -ENODEV;
  1837. }
  1838. M4UMSG("request_irq, irq_num=%d\n", m4u_dev->irq_num[m4u_id]);
  1839. m4u_isr_pause_timer_init();
  1840. m4u_monitor_start(m4u_id);
  1841. /* mau_start_monitor(0, 0, 0, 0, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
  1842. /* mau_start_monitor(0, 0, 1, 1, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
  1843. /* mau_start_monitor(0, 0, 2, 0, 0, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
  1844. /* config MDP related port default use M4U */
  1845. if (0 == m4u_id) {
  1846. M4U_PORT_STRUCT port;
  1847. port.Direction = 0;
  1848. port.Distance = 1;
  1849. port.domain = 0;
  1850. port.Security = 0;
  1851. port.Virtuality = 1;
  1852. port.ePortID = M4U_PORT_MDP_RDMA;
  1853. m4u_config_port(&port);
  1854. port.ePortID = M4U_PORT_MDP_WDMA;
  1855. m4u_config_port(&port);
  1856. port.ePortID = M4U_PORT_MDP_WROT;
  1857. m4u_config_port(&port);
  1858. }
  1859. return 0;
  1860. }
  1861. int m4u_hw_deinit(struct m4u_device *m4u_dev, int m4u_id)
  1862. {
  1863. #if 1
  1864. free_irq(m4u_dev->irq_num[m4u_id], NULL);
  1865. #else
  1866. free_irq(MM_IOMMU_IRQ_B_ID, NULL);
  1867. free_irq(PERISYS_IOMMU_IRQ_B_ID, NULL);
  1868. #endif
  1869. return 0;
  1870. }
  1871. int m4u_dump_reg_for_smi_hang_issue(void)
  1872. {
  1873. /*NOTES: m4u_monitor_start() must be called before using m4u */
  1874. /*please check m4u_hw_init() to ensure that */
  1875. M4UMSG("====== dump m4u reg start =======>\n");
  1876. if (0 == gM4UBaseAddr[0]) {
  1877. M4UMSG("gM4UBaseAddr[0] is NULL\n");
  1878. return 0;
  1879. }
  1880. M4UMSG("0x44 = 0x%x\n", M4U_ReadReg32(gM4UBaseAddr[0], 0x44));
  1881. m4u_print_perf_counter(0, 0, "m4u");
  1882. m4u_dump_rs_info(0, 0);
  1883. return 0;
  1884. }