mmprofile.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990
  1. #include <linux/miscdevice.h>
  2. #include <linux/fs.h>
  3. #include <linux/file.h>
  4. #include <linux/mm.h>
  5. #include <linux/cdev.h>
  6. #include <asm/page.h>
  7. #include <asm/io.h>
  8. #include <generated/autoconf.h>
  9. #include <linux/module.h>
  10. #include <linux/mm.h>
  11. #include <linux/init.h>
  12. #include <linux/device.h>
  13. #include <linux/platform_device.h>
  14. #include <asm/uaccess.h>
  15. #include <asm/atomic.h>
  16. /* #include <asm/mach-types.h> */
  17. #include <asm/cacheflush.h>
  18. #include <asm/io.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/slab.h>
  21. #include <linux/list.h>
  22. #include <linux/mutex.h>
  23. #include <linux/hardirq.h>
  24. #include <linux/sched.h>
  25. #include <linux/debugfs.h>
  26. #include <linux/ftrace_event.h>
  27. #include <linux/bug.h>
  28. #define MMPROFILE_INTERNAL
  29. #include <mmprofile_internal.h>
  30. #ifdef CONFIG_MTK_EXTMEM
  31. #include <linux/exm_driver.h>
  32. #endif
  33. /* #pragma GCC optimize ("O0") */
  34. #define MMP_DEVNAME "mmp"
  35. #define MMProfileDefaultBufferSize 0x18000
  36. #define MMProfileDefaultMetaBufferSize 0x800000
  37. #define MMProfileDumpBlockSize (1024*4)
  38. #define TAG_MMPROFILE "mmprofile"
  39. #ifdef CONFIG_TRACING
  40. #define ENABLE_MMP_TRACING
  41. #ifdef ENABLE_MMP_TRACING
  42. #define MMP_TRACING
  43. #endif
  44. #endif /* CONFIG_TRACING */
  45. static bool mmp_log_on;
  46. static bool mmp_trace_log_on;
  47. #define MMP_LOG(prio, fmt, arg...) \
  48. do { \
  49. if (mmp_log_on) \
  50. pr_debug("MMP:%s(): "fmt"\n", __func__, ##arg); \
  51. } while (0)
  52. #define MMP_MSG(fmt, arg...) pr_warn("MMP: %s(): "fmt"\n", __func__, ##arg)
/* One node of the dynamic event-registration list (MMProfile_RegTable.list). */
typedef struct {
	MMProfile_EventInfo_t event_info;	/* name + parentId of the registered event */
	struct list_head list;
} MMProfile_RegTable_t;
/* Header of one variable-sized block in the meta-data buffer.  The blocks are
 * linked in MMProfile_MetaBufferList in least-recently-used order and are
 * merged/split in MMProfileLogMetaInt() as space is recycled. */
typedef struct {
	struct list_head list;		/* position in MMProfile_MetaBufferList */
	unsigned int block_size;	/* total block bytes, header included */
	unsigned int cookie;		/* ties the block to an event record's meta_data_cookie */
	MMP_MetaDataType data_type;
	unsigned int data_size;		/* payload bytes actually stored; 0 == free block */
	unsigned char meta_data[1];	/* payload start; block is over-allocated past here */
} MMProfile_MetaDataBlock_t;
static int bMMProfileInitBuffer;	/* non-zero once both buffers are allocated */
static unsigned int MMProfile_MetaDataCookie = 1;	/* next cookie to hand out; wraps past 0 */
static DEFINE_MUTEX(MMProfile_BufferInitMutex);	/* serializes buffer (re)allocation */
static DEFINE_MUTEX(MMProfile_RegTableMutex);	/* guards MMProfile_RegTable.list */
static DEFINE_MUTEX(MMProfile_MetaBufferMutex);	/* guards MMProfile_MetaBufferList */
static MMProfile_Event_t *pMMProfileRingBuffer;	/* event ring buffer (vmalloc/extmem) */
static unsigned char *pMMProfileMetaBuffer;	/* meta-data buffer (vmalloc/extmem) */
/* Page-aligned so the structure can be mapped/dumped wholesale. */
static MMProfile_Global_t MMProfileGlobals
__aligned(PAGE_SIZE) = {
	.buffer_size_record = MMProfileDefaultBufferSize,
	.new_buffer_size_record = MMProfileDefaultBufferSize,
	/* Ring-buffer byte size rounded up to a whole number of pages. */
	.buffer_size_bytes = ((sizeof(MMProfile_Event_t) * MMProfileDefaultBufferSize +
			       (PAGE_SIZE - 1)) & (~(PAGE_SIZE - 1))),
	.record_size = sizeof(MMProfile_Event_t),
	.meta_buffer_size = MMProfileDefaultMetaBufferSize,
	.new_meta_buffer_size = MMProfileDefaultMetaBufferSize,
	.selected_buffer = MMProfilePrimaryBuffer,
	.reg_event_index = sizeof(MMProfileStaticEvents) / sizeof(MMP_StaticEvent_t),
	.max_event_count = MMProfileMaxEventCount,
};
/* List head for registered events; the head node itself carries no event. */
static MMProfile_RegTable_t MMProfile_RegTable = {
	.list = LIST_HEAD_INIT(MMProfile_RegTable.list),
};
static struct list_head MMProfile_MetaBufferList = LIST_HEAD_INIT(MMProfile_MetaBufferList);
/* Scratch chunk handed to readers by MMProfileGetDumpBuffer(). */
static unsigned char MMProfileDumpBlock[MMProfileDumpBlockSize];
/* Internal functions begin */
static int MMProfileRegisterStaticEvents(int sync);
static void MMProfileForceStart(int start);
  93. unsigned int MMProfileGetDumpSize(void)
  94. {
  95. unsigned int size;
  96. MMP_LOG(ANDROID_LOG_DEBUG, "+enable %u, start %u", MMProfileGlobals.enable,
  97. MMProfileGlobals.start);
  98. MMProfileForceStart(0);
  99. if (MMProfileRegisterStaticEvents(0) == 0)
  100. return 0;
  101. size = sizeof(MMProfile_Global_t);
  102. size += sizeof(MMProfile_EventInfo_t) * (MMProfileGlobals.reg_event_index + 1);
  103. size += MMProfileGlobals.buffer_size_bytes;
  104. MMP_LOG(ANDROID_LOG_DEBUG, "-size %u", size);
  105. return size;
  106. }
  107. static unsigned int MMProfileFillDumpBlock(void *pSrc, void *pDst,
  108. unsigned int *pSrcPos, unsigned int *pDstPos,
  109. unsigned int SrcSize, unsigned int DstSize)
  110. {
  111. unsigned int SrcLeft = SrcSize - *pSrcPos;
  112. unsigned int DstLeft = DstSize - *pDstPos;
  113. if ((SrcLeft == 0) || (DstLeft == 0))
  114. return 0;
  115. if (SrcLeft < DstLeft) {
  116. memcpy(((unsigned char *)pDst) + *pDstPos, ((unsigned char *)pSrc) + *pSrcPos,
  117. SrcLeft);
  118. *pSrcPos += SrcLeft;
  119. *pDstPos += SrcLeft;
  120. return SrcLeft;
  121. }
  122. memcpy(((unsigned char *)pDst) + *pDstPos, ((unsigned char *)pSrc) + *pSrcPos,
  123. DstLeft);
  124. *pSrcPos += DstLeft;
  125. *pDstPos += DstLeft;
  126. return DstLeft;
  127. }
/*
 * Produce one MMProfileDumpBlockSize-sized chunk of the dump image.
 * The dump is laid out as three consecutive regions:
 *   [MMProfile_Global_t][event-info table (1 dummy + reg_event_index entries)]
 *   [event ring buffer].
 * Start is the absolute byte offset into that image; *pAddr receives the
 * address of the static MMProfileDumpBlock scratch buffer and *pSize the
 * number of valid bytes placed there (0 on error or end of image).
 */
void MMProfileGetDumpBuffer(unsigned int Start, unsigned long *pAddr, unsigned int *pSize)
{
	unsigned int total_pos = Start;	/* absolute offset into the dump image */
	unsigned int region_pos;	/* offset within the current region */
	unsigned int block_pos = 0;	/* fill level of MMProfileDumpBlock */
	unsigned int region_base = 0;	/* absolute offset where current region starts */
	unsigned int copy_size;		/* NOTE(review): assigned but never read */
	*pAddr = (unsigned long)MMProfileDumpBlock;
	*pSize = MMProfileDumpBlockSize;
	if (!bMMProfileInitBuffer) {
		MMP_LOG(ANDROID_LOG_DEBUG, "Ringbuffer is not initialized");
		*pSize = 0;
		return;
	}
	if (total_pos < (region_base + sizeof(MMProfile_Global_t))) {
		/* Global structure */
		region_pos = total_pos;
		copy_size =
		    MMProfileFillDumpBlock(&MMProfileGlobals, MMProfileDumpBlock, &region_pos,
					   &block_pos, sizeof(MMProfile_Global_t),
					   MMProfileDumpBlockSize);
		if (block_pos == MMProfileDumpBlockSize)
			return;
		/* Region fully consumed; continue from the next region's start. */
		total_pos = region_base + sizeof(MMProfile_Global_t);
	}
	region_base += sizeof(MMProfile_Global_t);
	if (MMProfileRegisterStaticEvents(0) == 0) {
		MMP_LOG(ANDROID_LOG_DEBUG, "static event not register");
		*pSize = 0;
		return;
	}
	if (total_pos <
	    (region_base +
	     sizeof(MMProfile_EventInfo_t) * (MMProfileGlobals.reg_event_index + 1))) {
		/* Register table */
		MMP_Event index;
		MMProfile_RegTable_t *pRegTable;
		/* Slot 0 of the table is a zeroed dummy entry. */
		MMProfile_EventInfo_t EventInfoDummy = { 0, "" };
		unsigned int SrcPos;
		unsigned int Pos = 0;	/* offset of the current entry in this region */
		region_pos = total_pos - region_base;
		/* trylock: this may be called where blocking is undesirable;
		 * give up on contention rather than sleep. */
		if (mutex_trylock(&MMProfile_RegTableMutex) == 0) {
			MMP_LOG(ANDROID_LOG_DEBUG, "fail to get reg lock");
			*pSize = 0;
			return;
		}
		if (Pos + sizeof(MMProfile_EventInfo_t) > region_pos) {
			/* Entry overlaps the requested offset: copy its tail
			 * (or all of it when region_pos <= Pos). */
			if (region_pos > Pos)
				SrcPos = region_pos - Pos;
			else
				SrcPos = 0;
			copy_size =
			    MMProfileFillDumpBlock(&EventInfoDummy, MMProfileDumpBlock, &SrcPos,
						   &block_pos, sizeof(MMProfile_EventInfo_t),
						   MMProfileDumpBlockSize);
			if (block_pos == MMProfileDumpBlockSize) {
				mutex_unlock(&MMProfile_RegTableMutex);
				return;
			}
		}
		Pos += sizeof(MMProfile_EventInfo_t);
		index = MMP_RootEvent;
		list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
			if (Pos + sizeof(MMProfile_EventInfo_t) > region_pos) {
				if (region_pos > Pos)
					SrcPos = region_pos - Pos;
				else
					SrcPos = 0;
				copy_size =
				    MMProfileFillDumpBlock(&(pRegTable->event_info),
							   MMProfileDumpBlock, &SrcPos, &block_pos,
							   sizeof(MMProfile_EventInfo_t),
							   MMProfileDumpBlockSize);
				if (block_pos == MMProfileDumpBlockSize) {
					mutex_unlock(&MMProfile_RegTableMutex);
					return;
				}
			}
			Pos += sizeof(MMProfile_EventInfo_t);
			index++;
		}
		mutex_unlock(&MMProfile_RegTableMutex);
		total_pos =
		    region_base +
		    sizeof(MMProfile_EventInfo_t) * (MMProfileGlobals.reg_event_index + 1);
	}
	region_base += sizeof(MMProfile_EventInfo_t) * (MMProfileGlobals.reg_event_index + 1);
	if (total_pos < (region_base + MMProfileGlobals.buffer_size_bytes)) {
		/* Primary buffer */
		region_pos = total_pos - region_base;
		copy_size =
		    MMProfileFillDumpBlock(pMMProfileRingBuffer, MMProfileDumpBlock, &region_pos,
					   &block_pos, MMProfileGlobals.buffer_size_bytes,
					   MMProfileDumpBlockSize);
		if (block_pos == MMProfileDumpBlockSize)
			return;
	} else {
		/* Past the end of the image; block_pos is 0 here, and *pSize
		 * is set again from block_pos below. */
		*pSize = 0;
	}
	MMP_LOG(ANDROID_LOG_DEBUG, "end t=%u,r =%u,block_pos=%u", total_pos, region_base,
		block_pos);
	*pSize = block_pos;
}
/*
 * Allocate (or re-allocate) the event ring buffer and the meta-data buffer.
 * Work is done only when a buffer does not exist yet or when the requested
 * "new_" size differs from the current one.  All allocation is serialized by
 * MMProfile_BufferInitMutex.  On any allocation failure both buffers are
 * released and bMMProfileInitBuffer stays 0.
 */
static void MMProfileInitBuffer(void)
{
	if (!MMProfileGlobals.enable)
		return;
	/* mutex_lock/vmalloc below can sleep; never run in interrupt context. */
	if (in_interrupt())
		return;
	mutex_lock(&MMProfile_BufferInitMutex);
	if ((!bMMProfileInitBuffer) ||
	    (MMProfileGlobals.buffer_size_record != MMProfileGlobals.new_buffer_size_record) ||
	    (MMProfileGlobals.meta_buffer_size != MMProfileGlobals.new_meta_buffer_size)) {
		/* Initialize */
		/* Allocate memory. */
		unsigned int bResetRingBuffer = 0;
		unsigned int bResetMetaBuffer = 0;
		if (!pMMProfileRingBuffer) {
			/* First-time allocation: adopt the requested record
			 * count and round the byte size up to whole pages. */
			MMProfileGlobals.buffer_size_record =
			    MMProfileGlobals.new_buffer_size_record;
			MMProfileGlobals.buffer_size_bytes =
			    ((sizeof(MMProfile_Event_t) * MMProfileGlobals.buffer_size_record +
			      (PAGE_SIZE - 1)) & (~(PAGE_SIZE - 1)));
			bResetRingBuffer = 1;
		} else if (MMProfileGlobals.buffer_size_record !=
			   MMProfileGlobals.new_buffer_size_record) {
			/* Size change: drop the old buffer before re-allocating. */
			vfree(pMMProfileRingBuffer);
			MMProfileGlobals.buffer_size_record =
			    MMProfileGlobals.new_buffer_size_record;
			MMProfileGlobals.buffer_size_bytes =
			    ((sizeof(MMProfile_Event_t) * MMProfileGlobals.buffer_size_record +
			      (PAGE_SIZE - 1)) & (~(PAGE_SIZE - 1)));
			bResetRingBuffer = 1;
		}
		if (bResetRingBuffer) {
			pMMProfileRingBuffer =
#ifdef CONFIG_MTK_EXTMEM
			    (MMProfile_Event_t *)
			    extmem_malloc_page_align(MMProfileGlobals.buffer_size_bytes);
#else
			    vmalloc(MMProfileGlobals.buffer_size_bytes);
#endif
		}
		MMP_LOG(ANDROID_LOG_DEBUG, "pMMProfileRingBuffer=0x%08lx",
			(unsigned long)pMMProfileRingBuffer);
		if (!pMMProfileMetaBuffer) {
			MMProfileGlobals.meta_buffer_size = MMProfileGlobals.new_meta_buffer_size;
			bResetMetaBuffer = 1;
		} else if (MMProfileGlobals.meta_buffer_size !=
			   MMProfileGlobals.new_meta_buffer_size) {
			vfree(pMMProfileMetaBuffer);
			MMProfileGlobals.meta_buffer_size = MMProfileGlobals.new_meta_buffer_size;
			bResetMetaBuffer = 1;
		}
		if (bResetMetaBuffer) {
			pMMProfileMetaBuffer =
#ifdef CONFIG_MTK_EXTMEM
			    (unsigned char *)
			    extmem_malloc_page_align(MMProfileGlobals.meta_buffer_size);
#else
			    vmalloc(MMProfileGlobals.meta_buffer_size);
#endif
		}
		MMP_LOG(ANDROID_LOG_DEBUG, "pMMProfileMetaBuffer=0x%08lx",
			(unsigned long)pMMProfileMetaBuffer);
		/* All-or-nothing: if either allocation failed, release both
		 * and leave the profiler uninitialized. */
		if ((!pMMProfileRingBuffer) || (!pMMProfileMetaBuffer)) {
			if (pMMProfileRingBuffer) {
				vfree(pMMProfileRingBuffer);
				pMMProfileRingBuffer = NULL;
			}
			if (pMMProfileMetaBuffer) {
				vfree(pMMProfileMetaBuffer);
				pMMProfileMetaBuffer = NULL;
			}
			bMMProfileInitBuffer = 0;
			mutex_unlock(&MMProfile_BufferInitMutex);
			MMP_LOG(ANDROID_LOG_DEBUG, "Cannot allocate buffer");
			return;
		}
		if (bResetRingBuffer)
			memset((void *)(pMMProfileRingBuffer), 0,
			       MMProfileGlobals.buffer_size_bytes);
		if (bResetMetaBuffer) {
			memset((void *)(pMMProfileMetaBuffer), 0,
			       MMProfileGlobals.meta_buffer_size);
			/* Initialize the first block in meta buffer. */
			{
				/* The whole meta buffer starts as one free block. */
				MMProfile_MetaDataBlock_t *pBlock =
				    (MMProfile_MetaDataBlock_t *) pMMProfileMetaBuffer;
				pBlock->block_size = MMProfileGlobals.meta_buffer_size;
				INIT_LIST_HEAD(&MMProfile_MetaBufferList);
				list_add_tail(&(pBlock->list), &MMProfile_MetaBufferList);
			}
		}
		bMMProfileInitBuffer = 1;
	}
	mutex_unlock(&MMProfile_BufferInitMutex);
}
  326. static void MMProfileResetBuffer(void)
  327. {
  328. if (!MMProfileGlobals.enable)
  329. return;
  330. if (bMMProfileInitBuffer) {
  331. memset((void *)(pMMProfileRingBuffer), 0, MMProfileGlobals.buffer_size_bytes);
  332. MMProfileGlobals.write_pointer = 0;
  333. mutex_lock(&MMProfile_MetaBufferMutex);
  334. MMProfile_MetaDataCookie = 1;
  335. memset((void *)(pMMProfileMetaBuffer), 0, MMProfileGlobals.meta_buffer_size);
  336. /* Initialize the first block in meta buffer. */
  337. {
  338. MMProfile_MetaDataBlock_t *pBlock =
  339. (MMProfile_MetaDataBlock_t *) pMMProfileMetaBuffer;
  340. pBlock->block_size = MMProfileGlobals.meta_buffer_size;
  341. INIT_LIST_HEAD(&MMProfile_MetaBufferList);
  342. list_add_tail(&(pBlock->list), &MMProfile_MetaBufferList);
  343. }
  344. mutex_unlock(&MMProfile_MetaBufferMutex);
  345. }
  346. }
  347. static void MMProfileForceStart(int start)
  348. {
  349. MMP_MSG("start: %d", start);
  350. if (!MMProfileGlobals.enable)
  351. return;
  352. MMP_LOG(ANDROID_LOG_DEBUG, "+start %d", start);
  353. if (start && (!MMProfileGlobals.start)) {
  354. MMProfileInitBuffer();
  355. MMProfileResetBuffer();
  356. }
  357. MMProfileGlobals.start = start;
  358. MMP_LOG(ANDROID_LOG_DEBUG, "-start=%d", MMProfileGlobals.start);
  359. }
  360. /* this function only used by other kernel modules. */
  361. void MMProfileStart(int start)
  362. {
  363. #ifndef FORBID_MMP_START
  364. MMProfileForceStart(start);
  365. #endif
  366. }
  367. void MMProfileEnable(int enable)
  368. {
  369. MMP_MSG("enable: %d", enable);
  370. if (enable)
  371. MMProfileRegisterStaticEvents(1);
  372. MMProfileGlobals.enable = enable;
  373. if (enable == 0)
  374. MMProfileForceStart(0);
  375. }
  376. /* if using remote tool (PC side) or adb shell command, can always start mmp */
  377. static void MMProfileRemoteStart(int start)
  378. {
  379. MMP_MSG("remote start: %d", start);
  380. if (!MMProfileGlobals.enable)
  381. return;
  382. MMP_LOG(ANDROID_LOG_DEBUG, "remote +start %d", start);
  383. if (start && (!MMProfileGlobals.start)) {
  384. MMProfileInitBuffer();
  385. MMProfileResetBuffer();
  386. }
  387. MMProfileGlobals.start = start;
  388. MMP_LOG(ANDROID_LOG_DEBUG, "remote -start=%d", MMProfileGlobals.start);
  389. }
  390. static MMP_Event MMProfileFindEventInt(MMP_Event parent, const char *name)
  391. {
  392. MMP_Event index;
  393. MMProfile_RegTable_t *pRegTable;
  394. index = MMP_RootEvent;
  395. list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
  396. if ((parent == 0) || (pRegTable->event_info.parentId == parent)) {
  397. if (strncmp(pRegTable->event_info.name, name, MMProfileEventNameMaxLen) ==
  398. 0) {
  399. return index;
  400. }
  401. }
  402. index++;
  403. }
  404. return 0;
  405. }
/*
 * Build the full "ancestor:...:parent:event" name path for an event into
 * name[].  The chain is resolved by repeatedly looking the current event up
 * in the registration list and following parentId until MMP_RootEvent or
 * MMP_InvalidEvent is reached (depth capped at 32 levels).
 *
 * On success returns the number of characters written including the
 * terminating '\0', and sets *size to the length the complete path would
 * need (so a caller can detect truncation).  Returns -1 on bad arguments or
 * when the event cannot be resolved.
 *
 * NOTE(review): walks MMProfile_RegTable.list without taking
 * MMProfile_RegTableMutex — presumably relies on entries never being removed;
 * confirm against the registration code.
 */
static int MMProfileGetEventName(MMP_Event event, char *name, size_t *size)
{
	MMP_Event curr_event = event; /* current event for seraching */
	MMProfile_EventInfo_t *eventInfo[32]; /* event info for all level of the event */
	int infoCnt = 0;
	int found = 0;
	int ret = -1;
	if ((NULL == name) || (NULL == size)) {
		/* parameters invalid */
		return ret;
	}
	/* Walk up the parent chain, collecting one eventInfo per level
	 * (index 0 = the event itself, higher indices = ancestors). */
	while (1) {
		MMProfile_RegTable_t *pRegTable;
		int curr_found = 0;
		MMP_Event index = MMP_RootEvent;
		/* check the event */
		if ((MMP_InvalidEvent == curr_event)
		    || (curr_event > MMProfileGlobals.reg_event_index)) {
			/* the event invalid */
			break;
		}
		if (infoCnt >= ARRAY_SIZE(eventInfo)) {
			/* the level of event is out of limite */
			found = 1;
			break;
		}
		/* search the info for the event */
		list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
			if (index == curr_event) {
				/* find this event */
				curr_found = 1;
				eventInfo[infoCnt] = &pRegTable->event_info;
				break;
			}
			index++;
		}
		if (!curr_found) {
			/* can not find the event */
			break;
		}
		if ((MMP_RootEvent == eventInfo[infoCnt]->parentId) ||
		    (MMP_InvalidEvent == eventInfo[infoCnt]->parentId)) {
			/* find all path for the event */
			found = 1;
			infoCnt++;
			break;
		}
		/* search the parent of the event */
		curr_event = eventInfo[infoCnt]->parentId;
		infoCnt++;
	}
	if (found) {
		size_t needLen = 0;
		size_t actualLen = 0;
		int infoCntUsed = 0;
		int i;
		BUG_ON(!(infoCnt > 0));
		/* Determine how many levels fit in the caller's buffer;
		 * truncation drops the outermost ancestors first. */
		for (i = 0; i < infoCnt; i++) {
			needLen += strlen(eventInfo[i]->name) + 1; /* after each name has a ':' or '\0' */
			if (needLen <= *size) {
				/* buffer size is ok */
				infoCntUsed = i + 1;
			}
		}
		/* Emit ancestors first, separated by ':'; the final strcpy
		 * supplies the terminating '\0'. */
		for (i = infoCntUsed - 1; i >= 0; i--) {
			strcpy(&name[actualLen], eventInfo[i]->name);
			actualLen += strlen(eventInfo[i]->name);
			if (i > 0) {
				/* not the last name */
				name[actualLen] = ':';
			}
			actualLen++;
		}
		ret = (int)actualLen;
		*size = needLen;
	}
	return ret;
}
  484. static int MMProfileConfigEvent(MMP_Event event, char *name, MMP_Event parent, int sync)
  485. {
  486. MMP_Event index;
  487. MMProfile_RegTable_t *pRegTable;
  488. if (in_interrupt())
  489. return 0;
  490. if ((event >= MMP_MaxStaticEvent) ||
  491. (event >= MMProfileMaxEventCount) || (event == MMP_InvalidEvent)) {
  492. return 0;
  493. }
  494. if (sync) {
  495. mutex_lock(&MMProfile_RegTableMutex);
  496. } else {
  497. if (mutex_trylock(&MMProfile_RegTableMutex) == 0)
  498. return 0;
  499. }
  500. index = MMProfileFindEventInt(parent, name);
  501. if (index) {
  502. mutex_unlock(&MMProfile_RegTableMutex);
  503. return 1;
  504. }
  505. pRegTable = kmalloc(sizeof(MMProfile_RegTable_t), GFP_KERNEL);
  506. if (!pRegTable) {
  507. mutex_unlock(&MMProfile_RegTableMutex);
  508. return 0;
  509. }
  510. strncpy(pRegTable->event_info.name, name, MMProfileEventNameMaxLen);
  511. pRegTable->event_info.name[MMProfileEventNameMaxLen] = 0;
  512. pRegTable->event_info.parentId = parent;
  513. list_add_tail(&(pRegTable->list), &(MMProfile_RegTable.list));
  514. mutex_unlock(&MMProfile_RegTableMutex);
  515. return 1;
  516. }
  517. static int MMProfileRegisterStaticEvents(int sync)
  518. {
  519. static unsigned int bStaticEventRegistered;
  520. unsigned int static_event_count = 0;
  521. unsigned int i;
  522. int ret = 1;
  523. if (in_interrupt())
  524. return 0;
  525. if (bStaticEventRegistered)
  526. return 1;
  527. static_event_count = sizeof(MMProfileStaticEvents) / sizeof(MMP_StaticEvent_t);
  528. for (i = 0; i < static_event_count; i++) {
  529. ret = ret
  530. && MMProfileConfigEvent(MMProfileStaticEvents[i].event,
  531. MMProfileStaticEvents[i].name,
  532. MMProfileStaticEvents[i].parent, sync);
  533. }
  534. bStaticEventRegistered = 1;
  535. return ret;
  536. }
/* the MMP_TRACING is defined only when CONFIG_TRACING is defined and we enable mmp to trace its API. */
#ifdef MMP_TRACING
/* Cached address of the ftrace "tracing_mark_write" symbol, resolved lazily
 * via kallsyms on first use (0 = not yet resolved). */
static unsigned long __read_mostly tracing_mark_write_addr;
static inline void __mt_update_tracing_mark_write_addr(void)
{
	if (unlikely(0 == tracing_mark_write_addr))
		tracing_mark_write_addr = kallsyms_lookup_name("tracing_mark_write");
}
/* Emit a systrace-style "begin slice" marker ("B|pid|name") for the current
 * task.  No-op unless mmp_trace_log_on is set. */
static inline void mmp_kernel_trace_begin(char *name)
{
	if (mmp_trace_log_on) {
		__mt_update_tracing_mark_write_addr();
		event_trace_printk(tracing_mark_write_addr, "B|%d|%s\n", current->tgid, name);
	}
}
/* Emit a systrace counter marker ("C|pid|name|value"); pid is -1 when called
 * from interrupt context where current->tgid is meaningless. */
static inline void mmp_kernel_trace_counter(char *name, int count)
{
	if (mmp_trace_log_on) {
		__mt_update_tracing_mark_write_addr();
		event_trace_printk(tracing_mark_write_addr,
				   "C|%d|%s|%d\n", in_interrupt() ? -1 : current->tgid, name, count);
	}
}
/* Emit a systrace "end slice" marker ("E"), closing the most recent begin. */
static inline void mmp_kernel_trace_end(void)
{
	if (mmp_trace_log_on) {
		__mt_update_tracing_mark_write_addr();
		event_trace_printk(tracing_mark_write_addr, "E\n");
	}
}
#else
/* Tracing disabled at build time: keep the call sites compiling as no-ops. */
static inline void mmp_kernel_trace_begin(char *name)
{
}
static inline void mmp_kernel_trace_end(void)
{
}
static inline void mmp_kernel_trace_counter(char *name, int count)
{
}
#endif
  578. /* continue to use 32-bit value to store time value (separate into 2) */
  579. static void system_time(unsigned int *low, unsigned int *high)
  580. {
  581. unsigned long long temp;
  582. temp = sched_clock();
  583. *low = (unsigned int)(temp & 0xffffffff);
  584. *high = (unsigned int)((temp >> 32) & 0xffffffff);
  585. /* MMP_LOG(ANDROID_LOG_VERBOSE,"system_time,0x%08x,0x%08x", *high, *low); */
  586. }
/*
 * Core event-logging primitive: atomically claim one slot of the ring buffer,
 * fill in the record, and optionally mirror it into ftrace/systrace.
 *
 * Lock-free writer protocol: write_pointer is advanced with atomic_inc_return
 * and each slot carries a per-record "lock" counter.  A writer increments the
 * slot's counter; if it was already taken (lock > 1) the counter is left
 * raised so the concurrent owner later sees it and invalidates the record,
 * and this writer moves on to claim the next free slot.
 */
static void MMProfileLog_Int(MMP_Event event, MMP_LogType type, unsigned long data1,
			     unsigned long data2, unsigned int meta_data_cookie)
{
	char name[256];
	size_t prefix_len;
	size_t size;
	if (!MMProfileGlobals.enable)
		return;
	if (bMMProfileInitBuffer && MMProfileGlobals.start
	    && (MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED)) {
		MMProfile_Event_t *pEvent = NULL;
		unsigned int index;
		unsigned int lock;
		/* Event ID 0 and 1 are protected. They are not allowed for logging. */
		if (unlikely(event < 2))
			return;
		/* Claim the next slot (write_pointer is a free-running counter,
		 * wrapped modulo the record count). */
		index = (atomic_inc_return((atomic_t *) &(MMProfileGlobals.write_pointer)) - 1)
		    % (MMProfileGlobals.buffer_size_record);
		lock = atomic_inc_return((atomic_t *) &(pMMProfileRingBuffer[index].lock));
		if (unlikely(lock > 1)) {
			/* Do not reduce lock count since it need to be marked as invalid. */
			/* atomic_dec(&(pMMProfile_Globol->ring_buffer[index].lock)); */
			/* Slot collision: keep advancing until a free slot
			 * (lock transitioned 0 -> 1) is claimed. */
			while (1) {
				index =
				    (atomic_inc_return
				     ((atomic_t *) &(MMProfileGlobals.write_pointer)) - 1)
				    % (MMProfileGlobals.buffer_size_record);
				lock =
				    atomic_inc_return((atomic_t *) &
						      (pMMProfileRingBuffer[index].lock));
				/* Do not reduce lock count since it need to be marked as invalid. */
				if (likely(lock == 1))
					break;
			}
		}
		pEvent = (MMProfile_Event_t *) &(pMMProfileRingBuffer[index]);
		system_time(&(pEvent->timeLow), &(pEvent->timeHigh));
		pEvent->id = event;
		pEvent->flag = type;
		pEvent->data1 = (unsigned int)data1;
		pEvent->data2 = (unsigned int)data2;
		pEvent->meta_data_cookie = meta_data_cookie;
		/* Release the slot; a remaining count means another writer
		 * collided while we were filling it. */
		lock = atomic_dec_return((atomic_t *) &(pEvent->lock));
		if (unlikely(lock > 0)) {
			/* Someone has marked this record as invalid. Kill this record. */
			pEvent->id = 0;
			pEvent->lock = 0;
		}
		/* Optionally mirror the event into ftrace as a systrace marker. */
		if ((MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_FTRACE)
		    || (type & MMProfileFlagSystrace)) {
			/* ignore interrupt */
			if (in_interrupt())
				return;
			memset((void *)name, 0, 256);
			name[0] = 'M';
			name[1] = 'M';
			name[2] = 'P';
			name[3] = ':';
			prefix_len = strlen(name);
			size = sizeof(name) - prefix_len;
			if (MMProfileGetEventName(event, &name[prefix_len], &size) > 0) {
				if (type & MMProfileFlagStart) {
					mmp_kernel_trace_begin(name);
				} else if (type & MMProfileFlagEnd) {
					mmp_kernel_trace_end();
				} else if (type & MMProfileFlagPulse) {
					mmp_kernel_trace_counter(name, 1);
					mmp_kernel_trace_counter(name, 0);
				}
			}
		}
	}
}
  660. static long MMProfileLogMetaInt(MMP_Event event, MMP_LogType type, MMP_MetaData_t *pMetaData,
  661. long bFromUser)
  662. {
  663. unsigned long retn;
  664. if (!MMProfileGlobals.enable)
  665. return 0;
  666. if (bMMProfileInitBuffer && MMProfileGlobals.start
  667. && (MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED)) {
  668. MMProfile_MetaDataBlock_t *pNode = NULL;
  669. unsigned long block_size;
  670. if (unlikely(!pMetaData))
  671. return -1;
  672. block_size =
  673. ((offsetof(MMProfile_MetaDataBlock_t, meta_data) + pMetaData->size) + 3) & (~3);
  674. if (block_size > MMProfileGlobals.meta_buffer_size)
  675. return -2;
  676. mutex_lock(&MMProfile_MetaBufferMutex);
  677. pNode = list_entry(MMProfile_MetaBufferList.prev, MMProfile_MetaDataBlock_t, list);
  678. /* If the tail block has been used, move the first block to tail and use it for new meta data. */
  679. if (pNode->data_size > 0) {
  680. list_move_tail(MMProfile_MetaBufferList.next, &MMProfile_MetaBufferList);
  681. pNode =
  682. list_entry(MMProfile_MetaBufferList.prev, MMProfile_MetaDataBlock_t,
  683. list);
  684. }
  685. /* Migrate a block with enough size. The room is collected by sacrificing least recent used blocks. */
  686. while (pNode->block_size < block_size) {
  687. MMProfile_MetaDataBlock_t *pNextNode =
  688. list_entry(pNode->list.next, MMProfile_MetaDataBlock_t, list);
  689. if (&(pNextNode->list) == &MMProfile_MetaBufferList)
  690. pNextNode =
  691. list_entry(pNextNode->list.next, MMProfile_MetaDataBlock_t,
  692. list);
  693. list_del(&(pNextNode->list));
  694. pNode->block_size += pNextNode->block_size;
  695. }
  696. /* Split the block if left memory is enough for a new block. */
  697. if (((unsigned long)pNode + block_size) <
  698. ((unsigned long)pMMProfileMetaBuffer + MMProfileGlobals.meta_buffer_size)
  699. && ((unsigned long)pNode + block_size) >
  700. ((unsigned long)pMMProfileMetaBuffer + MMProfileGlobals.meta_buffer_size -
  701. offsetof(MMProfile_MetaDataBlock_t, meta_data))) {
  702. block_size =
  703. (unsigned long)pMMProfileMetaBuffer +
  704. MMProfileGlobals.meta_buffer_size - (unsigned long)pNode;
  705. }
  706. if ((pNode->block_size - block_size) >=
  707. offsetof(MMProfile_MetaDataBlock_t, meta_data)) {
  708. MMProfile_MetaDataBlock_t *pNewNode =
  709. (MMProfile_MetaDataBlock_t *) ((unsigned long)pNode + block_size);
  710. if ((unsigned long)pNewNode >=
  711. ((unsigned long)pMMProfileMetaBuffer +
  712. MMProfileGlobals.meta_buffer_size))
  713. pNewNode =
  714. (MMProfile_MetaDataBlock_t *) ((unsigned long)pNewNode -
  715. MMProfileGlobals.
  716. meta_buffer_size);
  717. pNewNode->block_size = pNode->block_size - block_size;
  718. pNewNode->data_size = 0;
  719. list_add(&(pNewNode->list), &(pNode->list));
  720. pNode->block_size = block_size;
  721. }
  722. /* Fill data */
  723. pNode->data_size = pMetaData->size;
  724. pNode->data_type = pMetaData->data_type;
  725. pNode->cookie = MMProfile_MetaDataCookie;
  726. MMProfileLog_Int(event, type, pMetaData->data1, pMetaData->data2,
  727. MMProfile_MetaDataCookie);
  728. MMProfile_MetaDataCookie++;
  729. if (MMProfile_MetaDataCookie == 0)
  730. MMProfile_MetaDataCookie++;
  731. if (((unsigned long)(pNode->meta_data) + pMetaData->size) >
  732. ((unsigned long)pMMProfileMetaBuffer + MMProfileGlobals.meta_buffer_size)) {
  733. unsigned long left_size =
  734. (unsigned long)pMMProfileMetaBuffer +
  735. MMProfileGlobals.meta_buffer_size - (unsigned long)(pNode->meta_data);
  736. if (bFromUser) {
  737. retn =
  738. copy_from_user(pNode->meta_data, pMetaData->pData, left_size);
  739. retn =
  740. copy_from_user(pMMProfileMetaBuffer,
  741. (void *)((unsigned long)(pMetaData->pData) +
  742. left_size),
  743. pMetaData->size - left_size);
  744. } else {
  745. memcpy(pNode->meta_data, pMetaData->pData, left_size);
  746. memcpy(pMMProfileMetaBuffer,
  747. (void *)((unsigned long)(pMetaData->pData) + left_size),
  748. pMetaData->size - left_size);
  749. }
  750. } else {
  751. if (bFromUser)
  752. retn =
  753. copy_from_user(pNode->meta_data, pMetaData->pData,
  754. pMetaData->size);
  755. else
  756. memcpy(pNode->meta_data, pMetaData->pData, pMetaData->size);
  757. }
  758. mutex_unlock(&MMProfile_MetaBufferMutex);
  759. }
  760. return 0;
  761. }
  762. /* Internal functions end */
  763. /* Exposed APIs begin */
  764. MMP_Event MMProfileRegisterEvent(MMP_Event parent, const char *name)
  765. {
  766. MMP_Event index;
  767. MMProfile_RegTable_t *pRegTable;
  768. if (!MMProfileGlobals.enable)
  769. return 0;
  770. if (in_interrupt())
  771. return 0;
  772. mutex_lock(&MMProfile_RegTableMutex);
  773. /* index = atomic_inc_return((atomic_t*)&(MMProfileGlobals.reg_event_index)); */
  774. if (MMProfileGlobals.reg_event_index >= (MMProfileMaxEventCount - 1)) {
  775. mutex_unlock(&MMProfile_RegTableMutex);
  776. return 0;
  777. }
  778. /* Check if this event has already been registered. */
  779. index = MMProfileFindEventInt(parent, name);
  780. if (index) {
  781. mutex_unlock(&MMProfile_RegTableMutex);
  782. return index;
  783. }
  784. /* Check if the parent exists. */
  785. if ((parent == 0) || (parent > MMProfileGlobals.reg_event_index)) {
  786. mutex_unlock(&MMProfile_RegTableMutex);
  787. return 0;
  788. }
  789. /* Now register the new event. */
  790. pRegTable = kmalloc(sizeof(MMProfile_RegTable_t), GFP_KERNEL);
  791. if (!pRegTable) {
  792. mutex_unlock(&MMProfile_RegTableMutex);
  793. return 0;
  794. }
  795. index = ++(MMProfileGlobals.reg_event_index);
  796. if (strlen(name) > MMProfileEventNameMaxLen) {
  797. memcpy(pRegTable->event_info.name, name, MMProfileEventNameMaxLen);
  798. pRegTable->event_info.name[MMProfileEventNameMaxLen] = 0;
  799. } else
  800. strcpy(pRegTable->event_info.name, name);
  801. pRegTable->event_info.parentId = parent;
  802. list_add_tail(&(pRegTable->list), &(MMProfile_RegTable.list));
  803. MMProfileGlobals.event_state[index] = 0;
  804. mutex_unlock(&MMProfile_RegTableMutex);
  805. return index;
  806. }
  807. EXPORT_SYMBOL(MMProfileRegisterEvent);
  808. MMP_Event MMProfileFindEvent(MMP_Event parent, const char *name)
  809. {
  810. MMP_Event event;
  811. if (!MMProfileGlobals.enable)
  812. return 0;
  813. if (in_interrupt())
  814. return 0;
  815. mutex_lock(&MMProfile_RegTableMutex);
  816. event = MMProfileFindEventInt(parent, name);
  817. mutex_unlock(&MMProfile_RegTableMutex);
  818. return event;
  819. }
  820. EXPORT_SYMBOL(MMProfileFindEvent);
  821. void MMProfileEnableFTraceEvent(MMP_Event event, long enable, long ftrace)
  822. {
  823. unsigned int state;
  824. if (!MMProfileGlobals.enable)
  825. return;
  826. if ((event < 2) || (event >= MMProfileMaxEventCount))
  827. return;
  828. state = enable ? MMP_EVENT_STATE_ENABLED : 0;
  829. if (enable && ftrace)
  830. state |= MMP_EVENT_STATE_FTRACE;
  831. MMProfileGlobals.event_state[event] = state;
  832. }
  833. EXPORT_SYMBOL(MMProfileEnableFTraceEvent);
  834. void MMProfileEnableEvent(MMP_Event event, long enable)
  835. {
  836. MMProfileEnableFTraceEvent(event, enable, 0);
  837. }
  838. EXPORT_SYMBOL(MMProfileEnableEvent);
/*
 * Apply the enable/ftrace state to 'event' and every descendant.
 * Caller holds MMProfile_RegTableMutex when invoked from the ioctl path.
 */
void MMProfileEnableFTraceEventRecursive(MMP_Event event, long enable, long ftrace)
{
	MMP_Event index;
	MMProfile_RegTable_t *pRegTable;

	index = MMP_RootEvent;
	MMProfileEnableFTraceEvent(event, enable, ftrace);
	/* 'index' tracks the event ID of each table entry -- assumes the list
	 * order matches registration order starting at MMP_RootEvent (TODO
	 * confirm). Each child found recurses to cover the whole subtree. */
	list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
		if (pRegTable->event_info.parentId == event)
			MMProfileEnableFTraceEventRecursive(index, enable, ftrace);
		index++;
	}
}
EXPORT_SYMBOL(MMProfileEnableFTraceEventRecursive);
/*
 * Enable/disable 'event' and every descendant (ftrace flag untouched).
 * Mirrors MMProfileEnableFTraceEventRecursive.
 */
void MMProfileEnableEventRecursive(MMP_Event event, long enable)
{
	MMP_Event index;
	MMProfile_RegTable_t *pRegTable;

	index = MMP_RootEvent;
	MMProfileEnableEvent(event, enable);
	/* 'index' tracks the event ID of each table entry -- assumes list
	 * order matches registration order (TODO confirm). */
	list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
		if (pRegTable->event_info.parentId == event)
			MMProfileEnableEventRecursive(index, enable);
		index++;
	}
}
EXPORT_SYMBOL(MMProfileEnableEventRecursive);
  865. long MMProfileQueryEnable(MMP_Event event)
  866. {
  867. if (!MMProfileGlobals.enable)
  868. return 0;
  869. if (event == MMP_InvalidEvent)
  870. return MMProfileGlobals.enable;
  871. return !!(MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED);
  872. }
  873. EXPORT_SYMBOL(MMProfileQueryEnable);
  874. void MMProfileLogEx(MMP_Event event, MMP_LogType type, unsigned long data1, unsigned long data2)
  875. {
  876. MMProfileLog_Int(event, type, data1, data2, 0);
  877. }
  878. EXPORT_SYMBOL(MMProfileLogEx);
  879. void MMProfileLog(MMP_Event event, MMP_LogType type)
  880. {
  881. MMProfileLogEx(event, type, 0, 0);
  882. }
  883. EXPORT_SYMBOL(MMProfileLog);
  884. long MMProfileLogMeta(MMP_Event event, MMP_LogType type, MMP_MetaData_t *pMetaData)
  885. {
  886. if (!MMProfileGlobals.enable)
  887. return 0;
  888. if (in_interrupt())
  889. return 0;
  890. return MMProfileLogMetaInt(event, type, pMetaData, 0);
  891. }
  892. EXPORT_SYMBOL(MMProfileLogMeta);
  893. long MMProfileLogMetaStructure(MMP_Event event, MMP_LogType type,
  894. MMP_MetaDataStructure_t *pMetaData)
  895. {
  896. int ret = 0;
  897. if (!MMProfileGlobals.enable)
  898. return 0;
  899. if (in_interrupt())
  900. return 0;
  901. if (bMMProfileInitBuffer && MMProfileGlobals.start
  902. && (MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED)) {
  903. MMP_MetaData_t MetaData;
  904. MetaData.data1 = pMetaData->data1;
  905. MetaData.data2 = pMetaData->data2;
  906. MetaData.data_type = MMProfileMetaStructure;
  907. MetaData.size = 32 + pMetaData->struct_size;
  908. MetaData.pData = vmalloc(MetaData.size);
  909. if (!MetaData.pData)
  910. return -1;
  911. memcpy(MetaData.pData, pMetaData->struct_name, 32);
  912. memcpy((void *)((unsigned long)(MetaData.pData) + 32), pMetaData->pData,
  913. pMetaData->struct_size);
  914. ret = MMProfileLogMeta(event, type, &MetaData);
  915. vfree(MetaData.pData);
  916. }
  917. return ret;
  918. }
  919. EXPORT_SYMBOL(MMProfileLogMetaStructure);
  920. long MMProfileLogMetaStringEx(MMP_Event event, MMP_LogType type, unsigned long data1,
  921. unsigned long data2, const char *str)
  922. {
  923. long ret = 0;
  924. if (!MMProfileGlobals.enable)
  925. return 0;
  926. if (in_interrupt())
  927. return 0;
  928. if (bMMProfileInitBuffer && MMProfileGlobals.start
  929. && (MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED)) {
  930. MMP_MetaData_t MetaData;
  931. MetaData.data1 = data1;
  932. MetaData.data2 = data2;
  933. MetaData.data_type = MMProfileMetaStringMBS;
  934. MetaData.size = strlen(str) + 1;
  935. MetaData.pData = vmalloc(MetaData.size);
  936. if (!MetaData.pData)
  937. return -1;
  938. strcpy((char *)MetaData.pData, str);
  939. ret = MMProfileLogMeta(event, type, &MetaData);
  940. vfree(MetaData.pData);
  941. }
  942. return ret;
  943. }
  944. EXPORT_SYMBOL(MMProfileLogMetaStringEx);
  945. long MMProfileLogMetaString(MMP_Event event, MMP_LogType type, const char *str)
  946. {
  947. return MMProfileLogMetaStringEx(event, type, 0, 0, str);
  948. }
  949. EXPORT_SYMBOL(MMProfileLogMetaString);
/*
 * Log a bitmap as meta data. The payload is the bitmap descriptor
 * followed by the pixel data, optionally down-sampled in x/y.
 * Returns 0 when logging is off, -1 on allocation failure, otherwise
 * the result of MMProfileLogMeta().
 */
long MMProfileLogMetaBitmap(MMP_Event event, MMP_LogType type, MMP_MetaDataBitmap_t *pMetaData)
{
	int ret = 0;

	if (!MMProfileGlobals.enable)
		return 0;
	if (in_interrupt())
		return 0;
	if (bMMProfileInitBuffer && MMProfileGlobals.start
	    && (MMProfileGlobals.event_state[event] & MMP_EVENT_STATE_ENABLED)) {
		MMP_MetaData_t MetaData;
		char *pSrc, *pDst;
		long pitch;

		MetaData.data1 = pMetaData->data1;
		MetaData.data2 = pMetaData->data2;
		MetaData.data_type = MMProfileMetaBitmap;
		/* Worst-case size: descriptor + full pixel data (shrunk below
		 * when down-sampling). */
		MetaData.size = sizeof(MMP_MetaDataBitmap_t) + pMetaData->data_size;
		MetaData.pData = vmalloc(MetaData.size);
		if (!MetaData.pData)
			return -1;
		pSrc = (char *)pMetaData->pData + pMetaData->start_pos;
		pDst = (char *)((unsigned long)(MetaData.pData) + sizeof(MMP_MetaDataBitmap_t));
		pitch = pMetaData->pitch;
		memcpy(MetaData.pData, pMetaData, sizeof(MMP_MetaDataBitmap_t));
		/* Negative pitch: presumably a bottom-up image -- record the
		 * copied descriptor's pitch as positive. */
		if (pitch < 0)
			((MMP_MetaDataBitmap_t *) (MetaData.pData))->pitch = -pitch;
		if ((pitch > 0) && (pMetaData->down_sample_x == 1)
		    && (pMetaData->down_sample_y == 1))
			/* Fast path: top-down image, no down-sampling. */
			memcpy(pDst, pSrc, pMetaData->data_size);
		else {
			/* Slow path: copy row by row, and pixel by pixel when
			 * down-sampling horizontally. pSrc advances by the
			 * original signed pitch, so bottom-up images are walked
			 * in their native direction. */
			unsigned int x, y, x0, y0;
			unsigned int new_width, new_height;
			unsigned int Bpp = pMetaData->bpp / 8;

			new_width = (pMetaData->width - 1) / pMetaData->down_sample_x + 1;
			new_height = (pMetaData->height - 1) / pMetaData->down_sample_y + 1;
			MMP_LOG(ANDROID_LOG_DEBUG, "n(%u,%u),o(%u, %u,%d,%u) ", new_width,
				new_height, pMetaData->width, pMetaData->height, pMetaData->pitch,
				pMetaData->bpp);
			for (y = 0, y0 = 0; y < pMetaData->height;
			     y0++, y += pMetaData->down_sample_y) {
				if (pMetaData->down_sample_x == 1)
					memcpy(pDst + new_width * Bpp * y0,
					       pSrc + pMetaData->pitch * y, pMetaData->width * Bpp);
				else {
					for (x = 0, x0 = 0; x < pMetaData->width;
					     x0++, x += pMetaData->down_sample_x) {
						memcpy(pDst + (new_width * y0 + x0) * Bpp,
						       pSrc + pMetaData->pitch * y + x * Bpp, Bpp);
					}
				}
			}
			/* Shrink the logged size to the down-sampled image. */
			MetaData.size = sizeof(MMP_MetaDataBitmap_t) + new_width * Bpp * new_height;
		}
		ret = MMProfileLogMeta(event, type, &MetaData);
		vfree(MetaData.pData);
	}
	return ret;
}
EXPORT_SYMBOL(MMProfileLogMetaBitmap);
  1008. /* Exposed APIs end */
  1009. /* Debug FS begin */
/* debugfs handles: the mmprofile directory plus its start/buffer/global/
 * reset/enable entries and an additional "mmp" entry. */
static struct dentry *g_pDebugFSDir;
static struct dentry *g_pDebugFSStart;
static struct dentry *g_pDebugFSBuffer;
static struct dentry *g_pDebugFSGlobal;
static struct dentry *g_pDebugFSReset;
static struct dentry *g_pDebugFSEnable;
static struct dentry *g_pDebugFSMMP;
  1017. static ssize_t mmprofile_dbgfs_reset_write(struct file *file, const char __user *buf, size_t size,
  1018. loff_t *ppos)
  1019. {
  1020. MMProfileResetBuffer();
  1021. return 1;
  1022. }
  1023. static ssize_t mmprofile_dbgfs_start_read(struct file *file, char __user *buf, size_t size,
  1024. loff_t *ppos)
  1025. {
  1026. char str[32];
  1027. int r;
  1028. MMP_LOG(ANDROID_LOG_DEBUG, "start=%d", MMProfileGlobals.start);
  1029. r = sprintf(str, "start = %d\n", MMProfileGlobals.start);
  1030. return simple_read_from_buffer(buf, size, ppos, str, r);
  1031. }
  1032. static ssize_t mmprofile_dbgfs_start_write(struct file *file, const char __user *buf, size_t size,
  1033. loff_t *ppos)
  1034. {
  1035. unsigned int str;
  1036. int start;
  1037. ssize_t ret;
  1038. ret = simple_write_to_buffer(&str, 4, ppos, buf, size);
  1039. if ((str & 0xFF) == 0x30)
  1040. start = 0;
  1041. else
  1042. start = 1;
  1043. MMP_LOG(ANDROID_LOG_DEBUG, "start=%d", start);
  1044. MMProfileForceStart(start);
  1045. return ret;
  1046. }
  1047. static ssize_t mmprofile_dbgfs_enable_read(struct file *file, char __user *buf, size_t size,
  1048. loff_t *ppos)
  1049. {
  1050. char str[32];
  1051. int r;
  1052. MMP_LOG(ANDROID_LOG_DEBUG, "enable=%d", MMProfileGlobals.enable);
  1053. r = sprintf(str, "enable = %d\n", MMProfileGlobals.enable);
  1054. return simple_read_from_buffer(buf, size, ppos, str, r);
  1055. }
  1056. static ssize_t mmprofile_dbgfs_enable_write(struct file *file, const char __user *buf, size_t size,
  1057. loff_t *ppos)
  1058. {
  1059. unsigned int str;
  1060. int enable;
  1061. ssize_t ret;
  1062. ret = simple_write_to_buffer(&str, 4, ppos, buf, size);
  1063. if ((str & 0xFF) == 0x30)
  1064. enable = 0;
  1065. else
  1066. enable = 1;
  1067. MMP_LOG(ANDROID_LOG_DEBUG, "enable=%d", enable);
  1068. MMProfileEnable(enable);
  1069. return ret;
  1070. }
/*
 * Stream the event ring buffer to user space. Profiling is paused for
 * the duration of a sequential dump (starting at *ppos == 0) and
 * resumed once the end is reached.
 */
static ssize_t mmprofile_dbgfs_buffer_read(struct file *file, char __user *buf, size_t size,
					   loff_t *ppos)
{
	/* Start flag saved across the read() calls of one dump -- NOTE(review):
	 * static, so concurrent readers would trample each other. */
	static unsigned int backup_state;
	unsigned int copy_size = 0;
	unsigned int total_copy = 0;
	unsigned long Addr;

	if (!bMMProfileInitBuffer)
		return -EFAULT;
	MMP_LOG(ANDROID_LOG_VERBOSE, "size=%ld ppos=%d", (unsigned long)size, (int)(*ppos));
	if (*ppos == 0) {
		/* First chunk: remember whether profiling ran, then stop it. */
		backup_state = MMProfileGlobals.start;
		MMProfileForceStart(0);
	}
	while (size > 0) {
		MMProfileGetDumpBuffer(*ppos, &Addr, &copy_size);
		if (copy_size == 0) {
			/* Dump complete; restore the previous start state. */
			if (backup_state)
				MMProfileForceStart(1);
			break;
		}
		/* Clamp the chunk to the space the caller has left. */
		if (size >= copy_size) {
			size -= copy_size;
		} else {
			copy_size = size;
			size = 0;
		}
		if (copy_to_user(buf + total_copy, (void *)Addr, copy_size)) {
			MMP_LOG(ANDROID_LOG_DEBUG, "fail to copytouser total_copy=%d", total_copy);
			break;
		}
		*ppos += copy_size;
		total_copy += copy_size;
	}
	return total_copy;
	/* return simple_read_from_buffer(buf, size, ppos, pMMProfileRingBuffer, MMProfileGlobals.buffer_size_bytes); */
}
  1108. static ssize_t mmprofile_dbgfs_global_read(struct file *file, char __user *buf, size_t size,
  1109. loff_t *ppos)
  1110. {
  1111. return simple_read_from_buffer(buf, size, ppos, &MMProfileGlobals, MMProfileGlobalsSize);
  1112. }
/* Overwrite the global control block from user space.
 * NOTE(review): no field validation is performed -- debug interface only. */
static ssize_t mmprofile_dbgfs_global_write(struct file *file, const char __user *buf, size_t size,
					    loff_t *ppos)
{
	return simple_write_to_buffer(&MMProfileGlobals, MMProfileGlobalsSize, ppos, buf, size);
}
/* File operations for each debugfs entry. */
static const struct file_operations mmprofile_dbgfs_enable_fops = {
	.read = mmprofile_dbgfs_enable_read,
	.write = mmprofile_dbgfs_enable_write,
	.llseek = generic_file_llseek,
};
static const struct file_operations mmprofile_dbgfs_start_fops = {
	.read = mmprofile_dbgfs_start_read,
	.write = mmprofile_dbgfs_start_write,
	.llseek = generic_file_llseek,
};
/* "reset" is write-only: any write clears the buffers. */
static const struct file_operations mmprofile_dbgfs_reset_fops = {
	.write = mmprofile_dbgfs_reset_write,
	.llseek = generic_file_llseek,
};
/* "buffer" is read-only: streams the event ring buffer. */
static const struct file_operations mmprofile_dbgfs_buffer_fops = {
	.read = mmprofile_dbgfs_buffer_read,
	.llseek = generic_file_llseek,
};
static const struct file_operations mmprofile_dbgfs_global_fops = {
	.read = mmprofile_dbgfs_global_read,
	.write = mmprofile_dbgfs_global_write,
	.llseek = generic_file_llseek,
};
/* Debug FS end */
  1141. /* Debug FS end */
/* Scratch buffer for debug commands written via mmprofile_write().
 * NOTE(review): shared without locking -- concurrent writers could race. */
static char cmd_buf[128];
  1143. static void process_dbg_cmd(char *cmd)
  1144. {
  1145. if (0 == strncmp(cmd, "mmp_log_on:", 11)) {
  1146. char *p = (char *)cmd + 11;
  1147. unsigned long value;
  1148. if (0 == kstrtoul(p, 10, &value) && 0 != value)
  1149. mmp_log_on = 1;
  1150. else
  1151. mmp_log_on = 0;
  1152. MMP_MSG("mmp_log_on=%d\n", mmp_log_on);
  1153. } else if (0 == strncmp(cmd, "mmp_trace_log_on:", 17)) {
  1154. char *p = (char *)cmd + 17;
  1155. unsigned long value;
  1156. if (0 == kstrtoul(p, 10, &value) && 0 != value)
  1157. mmp_trace_log_on = 1;
  1158. else
  1159. mmp_trace_log_on = 0;
  1160. MMP_MSG("mmp_trace_log_on=%d\n", mmp_trace_log_on);
  1161. } else {
  1162. MMP_MSG("invalid mmp debug command: %s\n", NULL != cmd ? cmd : "(empty)");
  1163. }
  1164. }
  1165. /* Driver specific begin */
  1166. /*
  1167. static dev_t mmprofile_devno;
  1168. static struct cdev *mmprofile_cdev;
  1169. static struct class *mmprofile_class = NULL;
  1170. */
  1171. static int mmprofile_release(struct inode *inode, struct file *file)
  1172. {
  1173. return 0;
  1174. }
  1175. static int mmprofile_open(struct inode *inode, struct file *file)
  1176. {
  1177. return 0;
  1178. }
  1179. static ssize_t mmprofile_read(struct file *file, char __user *data, size_t len, loff_t *ppos)
  1180. {
  1181. return 0;
  1182. }
  1183. static ssize_t mmprofile_write(struct file *file, const char __user *data, size_t len,
  1184. loff_t *ppos)
  1185. {
  1186. ssize_t ret;
  1187. size_t length = len;
  1188. if (length > 127)
  1189. length = 127;
  1190. ret = length;
  1191. if (copy_from_user(&cmd_buf, data, length))
  1192. return -EFAULT;
  1193. cmd_buf[length] = 0;
  1194. process_dbg_cmd(cmd_buf);
  1195. return ret;
  1196. }
/*
 * ioctl dispatcher for the mmprofile device node.
 * NOTE(review): copy_*_user / get_user / put_user results are stored in
 * 'retn' (or ignored) but never checked throughout this handler.
 */
static long mmprofile_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	unsigned long retn;

	switch (cmd) {
	case MMP_IOC_ENABLE:
		/* arg: 0 = disable, 1 = enable the whole profiler. */
		if ((arg == 0) || (arg == 1))
			MMProfileEnable((int)arg);
		else
			ret = -EINVAL;
		break;
	case MMP_IOC_REMOTESTART:	/* if using remote tool (PC side) or adb shell command, can always start mmp */
		if ((arg == 0) || (arg == 1))
			MMProfileRemoteStart((int)arg);
		else
			ret = -EINVAL;
		break;
	case MMP_IOC_START:
		if ((arg == 0) || (arg == 1))
			MMProfileForceStart((int)arg);
		else
			ret = -EINVAL;
		break;
	case MMP_IOC_TIME:
		{
			/* Return the 64-bit profiler timestamp to *arg. */
			unsigned int time_low;
			unsigned int time_high;
			unsigned long long time;

			system_time(&time_low, &time_high);
			time = time_low + ((unsigned long long)time_high << 32);
			put_user(time, (unsigned long long *)arg);
		}
		break;
	case MMP_IOC_REGEVENT:
		{
			/* In: event info (parent + name); out: parentId carries
			 * the newly registered event ID (0 on failure). */
			MMProfile_EventInfo_t event_info;

			retn =
			    copy_from_user(&event_info, (void *)arg, sizeof(MMProfile_EventInfo_t));
			/* Force termination of the user-supplied name. */
			event_info.name[MMProfileEventNameMaxLen] = 0;
			event_info.parentId =
			    MMProfileRegisterEvent(event_info.parentId, event_info.name);
			retn =
			    copy_to_user((void *)arg, &event_info, sizeof(MMProfile_EventInfo_t));
		}
		break;
	case MMP_IOC_FINDEVENT:
		{
			/* Same in/out layout as REGEVENT, lookup only. */
			MMProfile_EventInfo_t event_info;

			retn =
			    copy_from_user(&event_info, (void *)arg, sizeof(MMProfile_EventInfo_t));
			event_info.name[MMProfileEventNameMaxLen] = 0;
			mutex_lock(&MMProfile_RegTableMutex);
			event_info.parentId =
			    MMProfileFindEventInt(event_info.parentId, event_info.name);
			mutex_unlock(&MMProfile_RegTableMutex);
			retn =
			    copy_to_user((void *)arg, &event_info, sizeof(MMProfile_EventInfo_t));
		}
		break;
	case MMP_IOC_ENABLEEVENT:
		{
			/* arg points at four consecutive u32s:
			 * event, enable, recursive, ftrace. */
			MMP_Event event;
			unsigned int enable;
			unsigned int recursive;
			unsigned int ftrace;

			get_user(event, (unsigned int *)arg);
			get_user(enable, (unsigned int *)(arg + 4));
			get_user(recursive, (unsigned int *)(arg + 8));
			get_user(ftrace, (unsigned int *)(arg + 12));
			if (recursive) {
				mutex_lock(&MMProfile_RegTableMutex);
				MMProfileEnableFTraceEventRecursive(event, enable, ftrace);
				mutex_unlock(&MMProfile_RegTableMutex);
			} else
				MMProfileEnableFTraceEvent(event, enable, ftrace);
		}
		break;
	case MMP_IOC_LOG:
		{
			/* arg points at four consecutive u32s:
			 * event, type, data1, data2. */
			MMP_Event event;
			MMP_LogType type;
			unsigned int data1;
			unsigned int data2;

			get_user(event, (unsigned int *)arg);
			get_user(type, (unsigned int *)(arg + 4));
			get_user(data1, (unsigned int *)(arg + 8));
			get_user(data2, (unsigned int *)(arg + 12));
			MMProfileLogEx(event, type, data1, data2);
		}
		break;
	case MMP_IOC_DUMPEVENTINFO:
		{
			/* Dump the registration table into a user array of
			 * MMProfileMaxEventCount entries; slot 0 and unused
			 * slots are filled with a dummy entry. */
			MMP_Event index;
			MMProfile_RegTable_t *pRegTable;
			MMProfile_EventInfo_t *pEventInfoUser = (MMProfile_EventInfo_t *) arg;
			MMProfile_EventInfo_t EventInfoDummy = { 0, "" };

			MMProfileRegisterStaticEvents(1);
			mutex_lock(&MMProfile_RegTableMutex);
			retn =
			    copy_to_user(pEventInfoUser, &EventInfoDummy,
					 sizeof(MMProfile_EventInfo_t));
			index = MMP_RootEvent;
			list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
				retn =
				    copy_to_user(&pEventInfoUser[index], &(pRegTable->event_info),
						 sizeof(MMProfile_EventInfo_t));
				index++;
			}
			for (; index < MMProfileMaxEventCount; index++) {
				retn =
				    copy_to_user(&pEventInfoUser[index], &EventInfoDummy,
						 sizeof(MMProfile_EventInfo_t));
			}
			mutex_unlock(&MMProfile_RegTableMutex);
		}
		break;
	case MMP_IOC_METADATALOG:
		{
			/* NOTE(review): MetaLog.id comes from user space and is
			 * not range-checked here; MMProfileLogMetaInt must
			 * validate it before indexing event_state[]. */
			MMProfile_MetaLog_t MetaLog;

			retn = copy_from_user(&MetaLog, (void *)arg, sizeof(MMProfile_MetaLog_t));
			MMProfileLogMetaInt(MetaLog.id, MetaLog.type, &(MetaLog.meta_data), 1);
		}
		break;
	case MMP_IOC_DUMPMETADATA:
		{
			/* User buffer layout: u32 record count, u32 total data
			 * size, the record table, then the packed payloads
			 * (each 4-byte aligned). */
			unsigned int meta_data_count = 0;
			unsigned int offset = 0;
			unsigned int index;
			unsigned int buffer_size = 0;
			MMProfile_MetaDataBlock_t *pMetaDataBlock;
			MMProfile_MetaData_t *pMetaData = (MMProfile_MetaData_t *) (arg + 8);

			mutex_lock(&MMProfile_MetaBufferMutex);
			/* Pass 1: emit one record header per used block. */
			list_for_each_entry(pMetaDataBlock, &MMProfile_MetaBufferList, list) {
				if (pMetaDataBlock->data_size > 0) {
					put_user(pMetaDataBlock->cookie,
						 &(pMetaData[meta_data_count].cookie));
					put_user(pMetaDataBlock->data_size,
						 &(pMetaData[meta_data_count].data_size));
					put_user(pMetaDataBlock->data_type,
						 &(pMetaData[meta_data_count].data_type));
					buffer_size += pMetaDataBlock->data_size;
					meta_data_count++;
				}
			}
			put_user(meta_data_count, (unsigned int *)arg);
			/* pr_debug("[mmprofile_ioctl] meta_data_count=%d meta_data_size=%x\n",
			   meta_data_count, buffer_size); */
			offset = 8 + sizeof(MMProfile_MetaData_t) * meta_data_count;
			index = 0;
			/* Pass 2: copy each payload, un-wrapping data that
			 * crosses the end of the circular meta buffer. */
			list_for_each_entry(pMetaDataBlock, &MMProfile_MetaBufferList, list) {
				if (pMetaDataBlock->data_size > 0) {
					put_user(offset - 8, &(pMetaData[index].data_offset));
					/* pr_debug("[mmprofile_ioctl] MetaRecord: offset=%x size=%x\n",
					   offset-8, pMetaDataBlock->data_size); */
					if (((unsigned long)(pMetaDataBlock->meta_data) +
					     pMetaDataBlock->data_size) >
					    ((unsigned long)pMMProfileMetaBuffer +
					     MMProfileGlobals.meta_buffer_size)) {
						unsigned long left_size =
						    (unsigned long)pMMProfileMetaBuffer +
						    MMProfileGlobals.meta_buffer_size -
						    (unsigned long)(pMetaDataBlock->meta_data);
						retn =
						    copy_to_user((void *)(arg + offset),
								 pMetaDataBlock->meta_data,
								 left_size);
						retn =
						    copy_to_user((void *)(arg + offset + left_size),
								 pMMProfileMetaBuffer,
								 pMetaDataBlock->data_size -
								 left_size);
					} else
						retn =
						    copy_to_user((void *)(arg + offset),
								 pMetaDataBlock->meta_data,
								 pMetaDataBlock->data_size);
					offset = (offset + pMetaDataBlock->data_size + 3) & (~3);
					index++;
				}
			}
			put_user(offset - 8, (unsigned int *)(arg + 4));
			/* pr_debug("[mmprofile_ioctl] Finished: offset=%x\n", offset-8); */
			mutex_unlock(&MMProfile_MetaBufferMutex);
		}
		break;
	case MMP_IOC_SELECTBUFFER:
		MMProfileGlobals.selected_buffer = arg;
		break;
	case MMP_IOC_TRYLOG:
		/* Succeeds (ret == 0) only when a log to event 'arg' would
		 * actually be recorded right now. */
		if ((!MMProfileGlobals.enable) ||
		    (!bMMProfileInitBuffer) ||
		    (!MMProfileGlobals.start) ||
		    (arg >= MMProfileMaxEventCount) ||
		    (!(MMProfileGlobals.event_state[arg] & MMP_EVENT_STATE_ENABLED)))
			ret = -EINVAL;
		break;
	case MMP_IOC_ISENABLE:
		{
			/* In: event ID at *arg; out: 0/1 enable state at *arg. */
			MMP_Event event;

			get_user(event, (unsigned int *)arg);
			put_user(MMProfileQueryEnable(event), (unsigned int *)arg);
		}
		break;
	case MMP_IOC_TEST:
		{
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
  1410. #ifdef CONFIG_COMPAT
  1411. static long mmprofile_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
  1412. {
  1413. int ret = 0;
  1414. unsigned long retn;
  1415. switch (cmd) {
  1416. case MMP_IOC_ENABLE:
  1417. if ((arg == 0) || (arg == 1))
  1418. MMProfileEnable((int)arg);
  1419. else
  1420. ret = -EINVAL;
  1421. break;
  1422. case MMP_IOC_REMOTESTART: /* if using remote tool (PC side) or adb shell command, can always start mmp */
  1423. if ((arg == 0) || (arg == 1))
  1424. MMProfileRemoteStart((int)arg);
  1425. else
  1426. ret = -EINVAL;
  1427. break;
  1428. case MMP_IOC_START:
  1429. if ((arg == 0) || (arg == 1))
  1430. MMProfileForceStart((int)arg);
  1431. else
  1432. ret = -EINVAL;
  1433. break;
  1434. case MMP_IOC_TIME:
  1435. {
  1436. unsigned int time_low;
  1437. unsigned int time_high;
  1438. unsigned long long time;
  1439. system_time(&time_low, &time_high);
  1440. time = time_low + ((unsigned long long)time_high << 32);
  1441. put_user(time, (unsigned long long *)arg);
  1442. }
  1443. break;
  1444. case MMP_IOC_REGEVENT:
  1445. {
  1446. MMProfile_EventInfo_t event_info;
  1447. retn =
  1448. copy_from_user(&event_info, (void *)arg, sizeof(MMProfile_EventInfo_t));
  1449. event_info.name[MMProfileEventNameMaxLen] = 0;
  1450. event_info.parentId =
  1451. MMProfileRegisterEvent(event_info.parentId, event_info.name);
  1452. retn =
  1453. copy_to_user((void *)arg, &event_info, sizeof(MMProfile_EventInfo_t));
  1454. }
  1455. break;
  1456. case MMP_IOC_FINDEVENT:
  1457. {
  1458. MMProfile_EventInfo_t event_info;
  1459. retn =
  1460. copy_from_user(&event_info, (void *)arg, sizeof(MMProfile_EventInfo_t));
  1461. event_info.name[MMProfileEventNameMaxLen] = 0;
  1462. mutex_lock(&MMProfile_RegTableMutex);
  1463. event_info.parentId =
  1464. MMProfileFindEventInt(event_info.parentId, event_info.name);
  1465. mutex_unlock(&MMProfile_RegTableMutex);
  1466. retn =
  1467. copy_to_user((void *)arg, &event_info, sizeof(MMProfile_EventInfo_t));
  1468. }
  1469. break;
  1470. case MMP_IOC_ENABLEEVENT:
  1471. {
  1472. MMP_Event event;
  1473. unsigned int enable;
  1474. unsigned int recursive;
  1475. unsigned int ftrace;
  1476. get_user(event, (unsigned int *)arg);
  1477. get_user(enable, (unsigned int *)(arg + 4));
  1478. get_user(recursive, (unsigned int *)(arg + 8));
  1479. get_user(ftrace, (unsigned int *)(arg + 12));
  1480. if (recursive) {
  1481. mutex_lock(&MMProfile_RegTableMutex);
  1482. MMProfileEnableFTraceEventRecursive(event, enable, ftrace);
  1483. mutex_unlock(&MMProfile_RegTableMutex);
  1484. } else
  1485. MMProfileEnableFTraceEvent(event, enable, ftrace);
  1486. }
  1487. break;
  1488. case MMP_IOC_LOG:
  1489. {
  1490. MMP_Event event;
  1491. MMP_LogType type;
  1492. unsigned int data1;
  1493. unsigned int data2;
  1494. get_user(event, (unsigned int *)arg);
  1495. get_user(type, (unsigned int *)(arg + 4));
  1496. get_user(data1, (unsigned int *)(arg + 8));
  1497. get_user(data2, (unsigned int *)(arg + 12));
  1498. MMProfileLogEx(event, type, data1, data2);
  1499. }
  1500. break;
  1501. case MMP_IOC_DUMPEVENTINFO:
  1502. {
  1503. MMP_Event index;
  1504. MMProfile_RegTable_t *pRegTable;
  1505. MMProfile_EventInfo_t *pEventInfoUser = (MMProfile_EventInfo_t *) arg;
  1506. MMProfile_EventInfo_t EventInfoDummy = { 0, "" };
  1507. MMProfileRegisterStaticEvents(1);
  1508. mutex_lock(&MMProfile_RegTableMutex);
  1509. retn =
  1510. copy_to_user(pEventInfoUser, &EventInfoDummy,
  1511. sizeof(MMProfile_EventInfo_t));
  1512. index = MMP_RootEvent;
  1513. list_for_each_entry(pRegTable, &(MMProfile_RegTable.list), list) {
  1514. retn =
  1515. copy_to_user(&pEventInfoUser[index], &(pRegTable->event_info),
  1516. sizeof(MMProfile_EventInfo_t));
  1517. index++;
  1518. }
  1519. for (; index < MMProfileMaxEventCount; index++) {
  1520. retn =
  1521. copy_to_user(&pEventInfoUser[index], &EventInfoDummy,
  1522. sizeof(MMProfile_EventInfo_t));
  1523. }
  1524. mutex_unlock(&MMProfile_RegTableMutex);
  1525. }
  1526. break;
  1527. case MMP_IOC_METADATALOG:
  1528. {
  1529. MMProfile_MetaLog_t MetaLog;
  1530. retn = copy_from_user(&MetaLog, (void *)arg, sizeof(MMProfile_MetaLog_t));
  1531. MMProfileLogMetaInt(MetaLog.id, MetaLog.type, &(MetaLog.meta_data), 1);
  1532. }
  1533. break;
  1534. case MMP_IOC_DUMPMETADATA:
  1535. {
  1536. unsigned int meta_data_count = 0;
  1537. unsigned int offset = 0;
  1538. unsigned int index;
  1539. unsigned int buffer_size = 0;
  1540. MMProfile_MetaDataBlock_t *pMetaDataBlock;
  1541. MMProfile_MetaData_t *pMetaData = (MMProfile_MetaData_t *) (arg + 8);
  1542. mutex_lock(&MMProfile_MetaBufferMutex);
  1543. list_for_each_entry(pMetaDataBlock, &MMProfile_MetaBufferList, list) {
  1544. if (pMetaDataBlock->data_size > 0) {
  1545. put_user(pMetaDataBlock->cookie,
  1546. &(pMetaData[meta_data_count].cookie));
  1547. put_user(pMetaDataBlock->data_size,
  1548. &(pMetaData[meta_data_count].data_size));
  1549. put_user(pMetaDataBlock->data_type,
  1550. &(pMetaData[meta_data_count].data_type));
  1551. buffer_size += pMetaDataBlock->data_size;
  1552. meta_data_count++;
  1553. }
  1554. }
  1555. put_user(meta_data_count, (unsigned int *)arg);
  1556. /* pr_debug("[mmprofile_ioctl] meta_data_count=%d meta_data_size=%x\n",
  1557. meta_data_count, buffer_size); */
  1558. offset = 8 + sizeof(MMProfile_MetaData_t) * meta_data_count;
  1559. index = 0;
  1560. list_for_each_entry(pMetaDataBlock, &MMProfile_MetaBufferList, list) {
  1561. if (pMetaDataBlock->data_size > 0) {
  1562. put_user(offset - 8, &(pMetaData[index].data_offset));
  1563. /* pr_debug("[mmprofile_ioctl] MetaRecord: offset=%x size=%x\n",
  1564. offset-8, pMetaDataBlock->data_size); */
  1565. if (((unsigned long)(pMetaDataBlock->meta_data) +
  1566. pMetaDataBlock->data_size) >
  1567. ((unsigned long)pMMProfileMetaBuffer +
  1568. MMProfileGlobals.meta_buffer_size)) {
  1569. unsigned long left_size =
  1570. (unsigned long)pMMProfileMetaBuffer +
  1571. MMProfileGlobals.meta_buffer_size -
  1572. (unsigned long)(pMetaDataBlock->meta_data);
  1573. retn =
  1574. copy_to_user((void *)(arg + offset),
  1575. pMetaDataBlock->meta_data,
  1576. left_size);
  1577. retn =
  1578. copy_to_user((void *)(arg + offset + left_size),
  1579. pMMProfileMetaBuffer,
  1580. pMetaDataBlock->data_size -
  1581. left_size);
  1582. } else
  1583. retn =
  1584. copy_to_user((void *)(arg + offset),
  1585. pMetaDataBlock->meta_data,
  1586. pMetaDataBlock->data_size);
  1587. offset = (offset + pMetaDataBlock->data_size + 3) & (~3);
  1588. index++;
  1589. }
  1590. }
  1591. put_user(offset - 8, (unsigned int *)(arg + 4));
  1592. /* pr_debug("[mmprofile_ioctl] Finished: offset=%x\n", offset-8); */
  1593. mutex_unlock(&MMProfile_MetaBufferMutex);
  1594. }
  1595. break;
  1596. case MMP_IOC_SELECTBUFFER:
  1597. MMProfileGlobals.selected_buffer = arg;
  1598. break;
  1599. case MMP_IOC_TRYLOG:
  1600. if ((!MMProfileGlobals.enable) ||
  1601. (!bMMProfileInitBuffer) ||
  1602. (!MMProfileGlobals.start) ||
  1603. (arg >= MMProfileMaxEventCount) ||
  1604. (!(MMProfileGlobals.event_state[arg] & MMP_EVENT_STATE_ENABLED)))
  1605. ret = -EINVAL;
  1606. break;
  1607. case MMP_IOC_ISENABLE:
  1608. {
  1609. MMP_Event event;
  1610. get_user(event, (unsigned int *)arg);
  1611. put_user(MMProfileQueryEnable(event), (unsigned int *)arg);
  1612. }
  1613. break;
  1614. case MMP_IOC_TEST:
  1615. {
  1616. }
  1617. break;
  1618. default:
  1619. ret = -EINVAL;
  1620. break;
  1621. }
  1622. return ret;
  1623. }
  1624. #endif
  1625. static int mmprofile_mmap(struct file *file, struct vm_area_struct *vma)
  1626. {
  1627. unsigned int pos = 0;
  1628. unsigned int i = 0;
  1629. if (MMProfileGlobals.selected_buffer == MMProfileGlobalsBuffer) {
  1630. /* vma->vm_flags |= VM_RESERVED; */
  1631. /* vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); */
  1632. pos = vma->vm_start;
  1633. for (i = 0; i < MMProfileGlobalsSize; i += PAGE_SIZE, pos += PAGE_SIZE) {
  1634. unsigned long pfn;
  1635. /* pr_debug("[mmprofile_mmap] mmap pos=0x%08x va=0x%08x pa=0x%08x pfn=0x%08x\n",
  1636. pos, (unsigned long)(&MMProfileGlobals) + i,
  1637. virt_to_phys((unsigned long)(&MMProfileGlobals) + i),
  1638. phys_to_pfn(__virt_to_phys((unsigned long)(&MMProfileGlobals) + i))); */
  1639. /* flush_dcache_page(virt_to_page((void*)((unsigned long)(&MMProfileGlobals) + i))); */
  1640. pfn = __phys_to_pfn(__virt_to_phys((unsigned long)(&MMProfileGlobals) + i));
  1641. if (remap_pfn_range
  1642. (vma, pos, pfn, PAGE_SIZE, vma->vm_page_prot | PAGE_SHARED))
  1643. return -EAGAIN;
  1644. /* pr_debug("pfn: 0x%08x\n", pfn); */
  1645. }
  1646. } else if (MMProfileGlobals.selected_buffer == MMProfilePrimaryBuffer) {
  1647. MMProfileInitBuffer();
  1648. if (!bMMProfileInitBuffer)
  1649. return -EAGAIN;
  1650. /* vma->vm_flags |= VM_RESERVED; */
  1651. /* vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); */
  1652. pos = vma->vm_start;
  1653. for (i = 0; i < MMProfileGlobals.buffer_size_bytes;
  1654. i += PAGE_SIZE, pos += PAGE_SIZE) {
  1655. /* pr_debug("[mmprofile_mmap] mmap pos=0x%08x va=0x%08x pfn=0x%08x\n",
  1656. pos, (void*)((unsigned long)pMMProfileRingBuffer + i),
  1657. vmalloc_to_pfn((void*)((unsigned long)pMMProfileRingBuffer + i))); */
  1658. /* flush_dcache_page(vmalloc_to_page((void*)((unsigned long)(pMMProfileRingBuffer) + i))); */
  1659. if (remap_pfn_range
  1660. (vma, pos,
  1661. vmalloc_to_pfn((void *)((unsigned long)pMMProfileRingBuffer + i)),
  1662. PAGE_SIZE, vma->vm_page_prot | PAGE_SHARED))
  1663. return -EAGAIN;
  1664. }
  1665. } else
  1666. return -EINVAL;
  1667. return 0;
  1668. }
  1669. const struct file_operations mmprofile_fops = {
  1670. .owner = THIS_MODULE,
  1671. .unlocked_ioctl = mmprofile_ioctl,
  1672. .open = mmprofile_open,
  1673. .release = mmprofile_release,
  1674. .read = mmprofile_read,
  1675. .write = mmprofile_write,
  1676. .mmap = mmprofile_mmap,
  1677. #ifdef CONFIG_COMPAT
  1678. .compat_ioctl = mmprofile_ioctl_compat,
  1679. #endif
  1680. };
  1681. static int mmprofile_probe(void)
  1682. {
  1683. #if 0
  1684. struct class_device *class_dev = 0;
  1685. int ret = alloc_chrdev_region(&mmprofile_devno, 0, 1, MMP_DEVNAME);
  1686. mmprofile_cdev = cdev_alloc();
  1687. mmprofile_cdev->owner = THIS_MODULE;
  1688. mmprofile_cdev->ops = &mmprofile_fops;
  1689. ret = cdev_add(mmprofile_cdev, mmprofile_devno, 1);
  1690. mmprofile_class = class_create(THIS_MODULE, MMP_DEVNAME);
  1691. class_dev =
  1692. (struct class_device *)device_create(mmprofile_class, NULL, mmprofile_devno, NULL,
  1693. MMP_DEVNAME);
  1694. #endif
  1695. mmp_log_on = false;
  1696. mmp_trace_log_on = false;
  1697. /* Create debugfs */
  1698. g_pDebugFSDir = debugfs_create_dir("mmprofile", NULL);
  1699. if (g_pDebugFSDir) {
  1700. /* Create debugfs files. */
  1701. g_pDebugFSMMP =
  1702. debugfs_create_file("mmp", S_IFREG | S_IRUGO, g_pDebugFSDir, NULL,
  1703. &mmprofile_fops);
  1704. g_pDebugFSEnable =
  1705. debugfs_create_file("enable", S_IRUSR | S_IWUSR, g_pDebugFSDir, NULL,
  1706. &mmprofile_dbgfs_enable_fops);
  1707. g_pDebugFSStart =
  1708. debugfs_create_file("start", S_IRUSR | S_IWUSR, g_pDebugFSDir, NULL,
  1709. &mmprofile_dbgfs_start_fops);
  1710. g_pDebugFSBuffer =
  1711. debugfs_create_file("buffer", S_IRUSR, g_pDebugFSDir, NULL,
  1712. &mmprofile_dbgfs_buffer_fops);
  1713. g_pDebugFSGlobal =
  1714. debugfs_create_file("global", S_IRUSR, g_pDebugFSDir, NULL,
  1715. &mmprofile_dbgfs_global_fops);
  1716. g_pDebugFSReset =
  1717. debugfs_create_file("reset", S_IWUSR, g_pDebugFSDir, NULL,
  1718. &mmprofile_dbgfs_reset_fops);
  1719. }
  1720. /* // Read NVRAM configuration */
  1721. /* { */
  1722. /* struct file *filp; */
  1723. /* unsigned int enable; */
  1724. /* unsigned int start; */
  1725. /* mm_segment_t old_fs; */
  1726. /* old_fs = get_fs(); */
  1727. /* set_fs(KERNEL_DS); */
  1728. /* filp = filp_open(CONFIG_MMPROFILE_PATH, O_RDONLY, 0777); */
  1729. /* if (IS_ERR(filp)) */
  1730. /* { */
  1731. /* pr_debug("[mmprofile] NVM: Cannot open configuration file %s\n", CONFIG_MMPROFILE_PATH); */
  1732. /* MMProfileEnable(0); */
  1733. /* } */
  1734. /* else */
  1735. /* { */
  1736. /* filp->f_op->llseek(filp, 0, SEEK_SET); */
  1737. /* filp->f_op->read(filp, (char*)(&enable), 4, &filp->f_pos); */
  1738. /* filp->f_op->read(filp, (char*)(&start), 4, &filp->f_pos); */
  1739. /* filp_close(filp, NULL); */
  1740. /* pr_debug("[mmprofile] NVM: enable=%d start=%d.\n", enable, start); */
  1741. /* if (enable == 1) */
  1742. /* { */
  1743. /* MMProfileEnable(1); */
  1744. /* if (start == 1) */
  1745. /* MMProfileStart(1); */
  1746. /* else */
  1747. /* MMProfileStart(0); */
  1748. /* } */
  1749. /* else */
  1750. /* MMProfileEnable(0); */
  1751. /* } */
  1752. /* set_fs(old_fs); */
  1753. /* } */
  1754. return 0;
  1755. }
  1756. static int mmprofile_remove(void)
  1757. {
  1758. debugfs_remove(g_pDebugFSDir);
  1759. debugfs_remove(g_pDebugFSEnable);
  1760. debugfs_remove(g_pDebugFSStart);
  1761. debugfs_remove(g_pDebugFSGlobal);
  1762. debugfs_remove(g_pDebugFSBuffer);
  1763. debugfs_remove(g_pDebugFSReset);
  1764. debugfs_remove(g_pDebugFSMMP);
  1765. return 0;
  1766. }
/* Legacy platform-bus registration, compiled out (#if 0) — the driver
 * is initialized directly from mmprofile_init() instead.  Kept for
 * reference only. */
#if 0
static struct platform_driver mmprofile_driver = {
	.probe = mmprofile_probe,
	.remove = mmprofile_remove,
	.driver = {.name = MMP_DEVNAME}
};
static struct platform_device mmprofile_device = {
	.name = MMP_DEVNAME,
	.id = 0,
};
#endif
  1778. static int __init mmprofile_init(void)
  1779. {
  1780. #if 0
  1781. if (platform_device_register(&mmprofile_device))
  1782. return -ENODEV;
  1783. if (platform_driver_register(&mmprofile_driver)) {
  1784. platform_device_unregister(&mmprofile_device);
  1785. return -ENODEV;
  1786. }
  1787. #endif
  1788. mmprofile_probe();
  1789. return 0;
  1790. }
/*
 * mmprofile_exit - module exit point; undoes mmprofile_probe() by
 * removing the debugfs interface.  The #if 0 block is the dead
 * counterpart of the disabled cdev/platform registration path.
 */
static void __exit mmprofile_exit(void)
{
#if 0
	device_destroy(mmprofile_class, mmprofile_devno);
	class_destroy(mmprofile_class);
	cdev_del(mmprofile_cdev);
	unregister_chrdev_region(mmprofile_devno, 1);
	platform_driver_unregister(&mmprofile_driver);
	platform_device_unregister(&mmprofile_device);
#endif
	mmprofile_remove();
}
/* Driver specific end */
/* Module registration and metadata. */
module_init(mmprofile_init);
module_exit(mmprofile_exit);
MODULE_AUTHOR("Tianshu Qiu <tianshu.qiu@mediatek.com>");
MODULE_DESCRIPTION("MMProfile Driver");
MODULE_LICENSE("GPL");