/* ged_dvfs.c */
  1. /*
  2. * Copyright (C) 2015 MediaTek Inc.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/sched.h>
  15. #ifdef GED_DVFS_ENABLE
  16. #include <mt-plat/mt_boot.h>
  17. #include <mt_gpufreq.h>
  18. #endif
  19. #include <trace/events/mtk_events.h>
  20. #include <mt-plat/mtk_gpu_utility.h>
  21. #include <asm/siginfo.h>
  22. #include <linux/sched.h>
  23. #include <linux/signal.h>
  24. #include "ged_dvfs.h"
  25. #include "ged_monitor_3D_fence.h"
  26. #include "ged_profile_dvfs.h"
  27. #include "ged_log.h"
  28. #include "ged_base.h"
  29. #define MTK_DEFER_DVFS_WORK_MS 10000
  30. #define MTK_DVFS_SWITCH_INTERVAL_MS 50//16//100
  31. /*Definition of GED_DVFS_SKIP_ROUNDS is to skip DVFS when boost raised
  32. the value stands for counting down rounds of DVFS period
  33. Current using vsync that would be 16ms as period,
  34. below boost at (32, 48] seconds per boost
  35. #define GED_DVFS_SKIP_ROUNDS 3 */
  36. #define GED_DVFS_SKIP_ROUNDS 3
  37. extern GED_LOG_BUF_HANDLE ghLogBuf_DVFS;
  38. extern GED_LOG_BUF_HANDLE ghLogBuf_ged_srv;
  39. static struct mutex gsDVFSLock;
  40. static struct mutex gsVSyncOffsetLock;
  41. static unsigned int g_iSkipCount=0;
  42. static int g_dvfs_skip_round=0;
  43. static unsigned int gpu_power = 0;
  44. static unsigned int gpu_dvfs_enable;
  45. MTK_GPU_DVFS_TYPE g_CommitType=0;
  46. unsigned long g_ulCommitFreq=0;
  47. #ifdef GED_DVFS_ENABLE
  48. static unsigned int boost_gpu_enable;
  49. static unsigned int gpu_bottom_freq;
  50. static unsigned int gpu_cust_boost_freq;
  51. static unsigned int gpu_cust_upbound_freq;
  52. static unsigned int g_ui32PreFreqID;
  53. static unsigned int g_bottom_freq_id;
  54. static unsigned int g_cust_upbound_freq_id;
  55. #endif
  56. static unsigned int g_computed_freq_id = 0;
  57. static unsigned int gpu_debug_enable;
  58. static unsigned int g_cust_boost_freq_id;
  59. unsigned int g_gpu_timer_based_emu;
  60. static unsigned int gpu_pre_loading = 0;
  61. unsigned int gpu_loading = 0;
  62. unsigned int gpu_av_loading = 0;
  63. unsigned int gpu_sub_loading = 0;
  64. static unsigned int gpu_block = 0;
  65. static unsigned int gpu_idle = 0;
  66. unsigned long g_um_gpu_tar_freq = 0;
  67. spinlock_t g_sSpinLock;
  68. unsigned long g_ulCalResetTS_us = 0; // calculate loading reset time stamp
  69. unsigned long g_ulPreCalResetTS_us = 0; // previous calculate loading reset time stamp
  70. unsigned long g_ulWorkingPeriod_us = 0; // last frame half, t0
  71. unsigned long g_ulPreDVFS_TS_us = 0; // record previous DVFS applying time stamp
  72. static unsigned long gL_ulCalResetTS_us = 0; // calculate loading reset time stamp
  73. static unsigned long gL_ulPreCalResetTS_us = 0; // previous calculate loading reset time stamp
  74. static unsigned long gL_ulWorkingPeriod_us = 0; // last frame half, t0
  75. static unsigned int g_ui32FreqIDFromPolicy = 0;
  76. unsigned long g_ulvsync_period;
  77. static GED_DVFS_TUNING_MODE g_eTuningMode = 0;
  78. unsigned int g_ui32EventStatus = 0;
  79. unsigned int g_ui32EventDebugStatus = 0;
  80. static int g_VsyncOffsetLevel = 0;
  81. static int g_probe_pid=GED_NO_UM_SERVICE;
  82. typedef void (*gpufreq_input_boost_notify)(unsigned int );
  83. typedef void (*gpufreq_power_limit_notify)(unsigned int );
  84. extern void mt_gpufreq_input_boost_notify_registerCB(gpufreq_input_boost_notify pCB);
  85. extern void mt_gpufreq_power_limit_notify_registerCB(gpufreq_power_limit_notify pCB);
  86. extern void (*mtk_boost_gpu_freq_fp)(void);
  87. extern void (*mtk_set_bottom_gpu_freq_fp)(unsigned int);
  88. extern unsigned int (*mtk_get_bottom_gpu_freq_fp)(void);
  89. extern unsigned int (*mtk_custom_get_gpu_freq_level_count_fp)(void);
  90. extern void (*mtk_custom_boost_gpu_freq_fp)(unsigned int ui32FreqLevel);
  91. extern void (*mtk_custom_upbound_gpu_freq_fp)(unsigned int ui32FreqLevel);
  92. extern unsigned int (*mtk_get_custom_boost_gpu_freq_fp)(void);
  93. extern unsigned int (*mtk_get_custom_upbound_gpu_freq_fp)(void);
  94. extern unsigned int (*mtk_get_gpu_loading_fp)(void);
  95. extern unsigned int (*mtk_get_gpu_block_fp)(void);
  96. extern unsigned int (*mtk_get_gpu_idle_fp)(void);
  97. extern void (*mtk_do_gpu_dvfs_fp)(unsigned long t, long phase, unsigned long ul3DFenceDoneTime);
  98. extern void (*mtk_gpu_dvfs_set_mode_fp)(int eMode);
  99. extern unsigned int (*mtk_get_gpu_sub_loading_fp)(void);
  100. extern unsigned long (*mtk_get_vsync_based_target_freq_fp)(void);
  101. extern void (*mtk_get_gpu_dvfs_from_fp)(MTK_GPU_DVFS_TYPE* peType, unsigned long *pulFreq);
  102. extern unsigned long (*mtk_get_gpu_bottom_freq_fp)(void);
  103. extern unsigned long (*mtk_get_gpu_custom_boost_freq_fp)(void);
  104. extern unsigned long (*mtk_get_gpu_custom_upbound_freq_fp)(void);
  105. extern void ged_monitor_3D_fence_set_enable(GED_BOOL bEnable);
  106. static bool ged_dvfs_policy(
  107. unsigned int ui32GPULoading, unsigned int* pui32NewFreqID,
  108. unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed);
  109. unsigned long ged_gas_query_mode(void);
/*
 * ged_query_info - single entry point for querying GPU DVFS state.
 * @eType: which piece of information the caller wants.
 *
 * Returns the requested value as an unsigned long, or 0 for query
 * types that are unknown (or compiled out).
 */
unsigned long ged_query_info( GED_INFO eType)
{
	/* NOTE(review): these locals deliberately shadow the file-scope
	 * counters of the same names; they are filled by the mtk_get_gpu_*
	 * helpers below before being returned. */
	unsigned int gpu_loading;
	unsigned int gpu_block;
	unsigned int gpu_idle;
	switch(eType)
	{
	case GED_LOADING:
		mtk_get_gpu_loading(&gpu_loading);
		return gpu_loading;
	case GED_IDLE:
		mtk_get_gpu_idle(&gpu_idle);
		return gpu_idle;
	case GED_BLOCKING:
		mtk_get_gpu_block(&gpu_block);
		return gpu_block;
#ifdef GED_DVFS_ENABLE
	case GED_PRE_FREQ:
		/* frequency (table lookup) of the index used before the last commit */
		return mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
	case GED_PRE_FREQ_IDX:
		return g_ui32PreFreqID;
	case GED_CUR_FREQ:
		return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_cur_freq_index());
	case GED_CUR_FREQ_IDX:
		return mt_gpufreq_get_cur_freq_index();
	case GED_MAX_FREQ_IDX:
		/* largest valid table index; per the "0 => highest frequency"
		 * convention used elsewhere in this file, this is the slowest
		 * clock — the MAX/MIN naming follows the index value, not the
		 * frequency. */
		return mt_gpufreq_get_dvfs_table_num()-1;
	case GED_MAX_FREQ_IDX_FREQ:
		return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_dvfs_table_num()-1);
	case GED_MIN_FREQ_IDX:
		return 0;
	case GED_MIN_FREQ_IDX_FREQ:
		return mt_gpufreq_get_freq_by_idx(0);
#endif
	case GED_EVENT_GAS_MODE:
		return ged_gas_query_mode();
	case GED_3D_FENCE_DONE_TIME:
		return ged_monitor_3D_fence_done_time();
	case GED_VSYNC_OFFSET:
		return ged_dvfs_vsync_offset_level_get();
	case GED_EVENT_STATUS:
		return g_ui32EventStatus;
	case GED_EVENT_DEBUG_STATUS:
		return g_ui32EventDebugStatus;
	case GED_SRV_SUICIDE:
		/* side effect: signals the user-space ged_srv to terminate,
		 * then reports the pid that was signalled */
		ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
		return g_probe_pid;
	case GED_PRE_HALF_PERIOD:
		/* working time (us) accumulated in the last saved loading page */
		return g_ulWorkingPeriod_us;
	case GED_LATEST_START:
		return g_ulPreCalResetTS_us;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(ged_query_info);
  166. //-----------------------------------------------------------------------------
  167. void (*ged_dvfs_cal_gpu_utilization_fp)(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle) = NULL;
  168. EXPORT_SYMBOL(ged_dvfs_cal_gpu_utilization_fp);
  169. //-----------------------------------------------------------------------------
  170. bool ged_dvfs_cal_gpu_utilization(unsigned int* pui32Loading , unsigned int* pui32Block,unsigned int* pui32Idle)
  171. {
  172. if (NULL != ged_dvfs_cal_gpu_utilization_fp)
  173. {
  174. ged_dvfs_cal_gpu_utilization_fp(pui32Loading, pui32Block, pui32Idle);
  175. gpu_sub_loading = *pui32Loading;
  176. return true;
  177. }
  178. return false;
  179. }
  180. //-----------------------------------------------------------------------------
  181. // void (*ged_dvfs_gpu_freq_commit_fp)(unsigned long ui32NewFreqID)
  182. // call back function
  183. // This shall be registered in vendor's GPU driver,
  184. // since each IP has its own rule
  185. void (*ged_dvfs_gpu_freq_commit_fp)(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType, int* pbCommited) = NULL;
  186. EXPORT_SYMBOL(ged_dvfs_gpu_freq_commit_fp);
  187. //-----------------------------------------------------------------------------
/*
 * ged_dvfs_gpu_freq_commit - clamp a requested frequency index and apply it.
 * @ui32NewFreqID: requested gpufreq table index (0 = highest frequency,
 *                 larger index = lower frequency).
 * @eCommitType:   reason for the commit, forwarded to the vendor callback
 *                 and traced.
 *
 * The request is clamped, in order, against: the smart-boost bottom
 * (g_bottom_freq_id), the custom boost floor (g_cust_boost_freq_id),
 * the custom upper bound (g_cust_upbound_freq_id), and finally the
 * thermal limit — so the thermal limit always wins.  Each clamp also
 * rewrites g_CommitType for later bookkeeping.
 *
 * Returns true (non-zero) only if the vendor callback actually changed
 * the frequency.  Caller is expected to hold gsDVFSLock.
 */
bool ged_dvfs_gpu_freq_commit(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType)
{
	int bCommited=false;
#ifdef GED_DVFS_ENABLE
	unsigned long ui32CurFreqID;
	ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
	if (NULL != ged_dvfs_gpu_freq_commit_fp)
	{
		/* a larger index is a lower clock: don't go below the floor */
		if (ui32NewFreqID > g_bottom_freq_id)
		{
			ui32NewFreqID = g_bottom_freq_id;
			g_CommitType = MTK_GPU_DVFS_TYPE_SMARTBOOST;
		}
		if (ui32NewFreqID > g_cust_boost_freq_id)
		{
			ui32NewFreqID = g_cust_boost_freq_id;
			g_CommitType = MTK_GPU_DVFS_TYPE_CUSTOMIZATION;
		}
		// up bound
		if (ui32NewFreqID < g_cust_upbound_freq_id)
		{
			ui32NewFreqID = g_cust_upbound_freq_id;
			g_CommitType = MTK_GPU_DVFS_TYPE_CUSTOMIZATION;
		}
		// thermal power limit
		if (ui32NewFreqID < mt_gpufreq_get_thermal_limit_index())
		{
			ui32NewFreqID = mt_gpufreq_get_thermal_limit_index();
			g_CommitType = MTK_GPU_DVFS_TYPE_THERMAL;
		}
		g_ulCommitFreq = mt_gpufreq_get_freq_by_idx(ui32NewFreqID);
		// do change
		if (ui32NewFreqID != ui32CurFreqID)
		{
			// call to DVFS module
			ged_dvfs_gpu_freq_commit_fp(ui32NewFreqID, eCommitType, &bCommited);
			/*
			 * To-Do: refine previous freq contributions,
			 * since it is possible to have multiple freq settings in previous execution period
			 * Does this fatal for precision?
			 */
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] new freq ID commited: idx=%lu type=%u",ui32NewFreqID, eCommitType);
			if(true==bCommited)
			{
				ged_log_trace_counter("Freq-idx",ui32NewFreqID);
				ged_log_trace_counter("commit-type",eCommitType);
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] commited true");
				/* remember where we came from for GED_PRE_FREQ queries */
				g_ui32PreFreqID = ui32CurFreqID;
			}
		}
	}
#endif
	return bCommited;
}
  242. unsigned long get_ns_period_from_fps(unsigned int ui32Fps)
  243. {
  244. return 1000000/ui32Fps;
  245. }
  246. void ged_dvfs_set_tuning_mode(GED_DVFS_TUNING_MODE eMode)
  247. {
  248. g_eTuningMode=eMode;
  249. }
  250. void ged_dvfs_set_tuning_mode_wrap(int eMode)
  251. {
  252. ged_dvfs_set_tuning_mode( (GED_DVFS_TUNING_MODE) eMode) ;
  253. }
  254. GED_DVFS_TUNING_MODE ged_dvfs_get_tuning_mode()
  255. {
  256. return g_eTuningMode;
  257. }
  258. //g_i32EvenStatus
/*
 * ged_dvfs_vsync_offset_event_switch - update the vsync-offset event bitmask.
 * @eEvent:  which event source changed (touch/thermal/WFD/MHL/GAS, or the
 *           debug force-on/force-off/clear commands).
 * @bSwitch: GED_TRUE to set the event bit, GED_FALSE to clear it
 *           (ignored by the debug commands).
 *
 * Maintains g_ui32EventStatus (real events) and g_ui32EventDebugStatus
 * (debug overrides).  Whenever either mask actually changed, the new
 * state is traced and user space is notified via
 * ged_dvfs_probe_signal(GED_DVFS_VSYNC_OFFSET_SIGNAL_EVENT).
 *
 * Returns GED_OK, or GED_ERROR_INVALID_PARAMS for an unknown event.
 * Serialized by gsVSyncOffsetLock.
 */
GED_ERROR ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_SWITCH_CMD eEvent, bool bSwitch)
{
	unsigned int ui32BeforeSwitchInterpret;
	unsigned int ui32BeforeDebugInterpret;
	GED_ERROR ret = GED_OK;
	mutex_lock(&gsVSyncOffsetLock);
	/* snapshot both masks so we only signal on a real change */
	ui32BeforeSwitchInterpret = g_ui32EventStatus;
	ui32BeforeDebugInterpret = g_ui32EventDebugStatus;
	switch(eEvent)
	{
	case GED_DVFS_VSYNC_OFFSET_FORCE_ON:
		/* force-on and force-off are mutually exclusive */
		g_ui32EventDebugStatus |= GED_EVENT_FORCE_ON;
		g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_OFF);
		break;
	case GED_DVFS_VSYNC_OFFSET_FORCE_OFF:
		g_ui32EventDebugStatus |= GED_EVENT_FORCE_OFF;
		g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_ON);
		break;
	case GED_DVFS_VSYNC_OFFSET_DEBUG_CLEAR_EVENT:
		g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_ON);
		g_ui32EventDebugStatus &= (~GED_EVENT_FORCE_OFF);
		break;
	case GED_DVFS_VSYNC_OFFSET_TOUCH_EVENT:
		if(GED_TRUE==bSwitch) // touch boost
		{
			/* kick the GPU to top speed immediately on touch-down */
			ged_dvfs_boost_gpu_freq();
		}
		(bSwitch)? (g_ui32EventStatus|=GED_EVENT_TOUCH): (g_ui32EventStatus&= (~GED_EVENT_TOUCH));
		break;
	case GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT:
		(bSwitch)? (g_ui32EventStatus|=GED_EVENT_THERMAL): (g_ui32EventStatus&= (~GED_EVENT_THERMAL));
		break;
	case GED_DVFS_VSYNC_OFFSET_WFD_EVENT:
		(bSwitch)? (g_ui32EventStatus|=GED_EVENT_WFD): (g_ui32EventStatus&= (~GED_EVENT_WFD));
		break;
	case GED_DVFS_VSYNC_OFFSET_MHL_EVENT:
		(bSwitch)? (g_ui32EventStatus|=GED_EVENT_MHL): (g_ui32EventStatus&= (~GED_EVENT_MHL));
		break;
	case GED_DVFS_VSYNC_OFFSET_GAS_EVENT:
		(bSwitch)? (g_ui32EventStatus|=GED_EVENT_GAS): (g_ui32EventStatus&= (~GED_EVENT_GAS));
		/* GAS mode and smart-boost are exclusive of each other */
		ged_monitor_3D_fence_set_enable(!bSwitch); // switch smartboost
		break;
	default:
		GED_LOGE("%s: not acceptable event:%u \n", __func__, eEvent);
		ret = GED_ERROR_INVALID_PARAMS;
		goto CHECK_OUT;
	}
	/* GED_EVENT_NOT_SYNC forces a re-signal even without a mask change */
	if(ui32BeforeSwitchInterpret != g_ui32EventStatus || ui32BeforeDebugInterpret != g_ui32EventDebugStatus
		|| g_ui32EventDebugStatus&GED_EVENT_NOT_SYNC)
	{
		ged_log_trace_counter("vsync-offset event",g_ui32EventStatus);
		ged_log_trace_counter("vsync-offset debug",g_ui32EventDebugStatus);
		ret = ged_dvfs_probe_signal(GED_DVFS_VSYNC_OFFSET_SIGNAL_EVENT);
	}
CHECK_OUT:
	mutex_unlock(&gsVSyncOffsetLock);
	return ret;
}
  317. void ged_dvfs_vsync_offset_level_set(int i32level)
  318. {
  319. g_VsyncOffsetLevel = i32level;
  320. ged_log_trace_counter("vsync-offset",g_VsyncOffsetLevel);
  321. }
  322. int ged_dvfs_vsync_offset_level_get()
  323. {
  324. return g_VsyncOffsetLevel;
  325. }
/*
 * ged_dvfs_um_commit - commit a frequency chosen by the user-space DVFS.
 * @gpu_tar_freq: target frequency; bit 0 set is a magic value that asks
 *                the user-space ged_srv to terminate.  In fallback mode
 *                the value is ignored in favour of the kernel policy.
 * @bFallback:    true = user-space could not decide; fall back to the
 *                in-kernel ged_dvfs_policy() instead.
 *
 * Re-derives an average loading over the whole period since the last
 * DVFS decision (idle gaps included), then maps the target frequency
 * onto a gpufreq table index and commits it.
 *
 * Returns GED_INTENTIONAL_BLOCK when the timer-based emulation is
 * active (user-space commits are ignored), otherwise GED_OK.
 */
GED_ERROR ged_dvfs_um_commit( unsigned long gpu_tar_freq, bool bFallback)
{
#ifdef ENABLE_COMMON_DVFS
	int i32MaxLevel = 0;
	unsigned int ui32NewFreqID;
	int i ;
	/* NOTE(review): when GED_DVFS_ENABLE is not defined, gpu_freq is
	 * never assigned before being compared in the loop below — looks
	 * like uninitialized use on that configuration; confirm intent. */
	unsigned long gpu_freq ;
	unsigned int sentinalLoading=0;
#ifdef GED_DVFS_ENABLE
	unsigned int ui32CurFreqID;
	i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
	ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
#endif
	if(g_gpu_timer_based_emu)
	{
		return GED_INTENTIONAL_BLOCK;
	}
#ifdef GED_DVFS_UM_CAL
	mutex_lock(&gsDVFSLock);
	if(gL_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
	{
		/* weight the measured loading over the measured window plus the
		 * already-accounted working time of the current half-frame, then
		 * normalize by the full span since the previous DVFS decision */
		sentinalLoading = (( gpu_loading * (gL_ulCalResetTS_us - gL_ulPreCalResetTS_us) ) + 100*gL_ulWorkingPeriod_us ) / (gL_ulCalResetTS_us - g_ulPreDVFS_TS_us);
		if(sentinalLoading > 100)
		{
			/* should not happen: dump the inputs for post-mortem */
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] g_ulCalResetTS_us: %lu g_ulPreDVFS_TS_us: %lu",gL_ulCalResetTS_us, g_ulPreDVFS_TS_us);
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] gpu_loading: %u g_ulPreCalResetTS_us:%lu",gpu_loading, gL_ulPreCalResetTS_us);
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] g_ulWorkingPeriod_us: %lu",gL_ulWorkingPeriod_us);
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] gpu_av_loading: WTF");
			if(gL_ulWorkingPeriod_us==0)
				sentinalLoading = gpu_loading;
			else
				sentinalLoading = 100;	/* clamp to a sane maximum */
		}
		gpu_loading = sentinalLoading;
	}
	else
	{
		ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] gpu_av_loading: 5566/ %u",gpu_loading);
		gpu_loading =0 ;
	}
	gpu_pre_loading = gpu_av_loading;
	gpu_av_loading = gpu_loading;
	g_ulPreDVFS_TS_us = gL_ulCalResetTS_us;
	if(gpu_tar_freq&0x1) // Magic to kill ged_srv
	{
		ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
	}
	if(bFallback==true) // in the fallback mode, gpu_tar_freq taking as freq index
	{
		/* loading already refreshed above, so bRefreshed = true */
		ged_dvfs_policy(gpu_loading, &ui32NewFreqID, 0, 0, 0, true);
	}
	else
	{
		// Search suitable frequency level
		g_CommitType = MTK_GPU_DVFS_TYPE_VSYNCBASED;
		g_um_gpu_tar_freq = gpu_tar_freq;
		ui32NewFreqID = i32MaxLevel;
		/* table is ordered highest frequency first: pick the slowest
		 * entry that still meets gpu_tar_freq (i-1 once we pass it) */
		for (i = 0; i <= i32MaxLevel; i++)
		{
#ifdef GED_DVFS_ENABLE
			gpu_freq = mt_gpufreq_get_freq_by_idx(i);
#endif
			if (gpu_tar_freq > gpu_freq)
			{
				if(i==0)
					ui32NewFreqID = 0;
				else
					ui32NewFreqID = i-1;
				break;
			}
		}
	}
	ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] rdy to commit (%u)",ui32NewFreqID);
	g_computed_freq_id = ui32NewFreqID;
	ged_dvfs_gpu_freq_commit(ui32NewFreqID, GED_DVFS_DEFAULT_COMMIT);
	mutex_unlock(&gsDVFSLock);
#endif
#else
	gpu_pre_loading = 0;
#endif
	return GED_OK;
}
/*
 * ged_dvfs_policy - in-kernel frequency selection policy.
 * @ui32GPULoading:    loading to base the decision on (recomputed here
 *                     unless @bRefreshed is true).
 * @pui32NewFreqID:    out: the chosen gpufreq table index.
 * @t, @phase:         timestamp and DVFS phase; GED_DVFS_TIMER_BACKUP
 *                     selects the easier-to-boost off-screen table.
 * @ul3DFenceDoneTime: unused by the visible logic; kept for the fp
 *                     signature.
 * @bRefreshed:        true if the caller already folded the loading
 *                     over the full period (see ged_dvfs_um_commit).
 *
 * Returns GED_TRUE when the chosen index differs from the current one.
 */
static bool ged_dvfs_policy(
	unsigned int ui32GPULoading, unsigned int* pui32NewFreqID,
	unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed)
{
#ifdef GED_DVFS_ENABLE
	int i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
	unsigned int ui32GPUFreq = mt_gpufreq_get_cur_freq_index();
	unsigned int sentinalLoading = 0;
	/* work in signed so the +-1/+-2 nudges below can go negative before
	 * the final clamp */
	int i32NewFreqID = (int)ui32GPUFreq;
	g_um_gpu_tar_freq = 0;
	if(false==bRefreshed)
	{
		if(gL_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
		{
			/* same loading re-normalization as in ged_dvfs_um_commit */
			sentinalLoading = (( gpu_loading * (gL_ulCalResetTS_us - gL_ulPreCalResetTS_us) ) + 100*gL_ulWorkingPeriod_us ) / (gL_ulCalResetTS_us - g_ulPreDVFS_TS_us);
			if(sentinalLoading >100)
			{
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K1] g_ulCalResetTS_us: %lu g_ulPreDVFS_TS_us: %lu",gL_ulCalResetTS_us, g_ulPreDVFS_TS_us);
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K1] gpu_loading: %u g_ulPreCalResetTS_us:%lu",gpu_loading, gL_ulPreCalResetTS_us);
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K1] g_ulWorkingPeriod_us: %lu",gL_ulWorkingPeriod_us);
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K1] gpu_av_loading: WTF");
				if(gL_ulWorkingPeriod_us==0)
					sentinalLoading = gpu_loading;
				else
					sentinalLoading = 100;
			}
			gpu_loading = sentinalLoading;
		}
		else
		{
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K1] gpu_av_loading: 5566 / %u",gpu_loading);
			gpu_loading = 0;
		}
		g_ulPreDVFS_TS_us = gL_ulCalResetTS_us;
		gpu_pre_loading = gpu_av_loading;
		ui32GPULoading = gpu_loading;
		gpu_av_loading = gpu_loading;
	}
	if(g_gpu_timer_based_emu) // conventional timer-based policy
	{
		/* step size grows with distance from the 50-70% comfort band */
		if (ui32GPULoading >= 99)
		{
			i32NewFreqID = 0;
		}
		else if (ui32GPULoading <= 1)
		{
			i32NewFreqID = i32MaxLevel;
		}
		else if (ui32GPULoading >= 85)
		{
			i32NewFreqID -= 2;
		}
		else if (ui32GPULoading <= 30)
		{
			i32NewFreqID += 2;
		}
		else if (ui32GPULoading >= 70)
		{
			i32NewFreqID -= 1;
		}
		else if (ui32GPULoading <= 50)
		{
			i32NewFreqID += 1;
		}
		/* extra nudge when the loading trend moves sharply (>1.7x).
		 * NOTE(review): i32NewFreqID may be negative here and these
		 * comparisons promote it to unsigned — verify the intended
		 * behaviour for the negative case. */
		if (i32NewFreqID < ui32GPUFreq)
		{
			if (gpu_pre_loading * 17 / 10 < ui32GPULoading)
			{
				i32NewFreqID -= 1;
			}
		}
		else if (i32NewFreqID > ui32GPUFreq)
		{
			if (ui32GPULoading * 17 / 10 < gpu_pre_loading)
			{
				i32NewFreqID += 1;
			}
		}
		g_CommitType = MTK_GPU_DVFS_TYPE_TIMERBASED;
	}
	else if(GED_DVFS_TIMER_BACKUP==phase) // easy to boost in offscreen cases
	{
		if (ui32GPULoading >= 50)
		{
			i32NewFreqID -= 1;
		}
		else if (ui32GPULoading <= 30)
		{
			i32NewFreqID += 1;
		}
		g_CommitType = MTK_GPU_DVFS_TYPE_TIMERBASED;
	}
	else // vsync-based fallback mode
	{
		if (ui32GPULoading >= 70)
		{
			i32NewFreqID -= 1;
		}
		else if (ui32GPULoading <= 50)
		{
			i32NewFreqID += 1;
		}
		g_CommitType = MTK_GPU_DVFS_TYPE_FALLBACK;
	}
	/* clamp to the valid table range */
	if (i32NewFreqID > i32MaxLevel)
	{
		i32NewFreqID = i32MaxLevel;
	}
	else if (i32NewFreqID < 0)
	{
		i32NewFreqID = 0;
	}
	*pui32NewFreqID = (unsigned int)i32NewFreqID;
	return *pui32NewFreqID != ui32GPUFreq ? GED_TRUE : GED_FALSE;
#else
	return GED_FALSE;
#endif
}
/*
 * ged_dvfs_freq_input_boostCB - input-boost callback from gpufreq.
 * @ui32BoostFreqID: table index to boost to (0 = highest frequency).
 *
 * Ignored while the start-up skip window is active or when boost is
 * disabled.  Only ever raises the clock (boost index must be above the
 * current index); a successful commit arms g_dvfs_skip_round so the
 * next few DVFS rounds keep the boosted frequency.
 */
static void ged_dvfs_freq_input_boostCB(unsigned int ui32BoostFreqID)
{
#ifdef GED_DVFS_ENABLE
	if (0 < g_iSkipCount)
	{
		return;
	}
	if (boost_gpu_enable == 0)
	{
		return;
	}
	mutex_lock(&gsDVFSLock);
	/* smaller index == higher frequency: boost only */
	if (ui32BoostFreqID < mt_gpufreq_get_cur_freq_index())
	{
		if (ged_dvfs_gpu_freq_commit(ui32BoostFreqID,GED_DVFS_INPUT_BOOST_COMMIT ))
		{
			/* hold the boosted clock for a few rounds */
			g_dvfs_skip_round = GED_DVFS_SKIP_ROUNDS;
		}
	}
	mutex_unlock(&gsDVFSLock);
#endif
}
  548. #ifdef GED_DVFS_ENABLE
/*
 * ged_dvfs_freq_thermal_limitCB - thermal-limit callback from gpufreq.
 * @ui32LimitFreqID: new thermal ceiling as a table index; 0 means the
 *                   thermal limit was lifted.
 *
 * Mirrors the limit into the vsync-offset THERMAL event, then lowers
 * the current clock immediately if it sits above the new ceiling
 * (larger index == lower frequency).
 */
static void ged_dvfs_freq_thermal_limitCB(unsigned int ui32LimitFreqID)
{
	if (0 < g_iSkipCount)
	{
		return;
	}
	if(ui32LimitFreqID == 0) // thermal event disable
		ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_FALSE);
	else
		ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_TRUE);
	mutex_lock(&gsDVFSLock);
	if (ui32LimitFreqID > mt_gpufreq_get_cur_freq_index())
	{
		if (ged_dvfs_gpu_freq_commit(ui32LimitFreqID, GED_DVFS_SET_LIMIT_COMMIT))
		{
			/* unlike input boost, a thermal commit is not held */
			g_dvfs_skip_round = 0;
		}
	}
	mutex_unlock(&gsDVFSLock);
}
  569. #endif
  570. void ged_dvfs_boost_gpu_freq(void)
  571. {
  572. if (gpu_debug_enable)
  573. {
  574. GED_LOGE("%s", __func__);
  575. }
  576. ged_dvfs_freq_input_boostCB(0);
  577. }
  578. #ifdef GED_DVFS_ENABLE
  579. static void ged_dvfs_set_bottom_gpu_freq(unsigned int ui32FreqLevel)
  580. {
  581. unsigned int ui32MaxLevel;
  582. if (gpu_debug_enable)
  583. {
  584. GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel);
  585. }
  586. ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  587. if (ui32MaxLevel < ui32FreqLevel)
  588. {
  589. ui32FreqLevel = ui32MaxLevel;
  590. }
  591. mutex_lock(&gsDVFSLock);
  592. // 0 => The highest frequency
  593. // table_num - 1 => The lowest frequency
  594. g_bottom_freq_id = ui32MaxLevel - ui32FreqLevel;
  595. gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);
  596. //if current id is larger, ie lower freq, we need to reflect immedately
  597. if(g_bottom_freq_id < mt_gpufreq_get_cur_freq_index())
  598. ged_dvfs_gpu_freq_commit(g_bottom_freq_id, GED_DVFS_SET_BOTTOM_COMMIT);
  599. mutex_unlock(&gsDVFSLock);
  600. }
  601. static unsigned int ged_dvfs_get_gpu_freq_level_count(void)
  602. {
  603. return mt_gpufreq_get_dvfs_table_num();
  604. }
  605. static void ged_dvfs_custom_boost_gpu_freq(unsigned int ui32FreqLevel)
  606. {
  607. unsigned int ui32MaxLevel;
  608. if (gpu_debug_enable)
  609. {
  610. GED_LOGE("%s: freq = %d", __func__ ,ui32FreqLevel);
  611. }
  612. ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  613. if (ui32MaxLevel < ui32FreqLevel)
  614. {
  615. ui32FreqLevel = ui32MaxLevel;
  616. }
  617. mutex_lock(&gsDVFSLock);
  618. // 0 => The highest frequency
  619. // table_num - 1 => The lowest frequency
  620. g_cust_boost_freq_id = ui32MaxLevel - ui32FreqLevel;
  621. gpu_cust_boost_freq = mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);
  622. if (g_cust_boost_freq_id < mt_gpufreq_get_cur_freq_index())
  623. {
  624. ged_dvfs_gpu_freq_commit(g_cust_boost_freq_id, GED_DVFS_CUSTOM_BOOST_COMMIT);
  625. }
  626. mutex_unlock(&gsDVFSLock);
  627. }
  628. static void ged_dvfs_custom_ceiling_gpu_freq(unsigned int ui32FreqLevel)
  629. {
  630. unsigned int ui32MaxLevel;
  631. if (gpu_debug_enable)
  632. {
  633. GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel);
  634. }
  635. ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  636. if (ui32MaxLevel < ui32FreqLevel)
  637. {
  638. ui32FreqLevel = ui32MaxLevel;
  639. }
  640. mutex_lock(&gsDVFSLock);
  641. // 0 => The highest frequency
  642. // table_num - 1 => The lowest frequency
  643. g_cust_upbound_freq_id = ui32MaxLevel - ui32FreqLevel;
  644. gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);
  645. if (g_cust_upbound_freq_id > mt_gpufreq_get_cur_freq_index())
  646. {
  647. ged_dvfs_gpu_freq_commit(g_cust_upbound_freq_id, GED_DVFS_CUSTOM_CEIL_COMMIT);
  648. }
  649. mutex_unlock(&gsDVFSLock);
  650. }
  651. #endif
  652. unsigned int ged_dvfs_get_custom_boost_gpu_freq(void)
  653. {
  654. #ifdef GED_DVFS_ENABLE
  655. unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  656. #else
  657. unsigned int ui32MaxLevel = 0;
  658. #endif
  659. return ui32MaxLevel - g_cust_boost_freq_id;
  660. }
  661. void ged_dvfs_save_loading_page(void) // Need spinlocked
  662. {
  663. gL_ulCalResetTS_us = g_ulCalResetTS_us;
  664. gL_ulPreCalResetTS_us = g_ulPreCalResetTS_us;
  665. gL_ulWorkingPeriod_us = g_ulWorkingPeriod_us;
  666. g_ulWorkingPeriod_us = 0; // set as zero for next time
  667. }
  668. void ged_dvfs_cal_gpu_utilization_force()
  669. {
  670. unsigned long ui32IRQFlags;
  671. unsigned int loading;
  672. unsigned int block;
  673. unsigned int idle;
  674. unsigned long long t;
  675. unsigned long ulwork;
  676. t = ged_get_time();
  677. do_div(t,1000);
  678. ged_dvfs_cal_gpu_utilization(&loading, &block, &idle);
  679. spin_lock_irqsave(&g_sSpinLock, ui32IRQFlags);
  680. ulwork = (( t - g_ulCalResetTS_us ) * loading );
  681. do_div(ulwork, 100);
  682. g_ulWorkingPeriod_us += ulwork;
  683. g_ulPreCalResetTS_us = g_ulCalResetTS_us;
  684. g_ulCalResetTS_us = t;
  685. spin_unlock_irqrestore(&g_sSpinLock, ui32IRQFlags);
  686. }
/*
 * ged_dvfs_run - one DVFS evaluation round (timer- or vsync-driven).
 * @t:                 current timestamp in us.
 * @phase:             DVFS phase (GED_DVFS_TIMER_BACKUP selects the
 *                     kernel timer-backup policy path).
 * @ul3DFenceDoneTime: forwarded to ged_dvfs_policy().
 *
 * Refreshes the utilization counters, saves the loading page for the
 * user-space path, and (on the timer-backup phase when GED_DVFS_UM_CAL
 * is enabled, unconditionally otherwise) runs the in-kernel policy and
 * commits its result.  Serialized by gsDVFSLock.
 */
void ged_dvfs_run(unsigned long t, long phase, unsigned long ul3DFenceDoneTime)
{
	/* NOTE(review): bError receives the utilization result but is never
	 * checked — on failure gpu_loading keeps its previous value. */
	bool bError;
	unsigned long ui32IRQFlags;
	mutex_lock(&gsDVFSLock);
	if (0 == gpu_dvfs_enable)
	{
		/* DVFS disabled: zero all published counters and bail out */
		gpu_power = 0;
		gpu_loading = 0;
		gpu_block= 0;
		gpu_idle = 0;
		goto EXIT_ged_dvfs_run;
	}
	// SKIP for keeping boost freq
	if(g_dvfs_skip_round>0)
	{
		g_dvfs_skip_round--;
	}
	if (g_iSkipCount > 0)
	{
		/* start-up grace period: publish zeros, count the round down */
		gpu_power = 0;
		gpu_loading = 0;
		gpu_block= 0;
		gpu_idle = 0;
		g_iSkipCount -= 1;
	}
	else
	{
		bError=ged_dvfs_cal_gpu_utilization(&gpu_loading, &gpu_block, &gpu_idle);
		/* roll the window timestamps and snapshot the loading page
		 * atomically w.r.t. ged_dvfs_cal_gpu_utilization_force() */
		spin_lock_irqsave(&g_sSpinLock,ui32IRQFlags);
		g_ulPreCalResetTS_us = g_ulCalResetTS_us;
		g_ulCalResetTS_us = t;
		ged_dvfs_save_loading_page();
		spin_unlock_irqrestore(&g_sSpinLock,ui32IRQFlags);
#ifdef GED_DVFS_UM_CAL
		if(GED_DVFS_TIMER_BACKUP==phase) // timer-backup DVFS use only
#endif
		{
			if (ged_dvfs_policy(gpu_loading, &g_ui32FreqIDFromPolicy, t, phase, ul3DFenceDoneTime, false))
			{
				g_computed_freq_id = g_ui32FreqIDFromPolicy;
				ged_dvfs_gpu_freq_commit(g_ui32FreqIDFromPolicy, GED_DVFS_DEFAULT_COMMIT);
			}
		}
	}
	if(gpu_debug_enable)
	{
#ifdef GED_DVFS_ENABLE
		GED_LOGE("%s:gpu_loading=%d %d, g_iSkipCount=%d",__func__, gpu_loading, mt_gpufreq_get_cur_freq_index(), g_iSkipCount);
#endif
	}
EXIT_ged_dvfs_run:
	mutex_unlock(&gsDVFSLock);
}
  744. #ifdef GED_DVFS_ENABLE
  745. static unsigned int ged_dvfs_get_bottom_gpu_freq(void)
  746. {
  747. unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  748. return ui32MaxLevel - g_bottom_freq_id;
  749. }
  750. static unsigned int ged_dvfs_get_custom_ceiling_gpu_freq(void)
  751. {
  752. unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1;
  753. return ui32MaxLevel - g_cust_upbound_freq_id;
  754. }
  755. void ged_dvfs_sw_vsync_query_data(GED_DVFS_UM_QUERY_PACK* psQueryData)
  756. {
  757. psQueryData->ui32GPULoading = gpu_loading;
  758. psQueryData->ui32GPUFreqID = mt_gpufreq_get_cur_freq_index();
  759. psQueryData->gpu_cur_freq = mt_gpufreq_get_freq_by_idx(psQueryData->ui32GPUFreqID) ;
  760. psQueryData->gpu_pre_freq = mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
  761. psQueryData->nsOffset = ged_dvfs_vsync_offset_level_get();
  762. psQueryData->ulWorkingPeriod_us = gL_ulWorkingPeriod_us;
  763. psQueryData->ulPreCalResetTS_us = gL_ulPreCalResetTS_us;
  764. }
  765. static unsigned long ged_get_gpu_bottom_freq(void)
  766. {
  767. return mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);
  768. }
  769. static unsigned long ged_get_gpu_custom_boost_freq(void)
  770. {
  771. return mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);
  772. }
  773. static unsigned long ged_get_gpu_custom_upbound_freq(void)
  774. {
  775. return mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);
  776. }
  777. #endif
  778. void ged_dvfs_track_latest_record( MTK_GPU_DVFS_TYPE* peType, unsigned long *pulFreq)
  779. {
  780. *peType = g_CommitType;
  781. *pulFreq = g_ulCommitFreq;
  782. }
  783. unsigned long ged_dvfs_get_gpu_tar_freq(void)
  784. {
  785. return g_um_gpu_tar_freq;
  786. }
  787. unsigned int ged_dvfs_get_sub_gpu_loading(void)
  788. {
  789. return gpu_sub_loading;
  790. }
  791. unsigned int ged_dvfs_get_gpu_loading(void)
  792. {
  793. return gpu_av_loading;
  794. }
  795. unsigned int ged_dvfs_get_gpu_blocking(void)
  796. {
  797. return gpu_block;
  798. }
  799. unsigned int ged_dvfs_get_gpu_idle(void)
  800. {
  801. return 100 - gpu_av_loading;
  802. }
  803. void ged_dvfs_get_gpu_cur_freq(GED_DVFS_FREQ_DATA* psData)
  804. {
  805. #ifdef GED_DVFS_ENABLE
  806. psData->ui32Idx = mt_gpufreq_get_cur_freq_index();
  807. psData->ulFreq = mt_gpufreq_get_freq_by_idx(psData->ui32Idx);
  808. #endif
  809. }
  810. void ged_dvfs_get_gpu_pre_freq(GED_DVFS_FREQ_DATA* psData)
  811. {
  812. #ifdef GED_DVFS_ENABLE
  813. psData->ui32Idx = g_ui32PreFreqID;
  814. psData->ulFreq = mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
  815. #endif
  816. }
  817. GED_ERROR ged_dvfs_probe_signal(int signo)
  818. {
  819. int cache_pid=GED_NO_UM_SERVICE;
  820. struct task_struct *t=NULL;
  821. struct siginfo info;
  822. info.si_signo = signo;
  823. info.si_code = SI_QUEUE;
  824. info.si_int = 1234;
  825. if(cache_pid!=g_probe_pid)
  826. {
  827. cache_pid = g_probe_pid;
  828. if(g_probe_pid==GED_NO_UM_SERVICE)
  829. t = NULL;
  830. else
  831. t = pid_task(find_vpid(g_probe_pid), PIDTYPE_PID);
  832. }
  833. if(t!=NULL)
  834. {
  835. send_sig_info(signo, &info, t);
  836. ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] send signo %d to ged_srv [%d]",signo, g_probe_pid);
  837. return GED_OK;
  838. }
  839. else
  840. {
  841. g_probe_pid = GED_NO_UM_SERVICE;
  842. ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] ged_srv not running");
  843. return GED_ERROR_INVALID_PARAMS;
  844. }
  845. }
  846. void set_target_fps(int i32FPS)
  847. {
  848. g_ulvsync_period = get_ns_period_from_fps(i32FPS);
  849. }
  850. unsigned long ged_gas_query_mode()
  851. {
  852. if (g_ui32EventStatus & GED_EVENT_GAS)
  853. return GAS_CATEGORY_OTHERS;
  854. else
  855. return GAS_CATEGORY_GAME;
  856. }
  857. GED_ERROR ged_dvfs_probe(int pid)
  858. {
  859. // lock here, wait vsync to relief
  860. //wait_for_completion(&gsVSyncOffsetLock);
  861. if(GED_VSYNC_OFFSET_NOT_SYNC ==pid)
  862. {
  863. g_ui32EventDebugStatus |= GED_EVENT_NOT_SYNC;
  864. return GED_OK;
  865. }
  866. if(GED_VSYNC_OFFSET_SYNC ==pid)
  867. {
  868. g_ui32EventDebugStatus &= (~GED_EVENT_NOT_SYNC);
  869. return GED_OK;
  870. }
  871. g_probe_pid = pid;
  872. /* clear bits among start */
  873. if(g_probe_pid!=GED_NO_UM_SERVICE)
  874. {
  875. g_ui32EventStatus &= (~GED_EVENT_TOUCH);
  876. g_ui32EventStatus &= (~GED_EVENT_WFD);
  877. g_ui32EventStatus &= (~GED_EVENT_GAS);
  878. g_ui32EventDebugStatus = 0;
  879. }
  880. ged_log_buf_print(ghLogBuf_ged_srv, "[GED_K] ged_srv pid: %d",g_probe_pid);
  881. return GED_OK;
  882. }
  883. GED_ERROR ged_dvfs_system_init()
  884. {
  885. mutex_init(&gsDVFSLock);
  886. mutex_init(&gsVSyncOffsetLock);
  887. // initial as locked, signal when vsync_sw_notify
  888. g_iSkipCount = MTK_DEFER_DVFS_WORK_MS / MTK_DVFS_SWITCH_INTERVAL_MS;
  889. g_ulvsync_period = get_ns_period_from_fps(60);
  890. #ifdef GED_DVFS_ENABLE
  891. gpu_dvfs_enable = 1;
  892. #else
  893. gpu_dvfs_enable = 0;
  894. #endif
  895. g_dvfs_skip_round = 0;
  896. #ifdef GED_DVFS_ENABLE
  897. g_bottom_freq_id = mt_gpufreq_get_dvfs_table_num() - 1;
  898. gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);
  899. g_cust_boost_freq_id = mt_gpufreq_get_dvfs_table_num() - 1;
  900. gpu_cust_boost_freq = mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);
  901. g_cust_upbound_freq_id = 0;
  902. gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);
  903. #ifdef ENABLE_TIMER_BACKUP
  904. g_gpu_timer_based_emu=0;
  905. #else
  906. g_gpu_timer_based_emu=1;
  907. #endif
  908. // GPU HAL fp mount
  909. //mt_gpufreq_input_boost_notify_registerCB(ged_dvfs_freq_input_boostCB); // MTKFreqInputBoostCB
  910. mt_gpufreq_power_limit_notify_registerCB(ged_dvfs_freq_thermal_limitCB); // MTKFreqPowerLimitCB
  911. mtk_boost_gpu_freq_fp = ged_dvfs_boost_gpu_freq;
  912. mtk_set_bottom_gpu_freq_fp = ged_dvfs_set_bottom_gpu_freq;
  913. mtk_get_bottom_gpu_freq_fp = ged_dvfs_get_bottom_gpu_freq;
  914. mtk_custom_get_gpu_freq_level_count_fp = ged_dvfs_get_gpu_freq_level_count;
  915. mtk_custom_boost_gpu_freq_fp = ged_dvfs_custom_boost_gpu_freq;
  916. mtk_custom_upbound_gpu_freq_fp = ged_dvfs_custom_ceiling_gpu_freq;
  917. mtk_get_custom_boost_gpu_freq_fp = ged_dvfs_get_custom_boost_gpu_freq;
  918. mtk_get_custom_upbound_gpu_freq_fp = ged_dvfs_get_custom_ceiling_gpu_freq;
  919. mtk_get_gpu_loading_fp = ged_dvfs_get_gpu_loading;
  920. mtk_get_gpu_block_fp = ged_dvfs_get_gpu_blocking;
  921. mtk_get_gpu_idle_fp = ged_dvfs_get_gpu_idle;
  922. mtk_do_gpu_dvfs_fp = ged_dvfs_run;
  923. mtk_gpu_dvfs_set_mode_fp = ged_dvfs_set_tuning_mode_wrap;
  924. mtk_get_gpu_sub_loading_fp = ged_dvfs_get_sub_gpu_loading;
  925. mtk_get_vsync_based_target_freq_fp = ged_dvfs_get_gpu_tar_freq;
  926. mtk_get_gpu_dvfs_from_fp = ged_dvfs_track_latest_record;
  927. mtk_get_gpu_bottom_freq_fp = ged_get_gpu_bottom_freq;
  928. mtk_get_gpu_custom_boost_freq_fp = ged_get_gpu_custom_boost_freq;
  929. mtk_get_gpu_custom_upbound_freq_fp = ged_get_gpu_custom_upbound_freq;
  930. spin_lock_init(&g_sSpinLock);
  931. #endif
  932. return GED_OK;
  933. }
  934. void ged_dvfs_system_exit()
  935. {
  936. mutex_destroy(&gsDVFSLock);
  937. mutex_destroy(&gsVSyncOffsetLock);
  938. }
#ifdef ENABLE_COMMON_DVFS
/*
 * Module parameters exposed under /sys/module/.../parameters/ with mode
 * 0644 (world-readable, root-writable).  The loading/block/idle values
 * surface DVFS telemetry; the enable flags and *_freq values allow
 * run-time tuning of the DVFS behaviour.
 */
module_param(gpu_loading, uint, 0644);
module_param(gpu_block, uint, 0644);
module_param(gpu_idle, uint, 0644);
module_param(gpu_dvfs_enable, uint, 0644);
module_param(boost_gpu_enable, uint, 0644);
module_param(gpu_debug_enable, uint, 0644);
module_param(gpu_bottom_freq, uint, 0644);
module_param(gpu_cust_boost_freq, uint, 0644);
module_param(gpu_cust_upbound_freq, uint, 0644);
module_param(g_gpu_timer_based_emu, uint, 0644);
#endif