mt_pbm.c

#define pr_fmt(fmt) "[PBM] " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/wakelock.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/sched/rt.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <mach/mt_pbm.h>
#include <mach/upmu_sw.h>
#include <mt-plat/upmu_common.h>
#include <mt_cpufreq.h>
#include <mt_gpufreq.h>
#include <mach/mt_thermal.h>
#include <mach/mt_ppm_api.h>

#if MD_POWER_METER_ENABLE
#include "mt_spm_vcore_dvfs.h"
#include "mt_ccci_common.h"
#endif

#ifndef DISABLE_PBM_FEATURE

/* reference PMIC */
/* extern kal_uint32 PMIC_IMM_GetOneChannelValue(kal_uint8 dwChannel, int deCount, int trimd); */
/* #define DLPT_PRIO_PBM 0 */
/* void (*dlpt_callback)(unsigned int); */
/* void register_dlpt_notify( void (*dlpt_callback)(unsigned int), int i){} */

/* reference mt_cpufreq.h and mt_gpufreq.h */
/* unsigned int mt_cpufreq_get_leakage_mw(int i){return 111;} */
/* unsigned int mt_gpufreq_get_leakage_mw(void){return 111;} */
/* void mt_ppm_dlpt_set_limit_by_pbm(unsigned int limited_power){} */
/* void mt_gpufreq_set_power_limit_by_pbm(unsigned int limited_power){} */

static bool mt_pbm_debug;

#define pbm_emerg(fmt, args...)		pr_emerg(fmt, ##args)
#define pbm_alert(fmt, args...)		pr_alert(fmt, ##args)
#define pbm_crit(fmt, args...)		pr_crit(fmt, ##args)
#define pbm_err(fmt, args...)		pr_err(fmt, ##args)
#define pbm_warn(fmt, args...)		pr_warn(fmt, ##args)
#define pbm_notice(fmt, args...)	pr_debug(fmt, ##args)
#define pbm_info(fmt, args...)		pr_debug(fmt, ##args)
#define pbm_debug(fmt, args...) \
	do { \
		if (mt_pbm_debug) \
			pr_crit(fmt, ##args); \
	} while (0)

#define BIT_CHECK(a, b) ((a) & (1 << (b)))
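
/*
 * Current request table: one slot per kicker (MD1/MD3/GPU/flash on-off
 * switches, CPU/GPU loadings in mW, DLPT budget), which the worker
 * thread later combines into a single power plan.
 */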
static struct hpf hpf_ctrl = {
	.switch_md1 = 1,
	.switch_md3 = 0,
	.switch_gpu = 0,
	.switch_flash = 0,
	.md1_ccci_ready = 0,
	.md3_ccci_ready = 0,
	.cpu_volt = 1000,	/* 1 V = boot-up voltage */
	.gpu_volt = 0,
	.cpu_num = 1,		/* default: cpu0 only */
	.loading_leakage = 0,
	.loading_dlpt = 0,
	.loading_md1 = MD1_MAX_PW,
	.loading_md3 = MD3_MAX_PW,
	.loading_cpu = 0,
	.loading_gpu = 0,
	.loading_flash = POWER_FLASH,	/* fixed */
};

static struct pbm pbm_ctrl = {
	/* feature key */
	.feature_en = 1,
	.pbm_drv_done = 0,
	.hpf_en = 63,	/* bin: 111111 (Flash, GPU, CPU, MD3, MD1, DLPT) */
};

#if MD_POWER_METER_ENABLE
static int section_level[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	BIT_SECTION_1,
	BIT_SECTION_2,
	BIT_SECTION_3,
	BIT_SECTION_4,
	BIT_SECTION_5,
	BIT_SECTION_6
};

static int md1_section_level_2g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	VAL_MD1_2G_SECTION_1,
	VAL_MD1_2G_SECTION_2,
	VAL_MD1_2G_SECTION_3,
	VAL_MD1_2G_SECTION_4,
	VAL_MD1_2G_SECTION_5,
	VAL_MD1_2G_SECTION_6
};

static int md1_section_level_3g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	VAL_MD1_3G_SECTION_1,
	VAL_MD1_3G_SECTION_2,
	VAL_MD1_3G_SECTION_3,
	VAL_MD1_3G_SECTION_4,
	VAL_MD1_3G_SECTION_5,
	VAL_MD1_3G_SECTION_6
};

static int md1_section_level_4g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	VAL_MD1_4G_SECTION_1,
	VAL_MD1_4G_SECTION_2,
	VAL_MD1_4G_SECTION_3,
	VAL_MD1_4G_SECTION_4,
	VAL_MD1_4G_SECTION_5,
	VAL_MD1_4G_SECTION_6
};

static int md1_section_level_tdd[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	VAL_MD1_TDD_SECTION_1,
	VAL_MD1_TDD_SECTION_2,
	VAL_MD1_TDD_SECTION_3,
	VAL_MD1_TDD_SECTION_4,
	VAL_MD1_TDD_SECTION_5,
	VAL_MD1_TDD_SECTION_6
};

static int md3_section_level[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	VAL_MD3_SECTION_1,
	VAL_MD3_SECTION_2,
	VAL_MD3_SECTION_3,
	VAL_MD3_SECTION_4,
	VAL_MD3_SECTION_5,
	VAL_MD3_SECTION_6
};

static int md1_scenario_pwr[SCENARIO_NUM] = {
	PW_CAT6_CA_DATALINK,
	PW_NON_CA_DATALINK,
	PW_PAGING,
	PW_POSITION,
	PW_CELL_SEARCH,
	PW_CELL_MANAGEMENT,
	PW_TALKING_2G,
	PW_DATALINK_2G,
	PW_TALKING_3G,
	PW_DATALINK_3G
};

static int md1_pa_pwr_2g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	PW_MD1_PA_2G_SECTION_1,
	PW_MD1_PA_2G_SECTION_2,
	PW_MD1_PA_2G_SECTION_3,
	PW_MD1_PA_2G_SECTION_4,
	PW_MD1_PA_2G_SECTION_5,
	PW_MD1_PA_2G_SECTION_6
};

static int md1_pa_pwr_3g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	PW_MD1_PA_3G_SECTION_1,
	PW_MD1_PA_3G_SECTION_2,
	PW_MD1_PA_3G_SECTION_3,
	PW_MD1_PA_3G_SECTION_4,
	PW_MD1_PA_3G_SECTION_5,
	PW_MD1_PA_3G_SECTION_6
};

static int md1_pa_pwr_4g[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	PW_MD1_PA_4G_SECTION_1,
	PW_MD1_PA_4G_SECTION_2,
	PW_MD1_PA_4G_SECTION_3,
	PW_MD1_PA_4G_SECTION_4,
	PW_MD1_PA_4G_SECTION_5,
	PW_MD1_PA_4G_SECTION_6
};

static int md3_pa_pwr[SECTION_NUM + 1] = {
	GUARDING_PATTERN,
	PW_MD3_PA_SECTION_1,
	PW_MD3_PA_SECTION_2,
	PW_MD3_PA_SECTION_3,
	PW_MD3_PA_SECTION_4,
	PW_MD3_PA_SECTION_5,
	PW_MD3_PA_SECTION_6
};
#endif

int g_dlpt_need_do = 1;

static DEFINE_MUTEX(pbm_mutex);
static DEFINE_MUTEX(pbm_table_lock);

static struct task_struct *pbm_thread;
static atomic_t kthread_nreq = ATOMIC_INIT(0);

/* extern u32 get_devinfo_with_index(u32 index); */

/*
 * weak function: fallback when the thermal driver is not ready
 */
#if 1
__weak int tscpu_get_min_cpu_pwr(void)
{
	pbm_crit("tscpu_get_min_cpu_pwr not ready\n");
	return 0;
}
#endif

int get_battery_volt(void)
{
	/* return mV */
	return PMIC_IMM_GetOneChannelValue(PMIC_AUX_BATSNS_AP, 5, 1);
	/* return 3900; */
}
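
/*
 * Convert a current budget (mA) into a power budget (mW) using the live
 * battery voltage: mW = (mV * mA) / 1000.
 */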
unsigned int ma_to_mw(unsigned int val)
{
	unsigned int bat_vol = 0;
	unsigned int ret_val = 0;

	bat_vol = get_battery_volt();	/* mV */
	ret_val = (bat_vol * val) / 1000;	/* mW = (mV * mA) / 1000 */

	pbm_crit("[%s] %d(mV) * %d(mA) = %d(mW)\n", __func__, bat_vol, val, ret_val);

	return ret_val;
}

void dump_kicker_info(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;

#if 1
	pbm_debug("(M1/M3/F/G)=%d,%d,%d,%d;(C/G)=%ld,%ld\n",
		  hpfmgr->switch_md1,
		  hpfmgr->switch_md3,
		  hpfmgr->switch_flash,
		  hpfmgr->switch_gpu, hpfmgr->loading_cpu, hpfmgr->loading_gpu);
#else
	pbm_debug("[***] Switch (MD1: %d, MD3: %d, GPU: %d, Flash: %d, CPU_volt: %d, GPU_volt: %d, CPU_num: %d)\n",
		  hpfmgr->switch_md1, hpfmgr->switch_md3, hpfmgr->switch_gpu, hpfmgr->switch_flash,
		  hpfmgr->cpu_volt, hpfmgr->gpu_volt, hpfmgr->cpu_num);
	pbm_debug("[***] Resource (DLPT: %ld, Leakage: %ld, MD1: %ld, MD3: %ld, CPU: %ld, GPU: %ld, Flash: %ld)\n",
		  hpfmgr->loading_dlpt, hpfmgr->loading_leakage, hpfmgr->loading_md1,
		  hpfmgr->loading_md3, hpfmgr->loading_cpu, hpfmgr->loading_gpu,
		  hpfmgr->loading_flash);
#endif
}

int hpf_get_power_leakage(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;
	unsigned int leakage_cpu = 0, leakage_gpu = 0;

	leakage_cpu = mt_cpufreq_get_leakage_mw(0);
	leakage_gpu = mt_gpufreq_get_leakage_mw();
	hpfmgr->loading_leakage = leakage_cpu + leakage_gpu;

	pbm_debug("[%s] %ld=%d+%d\n", __func__, hpfmgr->loading_leakage,
		  leakage_cpu, leakage_gpu);

	return hpfmgr->loading_leakage;
}

unsigned long hpf_get_power_cpu(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;

	return hpfmgr->loading_cpu;
}

unsigned long hpf_get_power_gpu(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;

	if (hpfmgr->switch_gpu)
		return hpfmgr->loading_gpu;
	else
		return 0;
}

unsigned long hpf_get_power_flash(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;

	if (hpfmgr->switch_flash)
		return hpfmgr->loading_flash;
	else
		return 0;
}

unsigned long hpf_get_power_dlpt(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;

	return hpfmgr->loading_dlpt;
}

#if MD_POWER_METER_ENABLE
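/*
 * Pack the per-RAT section-level tables into one 32-bit word per RAT
 * and publish them to the modem through the CCCI shared memory, so AP
 * and MD agree on how TX-power sections are encoded.
 */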
static void init_md1_section_level(void)
{
	u32 *share_mem;
	u32 mem_2g = 0, mem_3g = 0, mem_4g = 0, mem_tdd = 0;
	int section;

	share_mem = (u32 *)get_smem_start_addr(MD_SYS1, 0, NULL);

	for (section = 1; section <= SECTION_NUM; section++) {
		mem_2g |= md1_section_level_2g[section] << section_level[section];
		mem_3g |= md1_section_level_3g[section] << section_level[section];
		mem_4g |= md1_section_level_4g[section] << section_level[section];
		mem_tdd |= md1_section_level_tdd[section] << section_level[section];
	}

	/* keep 4 bytes = 32 bits */
	mem_2g &= SECTION_LEN;
	mem_3g &= SECTION_LEN;
	mem_4g &= SECTION_LEN;
	mem_tdd &= SECTION_LEN;

	share_mem[SECTION_LEVLE_2G] = mem_2g;
	share_mem[SECTION_LEVLE_3G] = mem_3g;
	share_mem[SECTION_LEVLE_4G] = mem_4g;
	share_mem[SECTION_LEVLE_TDD] = mem_tdd;

	pbm_crit("AP2MD1 section level, 2G: 0x%x(0x%x), 3G: 0x%x(0x%x), 4G: 0x%x(0x%x), TDD: 0x%x(0x%x), addr: 0x%p\n",
		 mem_2g, share_mem[SECTION_LEVLE_2G],
		 mem_3g, share_mem[SECTION_LEVLE_3G],
		 mem_4g, share_mem[SECTION_LEVLE_4G],
		 mem_tdd, share_mem[SECTION_LEVLE_TDD],
		 share_mem);
}

static void init_md3_section_level(void)
{
	u32 *share_mem;
	u32 mem_c2k = 0;
	int section;

	share_mem = (u32 *)get_smem_start_addr(MD_SYS3, 0, NULL);

	for (section = 1; section <= SECTION_NUM; section++)
		mem_c2k |= md3_section_level[section] << section_level[section];

	/* keep 4 bytes = 32 bits */
	mem_c2k &= SECTION_LEN;

	share_mem[SECTION_LEVLE_C2K] = mem_c2k;

	pbm_crit("AP2MD3 section level, C2K: 0x%x(0x%x), addr: 0x%p\n",
		 mem_c2k, share_mem[SECTION_LEVLE_C2K], share_mem);
}

void init_md_section_level(enum pbm_kicker kicker)
{
	struct hpf *hpfmgr = &hpf_ctrl;

	if (kicker == KR_MD1) {
		init_md1_section_level();
		hpfmgr->md1_ccci_ready = 1;
	} else if (kicker == KR_MD3) {
		init_md3_section_level();
		hpfmgr->md3_ccci_ready = 1;
	} else {
		pbm_crit("unknown MD kicker: %d\n", kicker);
	}

	pbm_crit("MD section level init, MD1: %d, MD3: %d\n",
		 hpfmgr->md1_ccci_ready, hpfmgr->md3_ccci_ready);
}
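
/*
 * Scan the SPM share register for every scenario bit MD1 reports as
 * active and return the index of the one with the highest scenario
 * power; fall back to PAGING when no bit is set.
 */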
static int get_md1_scenario(void)
{
#ifndef TEST_MD_POWER
	u32 share_reg;
	int pw_scenario = 0, scenario = -1;
	int i;

	/* get scenario from the share register on the SPM */
	share_reg = spm_vcorefs_get_MD_status();

	/* get the index of the working scenario with max power (bits 4 and 5 unused) */
	for (i = 0; i < SCENARIO_NUM; i++) {
		if (share_reg & (1 << i)) {
			if (md1_scenario_pwr[i] >= pw_scenario) {
				pw_scenario = md1_scenario_pwr[i];
				scenario = i;
			}
		}
	}

	scenario = (scenario < 0) ? PAGING : scenario;
	pbm_debug("MD1 scenario: 0x%x, reg: 0x%x, pw: %d\n",
		  scenario, share_reg, md1_scenario_pwr[scenario]);

	return scenario;
#else
	u32 share_reg;
	int pw_scenario = 0, scenario = -1;
	int i, j;

	for (j = 0; j < SCENARIO_NUM; j++) {
		share_reg = 0;
		pw_scenario = 0;
		scenario = -1;
		share_reg |= (1 << j);

		/* get the index of the working scenario with max power (bits 4 and 5 unused) */
		for (i = 0; i < SCENARIO_NUM; i++) {
			if (share_reg & (1 << i)) {
				if (md1_scenario_pwr[i] >= pw_scenario) {
					pw_scenario = md1_scenario_pwr[i];
					scenario = i;
				}
			}
		}

		scenario = (scenario < 0) ? PAGING : scenario;
		pbm_debug("MD1 scenario: 0x%x, reg: 0x%x, pw: %d\n",
			  scenario, share_reg, md1_scenario_pwr[scenario]);
	}

	return scenario;
#endif
}
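
/*
 * The MD reports its TX power as per-section counters in shared memory.
 * Each *_dbm_power() helper diffs the current table against the last
 * snapshot; a changed section selects the matching PA power entry plus
 * an RF power tier.  An unchanged table means no TX activity, so the
 * helper contributes 0 mW.
 */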
static int get_md1_2g_dbm_power(u32 *share_mem)
{
	static u32 bef_share_mem;
	static int pa_power, rf_power;
	int section;

	if (share_mem[DBM_2G_TABLE] == bef_share_mem) {
		pbm_debug("MD1 2G dBm, no TX power, reg: 0x%x(0x%x) return 0, pa: %d, rf: %d\n",
			  share_mem[DBM_2G_TABLE], bef_share_mem, pa_power, rf_power);
		return 0;
	}

	for (section = 1; section <= SECTION_NUM; section++) {
		if (((share_mem[DBM_2G_TABLE] >> section_level[section]) & SECTION_VALUE) !=
		    ((bef_share_mem >> section_level[section]) & SECTION_VALUE)) {
			/* get PA power */
			pa_power = md1_pa_pwr_2g[section];
			/* get RF power */
			if (section == SECTION_NUM)
				rf_power = PW_MD1_RF_SECTION_2;
			else
				rf_power = PW_MD1_RF_SECTION_1;

			pbm_debug("MD1 2G dBm update, reg: 0x%x, bef_reg: 0x%x, pa: %d, rf: %d, section: %d\n",
				  share_mem[DBM_2G_TABLE], bef_share_mem, pa_power, rf_power, section);
			bef_share_mem = share_mem[DBM_2G_TABLE];
			break;
		}
	}

	return pa_power + rf_power;
}

static int get_md1_3g_dbm_power(u32 *share_mem)
{
	static u32 bef_share_mem;
	static int pa_power, rf_power;
	int section;

	if (share_mem[DBM_3G_TABLE] == bef_share_mem) {
		pbm_debug("MD1 3G dBm, no TX power, reg: 0x%x(0x%x) return 0, pa: %d, rf: %d\n",
			  share_mem[DBM_3G_TABLE], bef_share_mem, pa_power, rf_power);
		return 0;
	}

	for (section = 1; section <= SECTION_NUM; section++) {
		if (((share_mem[DBM_3G_TABLE] >> section_level[section]) & SECTION_VALUE) !=
		    ((bef_share_mem >> section_level[section]) & SECTION_VALUE)) {
			/* get PA power */
			pa_power = md1_pa_pwr_3g[section];
			/* get RF power */
			if (section == SECTION_NUM)
				rf_power = PW_MD1_RF_SECTION_2;
			else
				rf_power = PW_MD1_RF_SECTION_1;

			pbm_debug("MD1 3G dBm update, reg: 0x%x, bef_reg: 0x%x, pa: %d, rf: %d, section: %d\n",
				  share_mem[DBM_3G_TABLE], bef_share_mem, pa_power, rf_power, section);
			bef_share_mem = share_mem[DBM_3G_TABLE];
			break;
		}
	}

	return pa_power + rf_power;
}

static int get_md1_4g_dbm_power(u32 *share_mem)
{
	static u32 bef_share_mem;
	static int pa_power, rf_power;
	int section;

	if (share_mem[DBM_4G_TABLE] == bef_share_mem) {
		pbm_debug("MD1 4G dBm, no TX power, reg: 0x%x(0x%x) return 0, pa: %d, rf: %d\n",
			  share_mem[DBM_4G_TABLE], bef_share_mem, pa_power, rf_power);
		return 0;
	}

	for (section = 1; section <= SECTION_NUM; section++) {
		if (((share_mem[DBM_4G_TABLE] >> section_level[section]) & SECTION_VALUE) !=
		    ((bef_share_mem >> section_level[section]) & SECTION_VALUE)) {
			/* get PA power */
			pa_power = md1_pa_pwr_4g[section];
			/* get RF power */
			if (section == SECTION_NUM)
				rf_power = PW_MD1_RF_SECTION_2;
			else
				rf_power = PW_MD1_RF_SECTION_1;

			pbm_debug("MD1 4G dBm update, reg: 0x%x, bef_reg: 0x%x, pa: %d, rf: %d, section: %d\n",
				  share_mem[DBM_4G_TABLE], bef_share_mem, pa_power, rf_power, section);
			bef_share_mem = share_mem[DBM_4G_TABLE];
			break;
		}
	}

	return pa_power + rf_power;
}

static int get_md3_dBm_power(void)
{
	u32 *share_mem;
	static u32 bef_share_mem;
	static int pa_power, rf_power;
	int section;
	int i;

	/* get the dBm table from shared memory on EMI */
	share_mem = (u32 *)get_smem_start_addr(MD_SYS3, 0, NULL);
	if (share_mem == NULL) {
		pbm_debug("MD3 share_mem is NULL, use max pa and rf power (1956 + 280)\n");
		return 1956 + 280;
	}

	pbm_debug("[%s] share mem addr: 0x%p\n", __func__, share_mem);
	for (i = 0; i < SHARE_MEM_BLOCK_NUM; i++)
		pbm_debug("section: %d, value: 0x%x\n", i, share_mem[i]);

	if (share_mem[DBM_C2K_TABLE] == bef_share_mem) {
		pbm_debug("MD3 dBm, no TX power, reg: 0x%x(0x%x) return 0, pa: %d, rf: %d\n",
			  share_mem[DBM_C2K_TABLE], bef_share_mem, pa_power, rf_power);
		return 0;
	}

	for (section = 1; section <= SECTION_NUM; section++) {
		if (((share_mem[DBM_C2K_TABLE] >> section_level[section]) & SECTION_VALUE) !=
		    ((bef_share_mem >> section_level[section]) & SECTION_VALUE)) {
			/* get PA power */
			pa_power = md3_pa_pwr[section];
			/* get RF power */
			if (section == SECTION_NUM)
				rf_power = PW_MD3_RF_SECTION_2;
			else
				rf_power = PW_MD3_RF_SECTION_1;

			pbm_debug("MD3 dBm update, reg: 0x%x, bef_reg: 0x%x, pa: %d, rf: %d, section: %d\n",
				  share_mem[DBM_C2K_TABLE], bef_share_mem, pa_power, rf_power, section);
			bef_share_mem = share_mem[DBM_C2K_TABLE];
			break;
		}
	}

	return pa_power + rf_power;
}

static int get_md1_dBm_power(int scenario)
{
	u32 *share_mem;
	int dbm_power;
	int i;

	if (scenario == PAGING) {
		pbm_debug("MD1 is paging, dBm pw: 0\n");
		return 0;
	}

	/* get the dBm table from shared memory on EMI */
	share_mem = (u32 *)get_smem_start_addr(MD_SYS1, 0, NULL);
	if (share_mem == NULL) {
		pbm_debug("MD1 share_mem is NULL, use max pa and rf power (1965 + 512)\n");
		return 1965 + 512;
	}

	pbm_debug("[%s] share mem addr: 0x%p\n", __func__, share_mem);
	for (i = 0; i < SHARE_MEM_BLOCK_NUM; i++)
		pbm_debug("section: %d, value: 0x%x\n", i, share_mem[i]);

	if (scenario == TALKING_2G || scenario == DATALINK_2G)
		dbm_power = get_md1_2g_dbm_power(share_mem);
	else if (scenario == TALKING_3G || scenario == DATALINK_3G)
		dbm_power = get_md1_3g_dbm_power(share_mem);
	else if (scenario == CAT6_CA_DATALINK || scenario == NON_CA_DATALINK || scenario == POSITION)
		dbm_power = get_md1_4g_dbm_power(share_mem);
	else
		dbm_power = 0;

	return dbm_power;
}

#else /* !MD_POWER_METER_ENABLE */

void init_md_section_level(enum pbm_kicker kicker)
{
	pbm_crit("MD_POWER_METER_ENABLE:0\n");
}
#endif

#ifdef TEST_MD_POWER
static void test_md_dbm_power(void)
{
	u32 i, j, y, z = 1, section[1] = {0};

	/* walk through the MD1 2G dBm raw data */
	for (i = 1; i <= SECTION_NUM; i++) {
		for (j = 1; j <= SECTION_VALUE; j++) {
			/* read the current section level into y and bump it */
			y = (section[DBM_2G_TABLE] >> section_level[i]) & SECTION_VALUE;
			y = (y + 1) << section_level[i];
			/* clear the target section level bits */
			z = ~((z | SECTION_VALUE) << section_level[i]);
			section[DBM_2G_TABLE] &= z;
			/* write the bumped value back into the section table */
			section[DBM_2G_TABLE] |= y;
			get_md1_2g_dbm_power(section);
		}
	}
}
#endif
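
/*
 * MD power = scenario (baseband) power + dBm (PA/RF TX) power.  Until
 * CCCI reports the shared memory ready, assume the worst case MDx_MAX_PW.
 */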
unsigned long hpf_get_power_md1(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;
#if MD_POWER_METER_ENABLE
	u32 pw_scenario, pw_dBm;
	int scenario;
#endif

	if (hpfmgr->switch_md1) {
#if MD_POWER_METER_ENABLE
		if (!hpfmgr->md1_ccci_ready)
			return MD1_MAX_PW;

		/* get max scenario index */
		scenario = get_md1_scenario();
		/* get scenario power */
		pw_scenario = md1_scenario_pwr[scenario];
		/* get dBm power */
		pw_dBm = get_md1_dBm_power(scenario);

		hpfmgr->loading_md1 = pw_scenario + pw_dBm;
#else
		return MD1_MAX_PW;
#endif
	} else {
		hpfmgr->loading_md1 = 0;
	}

	return hpfmgr->loading_md1;
}

unsigned long hpf_get_power_md3(void)
{
	struct hpf *hpfmgr = &hpf_ctrl;
#if MD_POWER_METER_ENABLE
	u32 pw_scenario, pw_dBm;
#endif

	if (hpfmgr->switch_md3) {
#if MD_POWER_METER_ENABLE
		if (!hpfmgr->md3_ccci_ready)
			return MD3_MAX_PW;

		/* get scenario power */
		pw_scenario = PW_MD3;
		pbm_debug("MD3 scenario pw: %d\n", pw_scenario);
		/* get dBm power */
		pw_dBm = get_md3_dBm_power();

		hpfmgr->loading_md3 = pw_scenario + pw_dBm;
#else
		return MD3_MAX_PW;
#endif
	} else {
		hpfmgr->loading_md3 = 0;
	}

	return hpfmgr->loading_md3;
}
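
/*
 * Core allocation policy: take the DLPT budget, subtract the fixed
 * consumers (leakage, MD1, MD3, flash), and split what remains between
 * CPU and GPU in proportion to their requested loadings, while
 * honouring the thermal CPU lower bound.
 */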
static void pbm_allocate_budget_manager(void)
{
	int _dlpt = 0, leakage = 0, md1 = 0, md3 = 0, dlpt = 0, cpu = 0, gpu = 0, flash = 0;
	int tocpu = 0, togpu = 0;
	int multiple = 0;
	int cpu_lower_bound = tscpu_get_min_cpu_pwr();

	mutex_lock(&pbm_table_lock);
	/* dump_kicker_info(); */
	leakage = hpf_get_power_leakage();
	md1 = hpf_get_power_md1();
	md3 = hpf_get_power_md3();
	dlpt = hpf_get_power_dlpt();
	cpu = hpf_get_power_cpu();
	gpu = hpf_get_power_gpu();
	flash = hpf_get_power_flash();
	mutex_unlock(&pbm_table_lock);

	/* no budget to allocate yet */
	if (dlpt == 0) {
		pbm_debug("DLPT=0\n");
		return;
	}

	_dlpt = dlpt - (leakage + md1 + md3 + flash);
	if (_dlpt < 0)
		_dlpt = 0;

	/* if the GPU requests no budget, allocate everything to the CPU */
	if (gpu == 0) {
		tocpu = _dlpt;

		/* check CPU lower bound */
		if (tocpu < cpu_lower_bound)
			tocpu = cpu_lower_bound;
		if (tocpu <= 0)
			tocpu = 1;

		mt_ppm_dlpt_set_limit_by_pbm(tocpu);
	} else {
		multiple = (_dlpt * 1000) / (cpu + gpu);

		if (multiple > 0) {
			tocpu = (multiple * cpu) / 1000;
			togpu = (multiple * gpu) / 1000;
		} else {
			tocpu = 1;
			togpu = 1;
		}

		/* check CPU lower bound */
		if (tocpu < cpu_lower_bound) {
			tocpu = cpu_lower_bound;
			togpu = _dlpt - cpu_lower_bound;
		}

		if (tocpu <= 0)
			tocpu = 1;
		if (togpu <= 0)
			togpu = 1;

		mt_ppm_dlpt_set_limit_by_pbm(tocpu);
		mt_gpufreq_set_power_limit_by_pbm(togpu);
	}

	if (mt_pbm_debug) {
		pbm_debug("(C/G)=%d,%d => (D/L/M1/M3/F/C/G)=%d,%d,%d,%d,%d,%d,%d (Multi:%d),%d\n",
			  cpu, gpu, dlpt, leakage, md1, md3, flash, tocpu, togpu,
			  multiple, cpu_lower_bound);
	} else {
		if ((cpu > tocpu) || (gpu > togpu))
			pbm_crit("(C/G)=%d,%d => (D/L/M1/M3/F/C/G)=%d,%d,%d,%d,%d,%d,%d (Multi:%d),%d\n",
				 cpu, gpu, dlpt, leakage, md1, md3, flash, tocpu, togpu,
				 multiple, cpu_lower_bound);
	}
}

static bool pbm_func_enable_check(void)
{
	struct pbm *pwrctrl = &pbm_ctrl;

	if (!pwrctrl->feature_en || !pwrctrl->pbm_drv_done) {
		pbm_crit("feature_en: %d, pbm_drv_done: %d\n",
			 pwrctrl->feature_en, pwrctrl->pbm_drv_done);
		return false;
	}

	return true;
}
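
/*
 * Fold one kicker's report into hpf_ctrl.  Returns true only when a
 * field that affects the budget actually changed, so unchanged reports
 * never wake the worker thread.
 */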
static bool pbm_update_table_info(enum pbm_kicker kicker, struct mrp *mrpmgr)
{
	struct hpf *hpfmgr = &hpf_ctrl;
	bool is_update = false;

	switch (kicker) {
	case KR_DLPT:	/* kicker 0 */
		if (hpfmgr->loading_dlpt != mrpmgr->loading_dlpt) {
			hpfmgr->loading_dlpt = mrpmgr->loading_dlpt;
			is_update = true;
		}
		break;
	case KR_MD1:	/* kicker 1 */
		if (hpfmgr->switch_md1 != mrpmgr->switch_md) {
			hpfmgr->switch_md1 = mrpmgr->switch_md;
			is_update = true;
		}
		break;
	case KR_MD3:	/* kicker 2 */
		if (hpfmgr->switch_md3 != mrpmgr->switch_md) {
			hpfmgr->switch_md3 = mrpmgr->switch_md;
			is_update = true;
		}
		break;
	case KR_CPU:	/* kicker 3 */
		hpfmgr->cpu_volt = mrpmgr->cpu_volt;
		if (hpfmgr->loading_cpu != mrpmgr->loading_cpu
		    || hpfmgr->cpu_num != mrpmgr->cpu_num) {
			hpfmgr->loading_cpu = mrpmgr->loading_cpu;
			hpfmgr->cpu_num = mrpmgr->cpu_num;
			is_update = true;
		}
		break;
	case KR_GPU:	/* kicker 4 */
		hpfmgr->gpu_volt = mrpmgr->gpu_volt;
		if (hpfmgr->switch_gpu != mrpmgr->switch_gpu
		    || hpfmgr->loading_gpu != mrpmgr->loading_gpu) {
			hpfmgr->switch_gpu = mrpmgr->switch_gpu;
			hpfmgr->loading_gpu = mrpmgr->loading_gpu;
			is_update = true;
		}
		break;
	case KR_FLASH:	/* kicker 5 */
		if (hpfmgr->switch_flash != mrpmgr->switch_flash) {
			hpfmgr->switch_flash = mrpmgr->switch_flash;
			is_update = true;
		}
		break;
	default:
		pbm_crit("[%s] ERROR, unknown kicker [%d]\n", __func__, kicker);
		BUG();
		break;
	}

	return is_update;
}
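
/*
 * Hand the request to the worker thread.  A flash-on kick additionally
 * busy-waits until the thread has consumed the request, so the budget
 * is re-balanced before the flashlight actually fires.
 */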
static void pbm_wake_up_thread(enum pbm_kicker kicker, struct mrp *mrpmgr)
{
	if (atomic_read(&kthread_nreq) <= 0) {
		atomic_inc(&kthread_nreq);
		wake_up_process(pbm_thread);
	}

	/* for a flash-on request, wait until the thread has run */
	while (kicker == KR_FLASH && mrpmgr->switch_flash == 1) {
		if (atomic_read(&kthread_nreq) == 0)
			return;
	}
}
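
/*
 * Common entry point for all kickers: update the table under the lock,
 * then wake the worker only when something changed and the feature is
 * enabled and initialised.
 */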
static void mtk_power_budget_manager(enum pbm_kicker kicker, struct mrp *mrpmgr)
{
	bool pbm_enable = false;
	bool pbm_update = false;

	mutex_lock(&pbm_table_lock);
	pbm_update = pbm_update_table_info(kicker, mrpmgr);
	mutex_unlock(&pbm_table_lock);

	if (!pbm_update)
		return;

	pbm_enable = pbm_func_enable_check();
	if (!pbm_enable)
		return;

	pbm_wake_up_thread(kicker, mrpmgr);
}

/*
 * kicker: 0
 * caller: PMIC
 * i_max: mA
 * condition: the battery percentage decreases by 1%, then update i_max
 */
void kicker_pbm_by_dlpt(unsigned int i_max)
{
	struct pbm *pwrctrl = &pbm_ctrl;
	struct mrp mrpmgr;

	mrpmgr.loading_dlpt = ma_to_mw(i_max);

	if (BIT_CHECK(pwrctrl->hpf_en, KR_DLPT))
		mtk_power_budget_manager(KR_DLPT, &mrpmgr);
}

/*
 * kicker: 1, 2
 * caller: MD1, MD3
 * condition: on/off
 */
void kicker_pbm_by_md(enum pbm_kicker kicker, bool status)
{
	struct pbm *pwrctrl = &pbm_ctrl;
	struct mrp mrpmgr;

	mrpmgr.switch_md = status;

	if (BIT_CHECK(pwrctrl->hpf_en, kicker))
		mtk_power_budget_manager(kicker, &mrpmgr);
}

/*
 * kicker: 3
 * caller: CPU
 * loading: mW
 * condition: OPP changed
 */
void kicker_pbm_by_cpu(unsigned int loading, int core, int voltage)
{
	struct pbm *pwrctrl = &pbm_ctrl;
	struct mrp mrpmgr;

	mrpmgr.loading_cpu = loading;
	mrpmgr.cpu_num = core;
	mrpmgr.cpu_volt = voltage;

	if (BIT_CHECK(pwrctrl->hpf_en, KR_CPU))
		mtk_power_budget_manager(KR_CPU, &mrpmgr);
}

/*
 * kicker: 4
 * caller: GPU
 * loading: mW
 * condition: OPP changed
 */
void kicker_pbm_by_gpu(bool status, unsigned int loading, int voltage)
{
	struct pbm *pwrctrl = &pbm_ctrl;
	struct mrp mrpmgr;

	mrpmgr.switch_gpu = status;
	mrpmgr.loading_gpu = loading;
	mrpmgr.gpu_volt = voltage;

	if (BIT_CHECK(pwrctrl->hpf_en, KR_GPU))
		mtk_power_budget_manager(KR_GPU, &mrpmgr);
}

/*
 * kicker: 5
 * caller: Flash
 * condition: on/off
 */
void kicker_pbm_by_flash(bool status)
{
	struct pbm *pwrctrl = &pbm_ctrl;
	struct mrp mrpmgr;

	mrpmgr.switch_flash = status;

	if (BIT_CHECK(pwrctrl->hpf_en, KR_FLASH))
		mtk_power_budget_manager(KR_FLASH, &mrpmgr);
}

/* extern int g_dlpt_stop; in mt_pbm.h */
int g_dlpt_state_sync;
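
/*
 * Worker thread: sleeps until a kicker posts a request, then either
 * re-runs the budget allocation or, when DLPT is stopped, releases the
 * CPU/GPU limits exactly once.
 */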
static int pbm_thread_handle(void *data)
{
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;

		if (atomic_read(&kthread_nreq) <= 0) {
			schedule();
			continue;
		}

		mutex_lock(&pbm_mutex);
		if (g_dlpt_need_do == 1) {
			if (g_dlpt_stop == 0) {
				pbm_allocate_budget_manager();
				g_dlpt_state_sync = 0;
			} else {
				pbm_err("DISABLE PBM\n");
				if (g_dlpt_state_sync == 0) {
					mt_ppm_dlpt_set_limit_by_pbm(0);
					mt_gpufreq_set_power_limit_by_pbm(0);
					g_dlpt_state_sync = 1;
					pbm_err("Release DLPT limit\n");
				}
			}
		}
		atomic_dec(&kthread_nreq);
		mutex_unlock(&pbm_mutex);
	}

	__set_current_state(TASK_RUNNING);
	return 0;
}

static int create_pbm_kthread(void)
{
	struct pbm *pwrctrl = &pbm_ctrl;

	pbm_thread = kthread_create(pbm_thread_handle, NULL, "pbm");
	if (IS_ERR(pbm_thread))
		return PTR_ERR(pbm_thread);
	wake_up_process(pbm_thread);

	/* keep kickers from waking the thread before init is done */
	pwrctrl->pbm_drv_done = 1;

	return 0;
}
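
/*
 * PM notifier: g_dlpt_need_do gates the worker so it does not touch the
 * limits while the system is suspending.
 */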
static int _mt_pbm_pm_callback(struct notifier_block *nb,
			       unsigned long action, void *ptr)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
		pbm_err("PM_SUSPEND_PREPARE:start\n");
		mutex_lock(&pbm_mutex);
		g_dlpt_need_do = 0;
		mutex_unlock(&pbm_mutex);
		pbm_err("PM_SUSPEND_PREPARE:end\n");
		break;
	case PM_HIBERNATION_PREPARE:
		break;
	case PM_POST_SUSPEND:
		pbm_err("PM_POST_SUSPEND:start\n");
		mutex_lock(&pbm_mutex);
		g_dlpt_need_do = 1;
		mutex_unlock(&pbm_mutex);
		pbm_err("PM_POST_SUSPEND:end\n");
		break;
	case PM_POST_HIBERNATION:
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

#if 1 /* CONFIG_PBM_PROC_FS */
/*
 * show current debug status
 */
static int mt_pbm_debug_proc_show(struct seq_file *m, void *v)
{
	if (mt_pbm_debug)
		seq_puts(m, "pbm debug enabled\n");
	else
		seq_puts(m, "pbm debug disabled\n");

	return 0;
}

/*
 * enable debug message
 */
static ssize_t mt_pbm_debug_proc_write(struct file *file, const char __user *buffer,
				       size_t count, loff_t *data)
{
	char desc[32];
	int len = 0;
	int debug = 0;

	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
	if (copy_from_user(desc, buffer, len))
		return 0;
	desc[len] = '\0';

	/* if (sscanf(desc, "%d", &debug) == 1) { */
	if (kstrtoint(desc, 10, &debug) == 0) {
		if (debug == 0)
			mt_pbm_debug = 0;
		else if (debug == 1)
			mt_pbm_debug = 1;
		else
			pbm_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
	} else {
		pbm_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
	}

	return count;
}
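
/* usage: "echo 1 > /proc/pbm/pbm_debug" enables verbose logging; "echo 0" disables it */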

#define PROC_FOPS_RW(name) \
static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode)); \
} \
static const struct file_operations mt_ ## name ## _proc_fops = { \
	.owner = THIS_MODULE, \
	.open = mt_ ## name ## _proc_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
	.write = mt_ ## name ## _proc_write, \
}

#define PROC_FOPS_RO(name) \
static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode)); \
} \
static const struct file_operations mt_ ## name ## _proc_fops = { \
	.owner = THIS_MODULE, \
	.open = mt_ ## name ## _proc_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

#define PROC_ENTRY(name) {__stringify(name), &mt_ ## name ## _proc_fops}

PROC_FOPS_RW(pbm_debug);

static int mt_pbm_create_procfs(void)
{
	struct proc_dir_entry *dir = NULL;
	int i;

	struct pentry {
		const char *name;
		const struct file_operations *fops;
	};

	const struct pentry entries[] = {
		PROC_ENTRY(pbm_debug),
	};

	dir = proc_mkdir("pbm", NULL);
	if (!dir) {
		pbm_err("fail to create /proc/pbm @ %s()\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(entries); i++) {
		if (!proc_create(entries[i].name, S_IRUGO | S_IWUSR | S_IWGRP,
				 dir, entries[i].fops))
			pbm_err("@%s: create /proc/pbm/%s failed\n", __func__,
				entries[i].name);
	}

	return 0;
}
#endif /* CONFIG_PBM_PROC_FS */

static int __init pbm_module_init(void)
{
	int ret = 0;

#if 1 /* CONFIG_PBM_PROC_FS */
	mt_pbm_create_procfs();
#endif

	pm_notifier(_mt_pbm_pm_callback, 0);
	register_dlpt_notify(&kicker_pbm_by_dlpt, DLPT_PRIO_PBM);
	ret = create_pbm_kthread();

#ifdef TEST_MD_POWER
	pbm_crit("share_reg: %x\n", spm_vcorefs_get_MD_status());
	test_md_dbm_power();
	get_md1_scenario();
#endif

	pbm_crit("pbm_module_init: Done\n");

	if (ret) {
		pbm_err("FAILED TO CREATE PBM KTHREAD\n");
		return ret;
	}

	return ret;
}

#else /* DISABLE_PBM_FEATURE */

void kicker_pbm_by_dlpt(unsigned int i_max)
{
}

void kicker_pbm_by_md(enum pbm_kicker kicker, bool status)
{
}

void kicker_pbm_by_cpu(unsigned int loading, int core, int voltage)
{
}

void kicker_pbm_by_gpu(bool status, unsigned int loading, int voltage)
{
}

void kicker_pbm_by_flash(bool status)
{
}

void init_md_section_level(enum pbm_kicker kicker)
{
}

static int __init pbm_module_init(void)
{
	pr_crit("DISABLE_PBM_FEATURE is defined.\n");
	return 0;
}
#endif /* DISABLE_PBM_FEATURE */

static void __exit pbm_module_exit(void)
{
}

module_init(pbm_module_init);
module_exit(pbm_module_exit);

MODULE_DESCRIPTION("PBM Driver v0.1");