/*
 * mt_pbm.c - MediaTek Power Budget Manager (PBM) driver
 */
  1. #define pr_fmt(fmt) "[PBM] " fmt
  2. #include <linux/kernel.h>
  3. #include <linux/module.h>
  4. #include <linux/init.h>
  5. #include <linux/kobject.h>
  6. #include <linux/wakelock.h>
  7. #include <linux/kthread.h>
  8. #include <linux/atomic.h>
  9. #include <linux/mutex.h>
  10. #include <linux/delay.h>
  11. #include <linux/string.h>
  12. #include <linux/sysfs.h>
  13. #include <linux/sched/rt.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/vmalloc.h>
  16. #include <linux/suspend.h>
  17. #include <linux/proc_fs.h>
  18. #include <mach/mt_pbm.h>
  19. #include <mach/upmu_sw.h>
  20. #include <mt-plat/upmu_common.h>
  21. #include <mt_cpufreq.h>
  22. #include <mt_gpufreq.h>
  23. #include <mach/mt_thermal.h>
  24. #ifndef DISABLE_PBM_FEATURE
  25. /* reference PMIC */
  26. /* extern kal_uint32 PMIC_IMM_GetOneChannelValue(kal_uint8 dwChannel, int deCount, int trimd); */
  27. /* #define DLPT_PRIO_PBM 0 */
  28. /* void (*dlpt_callback)(unsigned int); */
  29. /* void register_dlpt_notify( void (*dlpt_callback)(unsigned int), int i){} */
  30. /* reference mt_cpufreq.h and mt_gpufreq.h */
  31. /* unsigned int mt_cpufreq_get_leakage_mw(int i){return 111;} */
  32. /* unsigned int mt_gpufreq_get_leakage_mw(void){return 111;} */
  33. /* void mt_cpufreq_set_power_limit_by_pbm(unsigned int limited_power){} */
  34. /* void mt_gpufreq_set_power_limit_by_pbm(unsigned int limited_power){} */
/* Runtime gate for verbose logging, toggled via /proc/pbm/pbm_debug. */
static bool mt_pbm_debug;
/* Log wrappers, one per printk severity. Note pbm_notice/pbm_info map to
 * pr_debug (compiled out unless dynamic debug is enabled), while
 * pbm_debug() prints at crit level but only when mt_pbm_debug is set. */
#define pbm_emerg(fmt, args...) pr_emerg(fmt, ##args)
#define pbm_alert(fmt, args...) pr_alert(fmt, ##args)
#define pbm_crit(fmt, args...) pr_crit(fmt, ##args)
#define pbm_err(fmt, args...) pr_err(fmt, ##args)
#define pbm_warn(fmt, args...) pr_warn(fmt, ##args)
#define pbm_notice(fmt, args...) pr_debug(fmt, ##args)
#define pbm_info(fmt, args...) pr_debug(fmt, ##args)
#define pbm_debug(fmt, args...) \
do { \
	if (mt_pbm_debug) \
		pr_crit(fmt, ##args); \
} while (0)
/* Test bit b of a; used on pbm_ctrl.hpf_en (per-kicker enable mask). */
#define BIT_CHECK(a, b) ((a) & (1<<(b)))
/* Fixed power budgets in mW for the modem and the camera flash. */
#define POWER_MD 1800 /* mW */
#define POWER_FLASH 2500 /* mW */
/*
 * Kicker state table: on/off switches and the power (mW) each consumer
 * currently requests. Written by pbm_update_table_info() and read by
 * pbm_allocate_budget_manager(), both under pbm_table_lock.
 */
static struct hpf hpf_ctrl = {
	.switch_md1 = 1,	/* modem 1 treated as on from boot */
	.switch_md2 = 0,
	.switch_gpu = 0,
	.switch_flash = 0,
	.cpu_volt = 1000,	/* 1V = boot up voltage */
	.gpu_volt = 0,
	.cpu_num = 1,		/* default cpu0 core */
	.loading_leakage = 0,
	.loading_dlpt = 0,
	.loading_md = POWER_MD,	/* fixed */
	.loading_cpu = 0,
	.loading_gpu = 0,
	.loading_flash = POWER_FLASH,	/* fixed */
};
/* Driver-level control: master feature switch, init-done flag, and the
 * per-kicker enable bit mask. */
static struct pbm pbm_ctrl = {
	/* feature key */
	.feature_en = 1,
	.pbm_drv_done = 0,	/* set once the worker kthread exists */
	.hpf_en = 31,		/* bin: 11111 (Flash, GPU, CPU, MD, DLPT) */
};
/* Cleared on PM_SUSPEND_PREPARE so the worker skips re-allocation. */
int g_dlpt_need_do = 1;
/* Serializes the worker loop against the PM notifier. */
static DEFINE_MUTEX(pbm_mutex);
/* Protects all access to hpf_ctrl. */
static DEFINE_MUTEX(pbm_table_lock);
static struct task_struct *pbm_thread;
/* Count of pending wake-up requests for pbm_thread. */
static atomic_t kthread_nreq = ATOMIC_INIT(0);
/* extern u32 get_devinfo_with_index(u32 index); */
/* Read the current battery voltage (mV) from the PMIC auxiliary ADC. */
int get_battery_volt(void)
{
	return PMIC_IMM_GetOneChannelValue(MT6328_AUX_BATSNS_AP, 5, 1);
	/* return 3900; */
}
  83. unsigned int ma_to_mw(unsigned int val)
  84. {
  85. unsigned int bat_vol = 0;
  86. unsigned int ret_val = 0;
  87. bat_vol = get_battery_volt(); /* return mV */
  88. ret_val = (bat_vol * val) / 1000; /* mW = (mV * mA)/1000 */
  89. pbm_crit("[%s] %d(mV) * %d(mA) = %d(mW)\n", __func__, bat_vol, val, ret_val);
  90. return ret_val;
  91. }
  92. void dump_kicker_info(void)
  93. {
  94. struct hpf *hpfmgr = &hpf_ctrl;
  95. #if 1
  96. pbm_debug("(M/F/G)=%d,%d,%d;(C/G)=%ld,%ld\n",
  97. hpfmgr->switch_md1,
  98. hpfmgr->switch_flash,
  99. hpfmgr->switch_gpu, hpfmgr->loading_cpu, hpfmgr->loading_gpu);
  100. #else
  101. pbm_debug
  102. ("[***] Switch (MD1: %d, MD2: %d, GPU: %d, Flash: %d, CPU_volt: %d, GPU_volt: %d, CPU_num: %d)\n",
  103. hpfmgr->switch_md1, hpfmgr->switch_md2, hpfmgr->switch_gpu, hpfmgr->switch_flash,
  104. hpfmgr->cpu_volt, hpfmgr->gpu_volt, hpfmgr->cpu_num);
  105. pbm_debug
  106. ("[***] Resource (DLPT: %ld, Leakage: %ld, MD: %ld, CPU: %ld, GPU: %ld, Flash: %ld)\n",
  107. hpfmgr->loading_dlpt, hpfmgr->loading_leakage, hpfmgr->loading_md, hpfmgr->loading_cpu,
  108. hpfmgr->loading_gpu, hpfmgr->loading_flash);
  109. #endif
  110. }
  111. int hpf_get_power_leakage(void)
  112. {
  113. struct hpf *hpfmgr = &hpf_ctrl;
  114. unsigned int leakage_cpu = 0, leakage_gpu = 0;
  115. leakage_cpu = mt_cpufreq_get_leakage_mw(0);
  116. leakage_gpu = mt_gpufreq_get_leakage_mw();
  117. hpfmgr->loading_leakage = leakage_cpu + leakage_gpu;
  118. pbm_debug("[%s] %ld=%d+%d\n", __func__, hpfmgr->loading_leakage, leakage_cpu, leakage_gpu);
  119. return hpfmgr->loading_leakage;
  120. }
  121. unsigned long hpf_get_power_cpu(void)
  122. {
  123. struct hpf *hpfmgr = &hpf_ctrl;
  124. return hpfmgr->loading_cpu;
  125. }
  126. unsigned long hpf_get_power_gpu(void)
  127. {
  128. struct hpf *hpfmgr = &hpf_ctrl;
  129. if (hpfmgr->switch_gpu)
  130. return hpfmgr->loading_gpu;
  131. else
  132. return 0;
  133. }
  134. unsigned long hpf_get_power_flash(void)
  135. {
  136. struct hpf *hpfmgr = &hpf_ctrl;
  137. if (hpfmgr->switch_flash)
  138. return hpfmgr->loading_flash;
  139. else
  140. return 0;
  141. }
  142. unsigned long hpf_get_power_dlpt(void)
  143. {
  144. struct hpf *hpfmgr = &hpf_ctrl;
  145. return hpfmgr->loading_dlpt;
  146. }
  147. unsigned long hpf_get_power_md(void)
  148. {
  149. struct hpf *hpfmgr = &hpf_ctrl;
  150. if (hpfmgr->switch_md1 | hpfmgr->switch_md2)
  151. return hpfmgr->loading_md;
  152. else
  153. return 0;
  154. }
/*
 * Core allocation pass: split the DLPT power budget among consumers.
 *
 * Snapshot the kicker table under pbm_table_lock, subtract the fixed
 * consumers (leakage, modem, flash) from the DLPT budget, then hand the
 * remainder to the CPU alone, or to CPU and GPU proportionally to their
 * requests, via mt_cpufreq/mt_gpufreq_set_power_limit_by_pbm().
 */
static void pbm_allocate_budget_manager(void)
{
	int _dlpt = 0, leakage = 0, md = 0, dlpt = 0, cpu = 0, gpu = 0, flash = 0;
	int tocpu = 0, togpu = 0;
	int multiple = 0;
	/* thermal-defined floor for the CPU budget (mW) */
	int cpu_lower_bound = tscpu_get_min_cpu_pwr();
	mutex_lock(&pbm_table_lock);
	/* dump_kicker_info(); */
	leakage = hpf_get_power_leakage();
	md = hpf_get_power_md();
	dlpt = hpf_get_power_dlpt();
	cpu = hpf_get_power_cpu();
	gpu = hpf_get_power_gpu();
	flash = hpf_get_power_flash();
	mutex_unlock(&pbm_table_lock);
	/* no any resource can allocate */
	if (dlpt == 0) {
		pbm_debug("DLPT=0\n");
		return;
	}
	/* budget left for CPU+GPU after the fixed consumers; clamp at 0 */
	_dlpt = dlpt - (leakage + md + flash);
	if (_dlpt < 0)
		_dlpt = 0;
	/* if gpu no need resource, so all allocate to cpu */
	if (gpu == 0) {
		tocpu = _dlpt;
		/* check CPU lower bound */
		if (tocpu < cpu_lower_bound)
			tocpu = cpu_lower_bound;
		/* never pass 0: 0 releases the limit (see pbm_thread_handle) */
		if (tocpu <= 0)
			tocpu = 1;
		mt_cpufreq_set_power_limit_by_pbm(tocpu);
	} else {
		/* scale both requests by a common x1000 fixed-point ratio;
		 * gpu != 0 here, so cpu + gpu cannot be zero */
		multiple = (_dlpt * 1000) / (cpu + gpu);
		if (multiple > 0) {
			tocpu = (multiple * cpu) / 1000;
			togpu = (multiple * gpu) / 1000;
		} else {
			tocpu = 1;
			togpu = 1;
		}
		/* check CPU lower bound */
		if (tocpu < cpu_lower_bound) {
			tocpu = cpu_lower_bound;
			/* NOTE(review): togpu here may exceed the GPU's own
			 * request (gpu) -- confirm this over-grant is intended */
			togpu = _dlpt - cpu_lower_bound;
		}
		if (tocpu <= 0)
			tocpu = 1;
		if (togpu <= 0)
			togpu = 1;
		mt_cpufreq_set_power_limit_by_pbm(tocpu);
		mt_gpufreq_set_power_limit_by_pbm(togpu);
	}
	if (mt_pbm_debug) {
		pbm_debug("(C/G)=%d,%d => (D/L/M/F/C/G)=%d,%d,%d,%d,%d,%d (Multi:%d),%d\n",
			  cpu, gpu, dlpt, leakage, md, flash, tocpu, togpu, multiple, cpu_lower_bound);
	} else {
		/* even with debug off, report when a consumer got less
		 * than it asked for (i.e. it is being throttled) */
		if ((cpu > tocpu) || (gpu > togpu))
			pbm_crit("(C/G)=%d,%d => (D/L/M/F/C/G)=%d,%d,%d,%d,%d,%d (Multi:%d),%d\n",
				 cpu, gpu, dlpt, leakage, md, flash, tocpu, togpu, multiple, cpu_lower_bound);
	}
}
  217. static bool pbm_func_enable_check(void)
  218. {
  219. struct pbm *pwrctrl = &pbm_ctrl;
  220. if (!pwrctrl->feature_en || !pwrctrl->pbm_drv_done) {
  221. pbm_crit("feature_en: %d, pbm_drv_done: %d\n", pwrctrl->feature_en, pwrctrl->pbm_drv_done);
  222. return false;
  223. }
  224. return true;
  225. }
  226. static bool pbm_update_table_info(enum pbm_kicker kicker, struct mrp *mrpmgr)
  227. {
  228. struct hpf *hpfmgr = &hpf_ctrl;
  229. bool is_update = false;
  230. switch (kicker) {
  231. case KR_DLPT: /* kicker 0 */
  232. if (hpfmgr->loading_dlpt != mrpmgr->loading_dlpt) {
  233. hpfmgr->loading_dlpt = mrpmgr->loading_dlpt;
  234. is_update = true;
  235. }
  236. break;
  237. case KR_MD: /* kicker 1 */
  238. if (mrpmgr->idMD == MD1) {
  239. if (hpfmgr->switch_md1 != mrpmgr->switch_md) {
  240. hpfmgr->switch_md1 = mrpmgr->switch_md;
  241. is_update = true;
  242. }
  243. }
  244. if (mrpmgr->idMD == MD2) {
  245. if (hpfmgr->switch_md2 != mrpmgr->switch_md) {
  246. hpfmgr->switch_md2 = mrpmgr->switch_md;
  247. is_update = true;
  248. }
  249. }
  250. break;
  251. case KR_CPU: /* kicker 2 */
  252. hpfmgr->cpu_volt = mrpmgr->cpu_volt;
  253. if (hpfmgr->loading_cpu != mrpmgr->loading_cpu
  254. || hpfmgr->cpu_num != mrpmgr->cpu_num) {
  255. hpfmgr->loading_cpu = mrpmgr->loading_cpu;
  256. hpfmgr->cpu_num = mrpmgr->cpu_num;
  257. is_update = true;
  258. }
  259. break;
  260. case KR_GPU: /* kicker 3 */
  261. hpfmgr->gpu_volt = mrpmgr->gpu_volt;
  262. if (hpfmgr->switch_gpu != mrpmgr->switch_gpu
  263. || hpfmgr->loading_gpu != mrpmgr->loading_gpu) {
  264. hpfmgr->switch_gpu = mrpmgr->switch_gpu;
  265. hpfmgr->loading_gpu = mrpmgr->loading_gpu;
  266. is_update = true;
  267. }
  268. break;
  269. case KR_FLASH: /* kicker 4 */
  270. if (hpfmgr->switch_flash != mrpmgr->switch_flash) {
  271. hpfmgr->switch_flash = mrpmgr->switch_flash;
  272. is_update = true;
  273. }
  274. break;
  275. default:
  276. pbm_crit("[%s] ERROR, unknown kicker [%d]\n", __func__, kicker);
  277. is_update = false;
  278. break;
  279. }
  280. return is_update;
  281. }
/*
 * Wake the PBM worker if no request is already pending.
 * A flash-on kick is made synchronous: the caller spins until the
 * worker consumes the request (kthread_nreq back to 0), so the budget
 * is rebalanced before the flash actually fires.
 * NOTE(review): this is a raw busy-wait with no cpu_relax()/timeout;
 * if the worker is starved it can spin indefinitely -- consider a
 * completion instead.
 */
static void pbm_wake_up_thread(enum pbm_kicker kicker, struct mrp *mrpmgr)
{
	if (atomic_read(&kthread_nreq) <= 0) {
		atomic_inc(&kthread_nreq);
		wake_up_process(pbm_thread);
	}
	while (kicker == KR_FLASH && mrpmgr->switch_flash == 1) {
		if (atomic_read(&kthread_nreq) == 0)
			return;
	}
}
  293. static void mtk_power_budget_manager(enum pbm_kicker kicker, struct mrp *mrpmgr)
  294. {
  295. bool pbm_enable = false;
  296. bool pbm_update = false;
  297. mutex_lock(&pbm_table_lock);
  298. pbm_update = pbm_update_table_info(kicker, mrpmgr);
  299. mutex_unlock(&pbm_table_lock);
  300. if (!pbm_update)
  301. return;
  302. pbm_enable = pbm_func_enable_check();
  303. if (!pbm_enable)
  304. return;
  305. pbm_wake_up_thread(kicker, mrpmgr);
  306. }
  307. /*
  308. * kicker: 0
  309. * who call : PMIC
  310. * i_max: mA
  311. * condition: persentage decrease 1%, then update i_max
  312. */
  313. void kicker_pbm_by_dlpt(unsigned int i_max)
  314. {
  315. struct pbm *pwrctrl = &pbm_ctrl;
  316. struct mrp mrpmgr;
  317. mrpmgr.loading_dlpt = ma_to_mw(i_max);
  318. if (BIT_CHECK(pwrctrl->hpf_en, KR_DLPT))
  319. mtk_power_budget_manager(KR_DLPT, &mrpmgr);
  320. }
  321. /*
  322. * kicker: 1
  323. * who call : MD
  324. * condition: on/off
  325. */
  326. void kicker_pbm_by_md(enum md_id id, bool status)
  327. {
  328. struct pbm *pwrctrl = &pbm_ctrl;
  329. struct mrp mrpmgr;
  330. mrpmgr.idMD = id;
  331. mrpmgr.switch_md = status;
  332. if (BIT_CHECK(pwrctrl->hpf_en, KR_MD))
  333. mtk_power_budget_manager(KR_MD, &mrpmgr);
  334. }
  335. /*
  336. * kicker: 2
  337. * who call : CPU
  338. * loading: mW
  339. * condition: opp changed
  340. */
  341. void kicker_pbm_by_cpu(unsigned int loading, int core, int voltage)
  342. {
  343. struct pbm *pwrctrl = &pbm_ctrl;
  344. struct mrp mrpmgr;
  345. mrpmgr.loading_cpu = loading;
  346. mrpmgr.cpu_num = core;
  347. mrpmgr.cpu_volt = voltage;
  348. if (BIT_CHECK(pwrctrl->hpf_en, KR_CPU))
  349. mtk_power_budget_manager(KR_CPU, &mrpmgr);
  350. }
  351. /*
  352. * kicker: 3
  353. * who call : GPU
  354. * loading: mW
  355. * condition: opp changed
  356. */
  357. void kicker_pbm_by_gpu(bool status, unsigned int loading, int voltage)
  358. {
  359. struct pbm *pwrctrl = &pbm_ctrl;
  360. struct mrp mrpmgr;
  361. mrpmgr.switch_gpu = status;
  362. mrpmgr.loading_gpu = loading;
  363. mrpmgr.gpu_volt = voltage;
  364. if (BIT_CHECK(pwrctrl->hpf_en, KR_GPU))
  365. mtk_power_budget_manager(KR_GPU, &mrpmgr);
  366. }
  367. /*
  368. * kicker: 4
  369. * who call : Flash
  370. * condition: on/off
  371. */
  372. void kicker_pbm_by_flash(bool status)
  373. {
  374. struct pbm *pwrctrl = &pbm_ctrl;
  375. struct mrp mrpmgr;
  376. mrpmgr.switch_flash = status;
  377. if (BIT_CHECK(pwrctrl->hpf_en, KR_FLASH))
  378. mtk_power_budget_manager(KR_FLASH, &mrpmgr);
  379. }
/* extern int g_dlpt_stop; in mt_pbm.h*/
/* Set once limits have been released during a g_dlpt_stop window, so the
 * release is performed only once per disable period. */
int g_dlpt_state_sync = 0;
/*
 * PBM worker thread: sleeps until a kicker posts a request
 * (kthread_nreq > 0), then runs one allocation pass under pbm_mutex.
 * When PBM is stopped (g_dlpt_stop), it instead releases the CPU/GPU
 * limits once (limit value 0 = no limit).
 */
static int pbm_thread_handle(void *data)
{
	while (1) {
		/* must set state before the checks to avoid a lost wake-up */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		if (atomic_read(&kthread_nreq) <= 0) {
			schedule();
			continue;
		}
		mutex_lock(&pbm_mutex);
		/* skipped entirely while suspend is in progress */
		if (g_dlpt_need_do == 1) {
			if (g_dlpt_stop == 0) {
				pbm_allocate_budget_manager();
				g_dlpt_state_sync = 0;
			} else {
				pbm_err("DISABLE PBM\n");
				if (g_dlpt_state_sync == 0) {
					mt_cpufreq_set_power_limit_by_pbm(0);
					mt_gpufreq_set_power_limit_by_pbm(0);
					g_dlpt_state_sync = 1;
					pbm_err("Release DLPT limit\n");
				}
			}
		}
		/* consume the request; flash kickers spin on this count */
		atomic_dec(&kthread_nreq);
		mutex_unlock(&pbm_mutex);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
  413. static int create_pbm_kthread(void)
  414. {
  415. struct pbm *pwrctrl = &pbm_ctrl;
  416. pbm_thread = kthread_create(pbm_thread_handle, (void *)NULL, "pbm");
  417. if (IS_ERR(pbm_thread))
  418. return PTR_ERR(pbm_thread);
  419. wake_up_process(pbm_thread);
  420. pwrctrl->pbm_drv_done = 1; /* avoid other hpf call thread before thread init done */
  421. return 0;
  422. }
  423. static int
  424. _mt_pbm_pm_callback(struct notifier_block *nb,
  425. unsigned long action, void *ptr)
  426. {
  427. switch (action) {
  428. case PM_SUSPEND_PREPARE:
  429. pbm_err("PM_SUSPEND_PREPARE:start\n");
  430. mutex_lock(&pbm_mutex);
  431. g_dlpt_need_do = 0;
  432. mutex_unlock(&pbm_mutex);
  433. pbm_err("PM_SUSPEND_PREPARE:end\n");
  434. break;
  435. case PM_HIBERNATION_PREPARE:
  436. break;
  437. case PM_POST_SUSPEND:
  438. pbm_err("PM_POST_SUSPEND:start\n");
  439. mutex_lock(&pbm_mutex);
  440. g_dlpt_need_do = 1;
  441. mutex_unlock(&pbm_mutex);
  442. pbm_err("PM_POST_SUSPEND:end\n");
  443. break;
  444. case PM_POST_HIBERNATION:
  445. break;
  446. default:
  447. return NOTIFY_DONE;
  448. }
  449. return NOTIFY_OK;
  450. }
  451. #if 1 /* CONFIG_PBM_PROC_FS */
  452. /*
  453. * show current debug status
  454. */
  455. static int mt_pbm_debug_proc_show(struct seq_file *m, void *v)
  456. {
  457. if (mt_pbm_debug)
  458. seq_puts(m, "pbm debug enabled\n");
  459. else
  460. seq_puts(m, "pbm debug disabled\n");
  461. return 0;
  462. }
  463. /*
  464. * enable debug message
  465. */
  466. static ssize_t mt_pbm_debug_proc_write(struct file *file, const char __user *buffer,
  467. size_t count, loff_t *data)
  468. {
  469. char desc[32];
  470. int len = 0;
  471. int debug = 0;
  472. len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
  473. if (copy_from_user(desc, buffer, len))
  474. return 0;
  475. desc[len] = '\0';
  476. /* if (sscanf(desc, "%d", &debug) == 1) { */
  477. if (kstrtoint(desc, 10, &debug) == 0) {
  478. if (debug == 0)
  479. mt_pbm_debug = 0;
  480. else if (debug == 1)
  481. mt_pbm_debug = 1;
  482. else
  483. pbm_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
  484. } else
  485. pbm_warn("bad argument!! should be 0 or 1 [0: disable, 1: enable]\n");
  486. return count;
  487. }
/* Boilerplate generators for seq_file-based proc entries:
 * PROC_FOPS_RW(name) emits an open function plus file_operations with a
 * write handler; PROC_FOPS_RO(name) emits the read-only variant.
 * NOTE(review): PROC_FOPS_RO is currently unused in this file. */
#define PROC_FOPS_RW(name) \
static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode)); \
} \
static const struct file_operations mt_ ## name ## _proc_fops = { \
	.owner = THIS_MODULE, \
	.open = mt_ ## name ## _proc_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
	.write = mt_ ## name ## _proc_write, \
}
#define PROC_FOPS_RO(name) \
static int mt_ ## name ## _proc_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, mt_ ## name ## _proc_show, PDE_DATA(inode)); \
} \
static const struct file_operations mt_ ## name ## _proc_fops = { \
	.owner = THIS_MODULE, \
	.open = mt_ ## name ## _proc_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}
/* Name/fops pair for the entries[] table below. */
#define PROC_ENTRY(name) {__stringify(name), &mt_ ## name ## _proc_fops}
  514. PROC_FOPS_RW(pbm_debug);
  515. static int mt_pbm_create_procfs(void)
  516. {
  517. struct proc_dir_entry *dir = NULL;
  518. int i;
  519. struct pentry {
  520. const char *name;
  521. const struct file_operations *fops;
  522. };
  523. const struct pentry entries[] = {
  524. PROC_ENTRY(pbm_debug),
  525. };
  526. dir = proc_mkdir("pbm", NULL);
  527. if (!dir) {
  528. pbm_err("fail to create /proc/pbm @ %s()\n", __func__);
  529. return -ENOMEM;
  530. }
  531. for (i = 0; i < ARRAY_SIZE(entries); i++) {
  532. if (!proc_create
  533. (entries[i].name, S_IRUGO | S_IWUSR | S_IWGRP, dir, entries[i].fops))
  534. pbm_err("@%s: create /proc/pbm/%s failed\n", __func__,
  535. entries[i].name);
  536. }
  537. return 0;
  538. }
  539. #endif /* CONFIG_PBM_PROC_FS */
  540. static int __init pbm_module_init(void)
  541. {
  542. int ret = 0;
  543. #if 1 /* CONFIG_PBM_PROC_FS */
  544. mt_pbm_create_procfs();
  545. #endif
  546. pm_notifier(_mt_pbm_pm_callback, 0);
  547. register_dlpt_notify(&kicker_pbm_by_dlpt, DLPT_PRIO_PBM);
  548. ret = create_pbm_kthread();
  549. pbm_crit("pbm_module_init : Done\n");
  550. if (ret) {
  551. pbm_err("FAILED TO CREATE PBM KTHREAD\n");
  552. return ret;
  553. }
  554. return ret;
  555. }
  556. #else /* #ifndef DISABLE_PBM_FEATURE */
/* DISABLE_PBM_FEATURE build: every kicker entry point becomes a no-op
 * so callers elsewhere in the tree still compile and link. */
void kicker_pbm_by_dlpt(unsigned int i_max)
{
}
void kicker_pbm_by_md(enum md_id id, bool status)
{
}
void kicker_pbm_by_cpu(unsigned int loading, int core, int voltage)
{
}
void kicker_pbm_by_gpu(bool status, unsigned int loading, int voltage)
{
}
void kicker_pbm_by_flash(bool status)
{
}
/* Stub init: just announce that PBM is compiled out. */
static int __init pbm_module_init(void)
{
	pr_crit("DISABLE_PBM_FEATURE is defined.\n");
	return 0;
}
  577. #endif /* #ifndef DISABLE_PBM_FEATURE */
/* No teardown: the procfs entries and worker thread persist for the
 * life of the system. */
static void __exit pbm_module_exit(void)
{
}
module_init(pbm_module_init);
module_exit(pbm_module_exit);
MODULE_AUTHOR("Max Yu <max.yu@mediatek.com>");
MODULE_DESCRIPTION("PBM Driver v0.1");