/* memory-lowpower-task.c - screen-state driven memory low-power worker */
#define pr_fmt(fmt) "memory-lowpower-task: " fmt

#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>
#include <linux/init.h>
#include <linux/rwlock.h>

/* Trigger method for screen on/off */
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#else
#include <linux/fb.h>
#endif

/* Memory lowpower private header file */
#include "internal.h"

/* Print wrappers - compiled out by default; swap in pr_alert() to debug */
#define MLPT_PRINT(args...) do {} while (0) /* pr_alert(args) */
#define MLPT_PRERR(args...) do {} while (0) /* pr_alert(args) */

/* Profile Timing - wraps a section with sched_clock() timestamps */
#ifdef CONFIG_MLPT_PROFILE
static unsigned long long start_ns, end_ns;
#define MLPT_START_PROFILE() {start_ns = sched_clock(); }
#define MLPT_END_PROFILE() do {\
end_ns = sched_clock();\
MLPT_PRINT(" {{{Elapsed[%llu]ns}}}\n", (end_ns - start_ns));\
} while (0)
#else /* !CONFIG_MLPT_PROFILE */
#define MLPT_START_PROFILE() do {} while (0)
#define MLPT_END_PROFILE() do {} while (0)
#endif

/* List of memory lowpower features' specific operations, sorted by level */
static LIST_HEAD(memory_lowpower_handlers);
static DEFINE_MUTEX(memory_lowpower_lock);	/* protects the handler list */

/* Control parameters for memory lowpower task */
static struct task_struct *memory_lowpower_task;	/* worker kthread */
static enum power_state memory_lowpower_action;		/* latest requested action */
static unsigned long memory_lowpower_state;		/* Mlps* state bits */
static int get_cma_aligned; /* in PAGE_SIZE order */
static int get_cma_num; /* Number of allocation */
static unsigned long get_cma_size; /* in PAGES */
/* Per-chunk page array; NULL means the CMA area is taken as one block */
static struct page **cma_aligned_pages;
  43. /*
  44. * Set aligned allocation -
  45. * @aligned: Requested alignment of pages (in PAGE_SIZE order).
  46. */
  47. void set_memory_lowpower_aligned(int aligned)
  48. {
  49. unsigned long size, num;
  50. /* No need to update */
  51. if ((get_cma_aligned != 0 && get_cma_aligned <= aligned) || aligned < 0)
  52. return;
  53. /* cma_aligned_pages is in use */
  54. if (get_cma_aligned != 0 && cma_aligned_pages[0] != NULL)
  55. return;
  56. /* Check whether size is a multiple of num */
  57. size = (memory_lowpower_cma_size() >> PAGE_SHIFT);
  58. num = size >> aligned;
  59. if (size != (num << aligned))
  60. return;
  61. /* Update aligned allocation */
  62. get_cma_aligned = aligned;
  63. get_cma_size = 1 << aligned;
  64. get_cma_num = num;
  65. if (cma_aligned_pages != NULL) {
  66. kfree(cma_aligned_pages);
  67. cma_aligned_pages = NULL;
  68. }
  69. /* If it is page-aligned, cma_aligned_pages is not needed */
  70. if (num != size)
  71. cma_aligned_pages = kcalloc(num, sizeof(*cma_aligned_pages), GFP_KERNEL);
  72. MLPT_PRINT("%s: aligned[%d] size[%lu] num[%d] array[%p]\n",
  73. __func__, get_cma_aligned, get_cma_size, get_cma_num, cma_aligned_pages);
  74. }
  75. /* Insert allocated buffer */
  76. static void insert_buffer(struct page *page, int bound)
  77. {
  78. #ifdef MLPT_INSERT_SORT
  79. struct page *tmp;
  80. int i;
  81. MLPT_PRINT("%s: PFN[%lu]\n", __func__, page_to_pfn(page));
  82. for (i = 0; i < bound; i++) {
  83. MLPT_PRINT("[%d] ", i);
  84. if (page < cma_aligned_pages[i]) {
  85. tmp = cma_aligned_pages[i];
  86. MLPT_PRINT("Swap (page)PFN[%lu] (tmp)PFN[%lu]\n", page_to_pfn(page), page_to_pfn(tmp));
  87. cma_aligned_pages[i] = page;
  88. page = tmp;
  89. }
  90. MLPT_PRINT("\n");
  91. }
  92. cma_aligned_pages[i] = page;
  93. #else
  94. MLPT_PRINT("%s: PFN[%lu] index[%d]\n", __func__, page_to_pfn(page), bound);
  95. cma_aligned_pages[bound] = page;
  96. #endif
  97. }
  98. /* Wrapper for memory lowpower CMA allocation */
  99. static int acquire_memory(void)
  100. {
  101. int i = 0, ret;
  102. struct page *page;
  103. /* Full allocation */
  104. if (cma_aligned_pages == NULL)
  105. return get_memory_lowpower_cma();
  106. /* Find the 1st null position */
  107. while (i < get_cma_num && cma_aligned_pages[i] != NULL)
  108. ++i;
  109. /* Aligned allocation */
  110. while (i < get_cma_num) {
  111. ret = get_memory_lowpower_cma_aligned(get_cma_size, get_cma_aligned, &page);
  112. if (ret)
  113. break;
  114. MLPT_PRINT("%s: PFN[%lu] allocated for [%d]\n", __func__, page_to_pfn(page), i);
  115. insert_buffer(page, i);
  116. ++i;
  117. }
  118. return 0;
  119. }
  120. /* Wrapper for memory lowpower CMA free */
  121. static int release_memory(void)
  122. {
  123. int i = 0, ret;
  124. struct page **pages;
  125. /* Full release */
  126. if (cma_aligned_pages == NULL)
  127. return put_memory_lowpower_cma();
  128. /* Aligned release */
  129. pages = cma_aligned_pages;
  130. do {
  131. if (pages[i] == NULL)
  132. break;
  133. ret = put_memory_lowpower_cma_aligned(get_cma_size, pages[i]);
  134. if (!ret) {
  135. MLPT_PRINT("%s: PFN[%lu] released for [%d]\n", __func__, page_to_pfn(pages[i]), i);
  136. pages[i] = NULL;
  137. }
  138. } while (++i < get_cma_num);
  139. return 0;
  140. }
  141. /* Query CMA allocated buffer */
  142. static void memory_range(int which, unsigned long *spfn, unsigned long *epfn)
  143. {
  144. *spfn = *epfn = 0;
  145. /* Sanity check */
  146. if (which >= get_cma_num)
  147. goto out;
  148. /* Range of full allocation */
  149. if (cma_aligned_pages == NULL) {
  150. *spfn = __phys_to_pfn(memory_lowpower_cma_base());
  151. *epfn = __phys_to_pfn(memory_lowpower_cma_base() + memory_lowpower_cma_size());
  152. goto out;
  153. }
  154. /* Range of aligned allocation */
  155. if (cma_aligned_pages[which] != NULL) {
  156. *spfn = page_to_pfn(cma_aligned_pages[which]);
  157. *epfn = *spfn + get_cma_size;
  158. }
  159. out:
  160. MLPT_PRINT("%s: [%d] spfn[%lu] epfn[%lu]\n", __func__, which, *spfn, *epfn);
  161. }
  162. /* Register API for memory lowpower operation */
  163. void register_memory_lowpower_operation(struct memory_lowpower_operation *handler)
  164. {
  165. struct list_head *pos;
  166. mutex_lock(&memory_lowpower_lock);
  167. list_for_each(pos, &memory_lowpower_handlers) {
  168. struct memory_lowpower_operation *e;
  169. e = list_entry(pos, struct memory_lowpower_operation, link);
  170. if (e->level > handler->level)
  171. break;
  172. }
  173. list_add_tail(&handler->link, pos);
  174. mutex_unlock(&memory_lowpower_lock);
  175. }
/* Unregister API for memory lowpower operation */
void unregister_memory_lowpower_operation(struct memory_lowpower_operation *handler)
{
	/* Serialize list mutation against concurrent (un)registration */
	mutex_lock(&memory_lowpower_lock);
	list_del(&handler->link);
	mutex_unlock(&memory_lowpower_lock);
}
/*
 * Screen-on cb operations: undo a previous screen-off by running each
 * handler's disable() then restore() callback, then clear the ENABLE
 * state bits for the levels that completed without error.
 */
static void __go_to_screenon(void)
{
	struct memory_lowpower_operation *pos;
	int ret = 0;
	int disabled[NR_MLP_LEVEL] = { 0, };	/* nonzero => that level reported failure */

	/* Apply HW actions only if a previous screen-off enabled them */
	if (!MlpsEnable(&memory_lowpower_state))
		return;

	/*
	 * NOTE(review): the handler list is walked without taking
	 * memory_lowpower_lock - presumably all registration finishes
	 * before screen events start firing; confirm against callers.
	 */
	/* Disable actions */
	list_for_each_entry(pos, &memory_lowpower_handlers, link) {
		if (pos->disable != NULL) {
			ret = pos->disable();
			if (ret) {
				disabled[pos->level] += ret;
				MLPT_PRERR("Fail disable: level[%d] ret[%d]\n", pos->level, ret);
				ret = 0;
			}
		}
	}

	/* Restore actions */
	list_for_each_entry(pos, &memory_lowpower_handlers, link) {
		if (pos->restore != NULL) {
			ret = pos->restore();
			if (ret) {
				disabled[pos->level] += ret;
				MLPT_PRERR("Fail restore: level[%d] ret[%d]\n", pos->level, ret);
				ret = 0;
			}
		}
	}

	/* Clear ENABLE state; feature bits only for levels with no failures */
	ClearMlpsEnable(&memory_lowpower_state);
	if (IS_ENABLED(CONFIG_MTK_DCS) && !disabled[MLP_LEVEL_DCS])
		ClearMlpsEnableDCS(&memory_lowpower_state);
	if (IS_ENABLED(CONFIG_MTK_PASR) && !disabled[MLP_LEVEL_PASR])
		ClearMlpsEnablePASR(&memory_lowpower_state);
}
  221. /* Screen-on operations */
  222. static void go_to_screenon(void)
  223. {
  224. MLPT_PRINT("%s:+\n", __func__);
  225. MLPT_START_PROFILE();
  226. /* Should be SCREENOFF|SCREENIDLE -> SCREENON */
  227. if (MlpsScreenOn(&memory_lowpower_state) &&
  228. !MlpsScreenIdle(&memory_lowpower_state)) {
  229. MLPT_PRERR("Wrong state[%lu]\n", memory_lowpower_state);
  230. goto out;
  231. }
  232. /* HW-related flow for screenon */
  233. __go_to_screenon();
  234. /* Currently in SCREENOFF */
  235. if (!MlpsScreenOn(&memory_lowpower_state))
  236. SetMlpsScreenOn(&memory_lowpower_state);
  237. /* Currently in SCREENIDLE */
  238. if (MlpsScreenIdle(&memory_lowpower_state))
  239. ClearMlpsScreenIdle(&memory_lowpower_state);
  240. /* Release pages */
  241. release_memory();
  242. out:
  243. MLPT_END_PROFILE();
  244. MLPT_PRINT("%s:-\n", __func__);
  245. }
/*
 * Screen-off cb operations: run each handler's config() (passing the
 * buffer count and range-query helper) then enable(), and set the
 * ENABLE state bits for the levels that completed without error.
 */
static void __go_to_screenoff(void)
{
	struct memory_lowpower_operation *pos;
	int ret = 0;
	int enabled[NR_MLP_LEVEL] = { 0, };	/* nonzero => that level reported failure */

	/* Apply HW actions only while screen-off is still the pending action */
	if (!IS_ACTION_SCREENOFF(memory_lowpower_action))
		return;

	/*
	 * NOTE(review): the handler list is walked without taking
	 * memory_lowpower_lock - presumably all registration finishes
	 * before screen events start firing; confirm against callers.
	 */
	/* Config actions */
	list_for_each_entry(pos, &memory_lowpower_handlers, link) {
		if (pos->config != NULL) {
			ret = pos->config(get_cma_num, memory_range);
			if (ret) {
				enabled[pos->level] += ret;
				MLPT_PRERR("Fail config: level[%d] ret[%d]\n", pos->level, ret);
				ret = 0;
			}
		}
	}

	/* Enable actions */
	list_for_each_entry(pos, &memory_lowpower_handlers, link) {
		if (pos->enable != NULL) {
			ret = pos->enable();
			if (ret) {
				enabled[pos->level] += ret;
				MLPT_PRERR("Fail enable: level[%d] ret[%d]\n", pos->level, ret);
				ret = 0;
			}
		}
	}

	/* Set ENABLE state; feature bits only for levels with no failures */
	SetMlpsEnable(&memory_lowpower_state);
	if (IS_ENABLED(CONFIG_MTK_DCS) && !enabled[MLP_LEVEL_DCS])
		SetMlpsEnableDCS(&memory_lowpower_state);
	if (IS_ENABLED(CONFIG_MTK_PASR) && !enabled[MLP_LEVEL_PASR])
		SetMlpsEnablePASR(&memory_lowpower_state);
}
  284. /* Screen-off operations */
  285. static void go_to_screenoff(void)
  286. {
  287. MLPT_PRINT("%s:+\n", __func__);
  288. MLPT_START_PROFILE();
  289. /* Should be SCREENON -> SCREENOFF */
  290. if (!MlpsScreenOn(&memory_lowpower_state)) {
  291. MLPT_PRERR("Wrong state[%lu]\n", memory_lowpower_state);
  292. goto out;
  293. }
  294. /* Collect free pages */
  295. do {
  296. /* Try to collect free pages. If done or can't proceed, then break. */
  297. if (!acquire_memory())
  298. break;
  299. /* Action is changed, just leave here. */
  300. if (!IS_ACTION_SCREENOFF(memory_lowpower_action))
  301. goto out;
  302. } while (1);
  303. /* Clear SCREENON state */
  304. ClearMlpsScreenOn(&memory_lowpower_state);
  305. /* HW-related flow for screenoff */
  306. __go_to_screenoff();
  307. out:
  308. MLPT_END_PROFILE();
  309. MLPT_PRINT("%s:-\n", __func__);
  310. }
/* Screen-idle operations - placeholder, no actions implemented yet */
static void go_to_screenidle(void)
{
	MLPT_PRINT("%s:+\n", __func__);
	MLPT_START_PROFILE();
	/* Actions for screenidle - TBD */
	MLPT_END_PROFILE();
	MLPT_PRINT("%s:-\n", __func__);
}
  320. /*
  321. * Main entry for memory lowpower operations -
  322. * No set_freezable(), no try_to_freeze().
  323. */
  324. static int memory_lowpower_entry(void *p)
  325. {
  326. enum power_state current_action = MLP_INIT;
  327. /* Call freezer_do_not_count to skip me */
  328. freezer_do_not_count();
  329. /* Start actions */
  330. do {
  331. /* Start running */
  332. set_current_state(TASK_RUNNING);
  333. do {
  334. /* Take proper actions */
  335. current_action = memory_lowpower_action;
  336. switch (current_action) {
  337. case MLP_SCREENON:
  338. go_to_screenon();
  339. break;
  340. case MLP_SCREENOFF:
  341. go_to_screenoff();
  342. break;
  343. case MLP_SCREENIDLE:
  344. go_to_screenidle();
  345. break;
  346. default:
  347. MLPT_PRINT("%s: Invalid action[%d]\n", __func__, current_action);
  348. }
  349. } while (current_action != memory_lowpower_action);
  350. /* Schedule me */
  351. set_current_state(TASK_INTERRUPTIBLE);
  352. schedule();
  353. } while (1);
  354. return 0;
  355. }
  356. #ifdef CONFIG_PM
  357. #ifdef CONFIG_HAS_EARLYSUSPEND
  358. /* Early suspend/resume callbacks & descriptor */
  359. static void memory_lowpower_early_suspend(struct early_suspend *h)
  360. {
  361. MLPT_PRINT("%s: SCREENOFF!\n", __func__);
  362. memory_lowpower_action = MLP_SCREENOFF;
  363. /* Wake up task */
  364. wake_up_process(memory_lowpower_task);
  365. }
  366. static void memory_lowpower_late_resume(struct early_suspend *h)
  367. {
  368. MLPT_PRINT("%s: SCREENON!\n", __func__);
  369. memory_lowpower_action = MLP_SCREENON;
  370. /* Wake up task */
  371. wake_up_process(memory_lowpower_task);
  372. }
  373. static struct early_suspend early_suspend_descriptor = {
  374. .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
  375. .suspend = memory_lowpower_early_suspend,
  376. .resume = memory_lowpower_late_resume,
  377. };
  378. #else /* !CONFIG_HAS_EARLYSUSPEND */
  379. /* FB event notifier */
  380. static int memory_lowpower_fb_event(struct notifier_block *notifier, unsigned long event, void *data)
  381. {
  382. struct fb_event *fb_event = data;
  383. int *blank = fb_event->data;
  384. int new_status = *blank ? 1 : 0;
  385. switch (event) {
  386. case FB_EVENT_BLANK:
  387. if (new_status == 0) {
  388. MLPT_PRINT("%s: SCREENON!\n", __func__);
  389. memory_lowpower_action = MLP_SCREENON;
  390. wake_up_process(memory_lowpower_task);
  391. } else {
  392. MLPT_PRINT("%s: SCREENOFF!\n", __func__);
  393. memory_lowpower_action = MLP_SCREENOFF;
  394. wake_up_process(memory_lowpower_task);
  395. }
  396. }
  397. return NOTIFY_DONE;
  398. }
  399. static struct notifier_block fb_notifier_block = {
  400. .notifier_call = memory_lowpower_fb_event,
  401. .priority = 0,
  402. };
  403. #endif
  404. static int __init memory_lowpower_init_pm_ops(void)
  405. {
  406. #ifdef CONFIG_HAS_EARLYSUSPEND
  407. register_early_suspend(&early_suspend_descriptor);
  408. #else
  409. if (fb_register_client(&fb_notifier_block) != 0)
  410. return -1;
  411. #endif
  412. return 0;
  413. }
  414. #endif
/*
 * memory_lowpower_task_init - create the worker thread and hook PM events
 *
 * Returns 0 on success or a negative errno (from kthread_run() or the
 * PM-ops setup).  On PM-ops failure the thread is stopped again.
 */
int __init memory_lowpower_task_init(void)
{
	int ret = 0;

	/* Start a kernel thread */
	memory_lowpower_task = kthread_run(memory_lowpower_entry, NULL, "memory_lowpower_task");
	if (IS_ERR(memory_lowpower_task)) {
		MLPT_PRERR("Failed to start memory_lowpower_task!\n");
		ret = PTR_ERR(memory_lowpower_task);
		goto out;
	}
#ifdef CONFIG_PM
	/* Initialize PM ops */
	ret = memory_lowpower_init_pm_ops();
	if (ret != 0) {
		MLPT_PRERR("Failed to init pm ops!\n");
		/*
		 * NOTE(review): kthread_stop() only returns once the
		 * thread exits - verify memory_lowpower_entry honors
		 * kthread_should_stop(), or this error path blocks.
		 */
		kthread_stop(memory_lowpower_task);
		memory_lowpower_task = NULL;
		goto out;
	}
#endif
	/* Set expected current state: initialized, screen on */
	SetMlpsInit(&memory_lowpower_state);
	SetMlpsScreenOn(&memory_lowpower_state);
out:
	MLPT_PRINT("%s: memory_power_state[%lu]\n", __func__, memory_lowpower_state);
	return ret;
}
late_initcall(memory_lowpower_task_init);