/*
 * mt_spm_internal.c - MediaTek SPM (System Power Manager) internal helpers:
 * PCM firmware load/kick, wakeup-event setup, wake-status decode, and
 * PMIC/BSI suspend support for MT6755/MT6797.
 */
  1. #include <linux/kernel.h>
  2. #include <linux/module.h>
  3. #include <linux/spinlock.h>
  4. #include <linux/atomic.h>
  5. #include <linux/string.h>
  6. #include <linux/delay.h>
  7. #include <linux/of_fdt.h>
  8. #include <mach/mt_spm_mtcmos_internal.h>
  9. #include <asm/setup.h>
  10. #include "mt_spm_internal.h"
  11. #include "mt_spm_vcore_dvfs.h"
  12. #include <mt-plat/upmu_common.h>
/**************************************
* Config and Parameter
**************************************/
/* size of the wake-reason string assembled in __spm_output_wake_reason() */
#define LOG_BUF_SIZE 256

#if defined(CONFIG_ARCH_MT6797)
/* CPU_PWR_STATUS */
/* CPU_PWR_STATUS_2ND */
/* per-core power-status bits: MP0 CPU0 at bit 15 down to MP3 CPU3 at bit 0 */
#define MP0_CPU0 (1U << 15)
#define MP0_CPU1 (1U << 14)
#define MP0_CPU2 (1U << 13)
#define MP0_CPU3 (1U << 12)
#define MP1_CPU0 (1U << 11)
#define MP1_CPU1 (1U << 10)
#define MP1_CPU2 (1U << 9)
#define MP1_CPU3 (1U << 8)
#define MP2_CPU0 (1U << 7)
#define MP2_CPU1 (1U << 6)
#define MP2_CPU2 (1U << 5)
#define MP2_CPU3 (1U << 4)
#define MP3_CPU0 (1U << 3)
#define MP3_CPU1 (1U << 2)
#define MP3_CPU2 (1U << 1)
#define MP3_CPU3 (1U << 0)
#endif
/**************************************
* Define and Declare
**************************************/
/* NOTE(review): presumably serializes SPM register access across callers — confirm with users */
DEFINE_SPINLOCK(__spm_lock);
atomic_t __spm_mainpll_req = ATOMIC_INIT(0);

/* ramp counters consumed by __spm_set_wakeup_event() when timer ramping is enabled */
static u32 pcm_timer_ramp_max = 1;
static u32 pcm_timer_ramp_max_sec_loop = 1;

/*
 * Human-readable names for the 32 wakeup sources latched in r12.
 * The leading space lets __spm_output_wake_reason() concatenate them directly.
 */
const char *wakesrc_str[32] = {
[0] = " R12_PCM_TIMER",
[1] = " R12_MD32_WDT_EVENT_B",
[2] = " R12_KP_IRQ_B",
[3] = " R12_APWDT_EVENT_B",
[4] = " R12_APXGPT1_EVENT_B",
[5] = " R12_CONN2AP_SPM_WAKEUP_B",
[6] = " R12_EINT_EVENT_B",
[7] = " R12_CONN_WDT_IRQ_B",
[8] = " R12_CCIF0_EVENT_B",
[9] = " R12_LOWBATTERY_IRQ_B",
[10] = " R12_MD32_SPM_IRQ_B",
[11] = " R12_26M_WAKE",
[12] = " R12_26M_SLEEP",
[13] = " R12_PCM_WDT_WAKEUP_B",
[14] = " R12_USB_CDSC_B",
[15] = " R12_USB_POWERDWN_B",
[16] = " R12_C2K_WDT_IRQ_B",
[17] = " R12_EINT_EVENT_SECURE_B",
[18] = " R12_CCIF1_EVENT_B",
[19] = " R12_UART0_IRQ_B",
[20] = " R12_AFE_IRQ_MCU_B",
[21] = " R12_THERM_CTRL_EVENT_B",
[22] = " R12_SYS_CIRQ_IRQ_B",
[23] = " R12_MD2_WDT_B",
[24] = " R12_CSYSPWREQ_B",
[25] = " R12_MD1_WDT_B",
[26] = " R12_CLDMA_EVENT_B",
[27] = " R12_SEJ_WDT_GPT_B",
[28] = " R12_ALL_MD32_WAKEUP_B",
[29] = " R12_CPU_IRQ_B",
[30] = " R12_APSRC_WAKE",
[31] = " R12_APSRC_SLEEP",
};

#if defined(CONFIG_ARCH_MT6755)
#define SPM_CPU_PWR_STATUS PWR_STATUS
#define SPM_CPU_PWR_STATUS_2ND PWR_STATUS_2ND
/* logical-CPU index -> power-status bit, used by spm_get_cpu_pwr_status() */
unsigned int spm_cpu_bitmask[NR_CPUS] = {
CA7_CPU0,
CA7_CPU1,
CA7_CPU2,
CA7_CPU3,
CA15_CPU0,
CA15_CPU1,
CA15_CPU2,
CA15_CPU3
};
/* union of all per-core bits, used to mask the raw status registers */
unsigned int spm_cpu_bitmask_all = CA15_CPU3 |
CA15_CPU2 |
CA15_CPU1 |
CA15_CPU0 |
CA7_CPU3 |
CA7_CPU2 |
CA7_CPU1 | CA7_CPU0;
#elif defined(CONFIG_ARCH_MT6797)
#define SPM_CPU_PWR_STATUS CPU_PWR_STATUS
#define SPM_CPU_PWR_STATUS_2ND CPU_PWR_STATUS_2ND
/* FIXME: use `NR_CPUS` after CONFIG_NR_CPUS workaround fixed */
/* logical-CPU index -> power-status bit (10 entries: MP0/MP1 quad + MP2 dual) */
unsigned int spm_cpu_bitmask[10] = {
MP0_CPU0,
MP0_CPU1,
MP0_CPU2,
MP0_CPU3,
MP1_CPU0,
MP1_CPU1,
MP1_CPU2,
MP1_CPU3,
MP2_CPU0,
MP2_CPU1
};
/* union of all per-core bits, used to mask the raw status registers */
unsigned int spm_cpu_bitmask_all = MP0_CPU0 |
MP0_CPU1 |
MP0_CPU2 |
MP0_CPU3 |
MP1_CPU0 |
MP1_CPU1 |
MP1_CPU2 |
MP1_CPU3 |
MP2_CPU0 | MP2_CPU1;
#endif
/**************************************
* Function and API
**************************************/
/*
 * Hand-shake with any resident vcore-dvfs PCM firmware, then reset the PCM
 * engine and reprogram PCM_CON0/PCM_CON1 for a fresh firmware load.
 * Register-write ordering here is significant; do not reorder.
 */
void __spm_reset_and_init_pcm(const struct pcm_desc *pcmdesc)
{
	u32 con1;
	int retry = 0, timeout = 2000;

	/* [Vcorefs] backup r0 to POWER_ON_VAL0 for MEM Ctrl should work during PCM reset */
	if (spm_read(PCM_REG1_DATA) == 0x1) {
		/* r1 == 1: vcore-dvfs F/W is running; wake it and wait for its ack IRQ */
		con1 = spm_read(SPM_WAKEUP_EVENT_MASK);
		spm_write(SPM_WAKEUP_EVENT_MASK, (con1 & ~(0x1)));
#ifdef SPM_VCORE_EN_MT6797
		spm_write(SPM_SW_RSV_1, (spm_read(SPM_SW_RSV_1) & (~0xF)) | SPM_OFFLOAD);
#endif
		spm_write(SPM_CPU_WAKEUP_EVENT, 1);

		/* poll (1 us steps, ~2 ms budget) for the PCM root IRQ as the F/W ack */
		while ((spm_read(SPM_IRQ_STA) & PCM_IRQ_ROOT_MASK_LSB) == 0) {
			if (retry > timeout) {
				/* NOTE(review): on the non-MT6797 path this loop keeps
				 * running and re-dumping after timeout — confirm intended */
				pr_err("[VcoreFS] CPU waiting F/W ack fail, PCM_FSM_STA: 0x%x, timeout: %d\n",
					spm_read(PCM_FSM_STA), timeout);
				pr_err("[VcoreFS] R6: 0x%x, R15: 0x%x\n",
					spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
#ifdef SPM_VCORE_EN_MT6797
				BUG();
#else
				__check_dvfs_halt_source(__spm_vcore_dvfs.pwrctrl->dvfs_halt_src_chk);
				pr_err("[VcoreFS] Next R15=0x%x\n", spm_read(PCM_REG15_DATA));
				pr_err("[VcoreFS] Next R6=0x%x\n", spm_read(PCM_REG6_DATA));
				pr_err("[VcoreFS] Next PCM_FSM_STA=0x%x\n", spm_read(PCM_FSM_STA));
				pr_err("[VcoreFs] Next IRQ_STA=0x%x\n", spm_read(SPM_IRQ_STA));
#endif
			}
			udelay(1);
			retry++;
		}
		spm_write(SPM_CPU_WAKEUP_EVENT, 0);
		spm_write(SPM_WAKEUP_EVENT_MASK, con1);

		/* backup mem control from r0 to POWER_ON_VAL0 */
		if (spm_read(SPM_POWER_ON_VAL0) != spm_read(PCM_REG0_DATA)) {
			spm_crit("VAL0 from 0x%x to 0x%x\n", spm_read(SPM_POWER_ON_VAL0), spm_read(PCM_REG0_DATA));
			spm_write(SPM_POWER_ON_VAL0, spm_read(PCM_REG0_DATA));
		}

		/* disable r0 and r7 to control power */
		spm_write(PCM_PWR_IO_EN, 0);

		/* [Vcorefs] disable pcm timer after leaving FW */
		spm_write(PCM_CON1, SPM_REGWR_CFG_KEY | (spm_read(PCM_CON1) & ~PCM_TIMER_EN_LSB));
#ifdef SPM_VCORE_EN_MT6797
		/* backup vcore state from REG6[24:23] to RSV_5[1:0] */
		spm_write(SPM_SW_RSV_5, (spm_read(SPM_SW_RSV_5) & ~(0x3)) |
			((spm_read(PCM_REG6_DATA) & SPM_VCORE_STA_REG) >> 23));
#endif
	}

	/* reset PCM: pulse PCM_SW_RESET while keeping the clock enabled */
	spm_write(PCM_CON0, SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB | PCM_SW_RESET_LSB);
	spm_write(PCM_CON0, SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB);
	BUG_ON((spm_read(PCM_FSM_STA) & 0x7fffff) != PCM_FSM_STA_DEF); /* PCM reset failed */

	/* init PCM_CON0 (disable event vector) */
	spm_write(PCM_CON0, SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB | EN_IM_SLEEP_DVS_LSB);

	/* init PCM_CON1 (disable PCM timer but keep PCM WDT setting) */
	con1 = spm_read(PCM_CON1) & (PCM_WDT_WAKE_MODE_LSB | PCM_WDT_EN_LSB);
	spm_write(PCM_CON1, con1 | SPM_REGWR_CFG_KEY | EVENT_LOCK_EN_LSB |
		SPM_SRAM_ISOINT_B_LSB | SPM_SRAM_SLEEP_B_LSB |
		(pcmdesc->replace ? 0 : IM_NONRP_EN_LSB) |
		MIF_APBEN_LSB | SCP_APB_INTERNAL_EN_LSB);
}
/*
 * Point the instruction memory (IM) at the PCM firmware image and toggle
 * IM_KICK to start fetching it.
 */
void __spm_kick_im_to_fetch(const struct pcm_desc *pcmdesc)
{
	u32 ptr, len, con0;

	/* tell IM where is PCM code (use slave mode if code existed) */
	if (pcmdesc->base_dma) {
		ptr = pcmdesc->base_dma;
		/* for 4GB mode */
		MAPPING_DRAM_ACCESS_ADDR(ptr);
	} else {
		ptr = base_va_to_pa(pcmdesc->base);
	}
	len = pcmdesc->size - 1;
	if (spm_read(PCM_IM_PTR) != ptr || spm_read(PCM_IM_LEN) != len || pcmdesc->sess > 2) {
		/* different image (or sess > 2): reprogram pointer/length so IM refetches */
		spm_write(PCM_IM_PTR, ptr);
		spm_write(PCM_IM_LEN, len);
	} else {
		/* same image already resident: run IM in slave mode instead of refetching */
		spm_write(PCM_CON1, spm_read(PCM_CON1) | SPM_REGWR_CFG_KEY | IM_SLAVE_LSB);
	}

	/* kick IM to fetch (only toggle IM_KICK) */
	con0 = spm_read(PCM_CON0) & ~(IM_KICK_L_LSB | PCM_KICK_L_LSB);
	spm_write(PCM_CON0, con0 | SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB | IM_KICK_L_LSB);
	spm_write(PCM_CON0, con0 | SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB);
}
/*
 * Seed PCM registers r0/r7 from the POWER_ON_VAL0/VAL1 backups by pulsing
 * the corresponding sync bits in PCM_PWR_IO_EN.
 */
void __spm_init_pcm_register(void)
{
	/* init r0 with POWER_ON_VAL0 */
	spm_write(PCM_REG_DATA_INI, spm_read(SPM_POWER_ON_VAL0));
	spm_write(PCM_PWR_IO_EN, PCM_RF_SYNC_R0);
	spm_write(PCM_PWR_IO_EN, 0);

	/* init r7 with POWER_ON_VAL1 */
	spm_write(PCM_REG_DATA_INI, spm_read(SPM_POWER_ON_VAL1));
	spm_write(PCM_PWR_IO_EN, PCM_RF_SYNC_R7);
	spm_write(PCM_PWR_IO_EN, 0);
}
/* Load all 16 PCM event vectors from the firmware descriptor. */
void __spm_init_event_vector(const struct pcm_desc *pcmdesc)
{
	/* init event vector register */
	spm_write(PCM_EVENT_VECTOR0, pcmdesc->vec0);
	spm_write(PCM_EVENT_VECTOR1, pcmdesc->vec1);
	spm_write(PCM_EVENT_VECTOR2, pcmdesc->vec2);
	spm_write(PCM_EVENT_VECTOR3, pcmdesc->vec3);
	spm_write(PCM_EVENT_VECTOR4, pcmdesc->vec4);
	spm_write(PCM_EVENT_VECTOR5, pcmdesc->vec5);
	spm_write(PCM_EVENT_VECTOR6, pcmdesc->vec6);
	spm_write(PCM_EVENT_VECTOR7, pcmdesc->vec7);
	spm_write(PCM_EVENT_VECTOR8, pcmdesc->vec8);
	spm_write(PCM_EVENT_VECTOR9, pcmdesc->vec9);
	spm_write(PCM_EVENT_VECTOR10, pcmdesc->vec10);
	spm_write(PCM_EVENT_VECTOR11, pcmdesc->vec11);
	spm_write(PCM_EVENT_VECTOR12, pcmdesc->vec12);
	spm_write(PCM_EVENT_VECTOR13, pcmdesc->vec13);
	spm_write(PCM_EVENT_VECTOR14, pcmdesc->vec14);
	spm_write(PCM_EVENT_VECTOR15, pcmdesc->vec15);
	/* event vector will be enabled by PCM itself */
}
/*
 * Program the SPM request/mask registers and per-CPU WFI enables from the
 * given pwr_ctrl. Each !! collapses a field to a single bit before shifting
 * it into its documented register position.
 */
void __spm_set_power_control(const struct pwr_ctrl *pwrctrl)
{
	/* set other SYS request mask */
	spm_write(SPM_AP_STANDBY_CON, (!!pwrctrl->conn_apsrc_sel << 27) |
		(!!pwrctrl->conn_mask_b << 26) |
		(!!pwrctrl->md_apsrc0_sel << 25) |
		(!!pwrctrl->md_apsrc1_sel << 24) |
		(spm_read(SPM_AP_STANDBY_CON) & SRCCLKENI_MASK_B_LSB) | /* bit23: preserve current HW value */
		(!!pwrctrl->lte_mask_b << 22) |
		(!!pwrctrl->scp_req_mask_b << 21) |
		(!!pwrctrl->md2_req_mask_b << 20) |
		(!!pwrctrl->md1_req_mask_b << 19) |
		(!!pwrctrl->md_ddr_dbc_en << 18) |
		(!!pwrctrl->mcusys_idle_mask << 4) |
		(!!pwrctrl->mp1top_idle_mask << 2) |
		(!!pwrctrl->mp0top_idle_mask << 1) |
		(!!pwrctrl->wfi_op << 0));
	spm_write(SPM_SRC_REQ, (!!pwrctrl->cpu_md_dvfs_sop_force_on << 16) |
		(!!pwrctrl->spm_flag_run_common_scenario << 10) |
		(!!pwrctrl->spm_flag_dis_vproc_vsram_dvs << 9) |
		(!!pwrctrl->spm_flag_keep_csyspwrupack_high << 8) |
		(!!pwrctrl->spm_ddren_req << 7) |
		(!!pwrctrl->spm_dvfs_force_down << 6) |
		(!!pwrctrl->spm_dvfs_req << 5) |
		(!!pwrctrl->spm_vrf18_req << 4) |
		(!!pwrctrl->spm_infra_req << 3) |
		(!!pwrctrl->spm_lte_req << 2) |
		(!!pwrctrl->spm_f26m_req << 1) |
		(!!pwrctrl->spm_apsrc_req << 0));
	spm_write(SPM_SRC_MASK,
		(!!pwrctrl->conn_srcclkena_dvfs_req_mask_b << 31) |
		(!!pwrctrl->md_srcclkena_1_dvfs_req_mask_b << 30) |
		(!!pwrctrl->md_srcclkena_0_dvfs_req_mask_b << 29) |
		(!!pwrctrl->emi_bw_dvfs_req_mask << 28) |
		(!!pwrctrl->md_vrf18_req_1_mask_b << 24) |
		(!!pwrctrl->md_vrf18_req_0_mask_b << 23) |
		(!!pwrctrl->md_ddr_en_1_mask_b << 22) |
		(!!pwrctrl->md_ddr_en_0_mask_b << 21) |
		(!!pwrctrl->md32_apsrcreq_infra_mask_b << 20) |
		(!!pwrctrl->conn_apsrcreq_infra_mask_b << 19) |
		(!!pwrctrl->md_apsrcreq_1_infra_mask_b << 18) |
		(!!pwrctrl->md_apsrcreq_0_infra_mask_b << 17) |
		(!!pwrctrl->srcclkeni_infra_mask_b << 16) |
		(!!pwrctrl->md32_srcclkena_infra_mask_b << 15) |
		(!!pwrctrl->conn_srcclkena_infra_mask_b << 14) |
		(!!pwrctrl->md_srcclkena_1_infra_mask_b << 13) |
		(!!pwrctrl->md_srcclkena_0_infra_mask_b << 12) |
		((pwrctrl->vsync_mask_b & 0x1f) << 7) | /* 5-bit field */
		(!!pwrctrl->ccifmd_md2_event_mask_b << 6) |
		(!!pwrctrl->ccifmd_md1_event_mask_b << 5) |
		(!!pwrctrl->ccif1_to_ap_mask_b << 4) |
		(!!pwrctrl->ccif1_to_md_mask_b << 3) |
		(!!pwrctrl->ccif0_to_ap_mask_b << 2) |
		(!!pwrctrl->ccif0_to_md_mask_b << 1));
	spm_write(SPM_SRC2_MASK,
#if defined(CONFIG_ARCH_MT6797)
		(!!pwrctrl->disp_od_req_mask_b << 27) |
#endif
		(!!pwrctrl->cpu_md_emi_dvfs_req_prot_dis << 26) |
		(!!pwrctrl->emi_boost_dvfs_req_mask_b << 25) |
		(!!pwrctrl->sdio_on_dvfs_req_mask_b << 24) |
		(!!pwrctrl->l1_c2k_rccif_wake_mask_b << 23) |
		(!!pwrctrl->ps_c2k_rccif_wake_mask_b << 22) |
		(!!pwrctrl->c2k_l1_rccif_wake_mask_b << 21) |
		(!!pwrctrl->c2k_ps_rccif_wake_mask_b << 20) |
		(!!pwrctrl->mfg_req_mask_b << 19) |
		(!!pwrctrl->disp1_req_mask_b << 18) |
		(!!pwrctrl->disp_req_mask_b << 17) |
		(!!pwrctrl->conn_ddr_en_mask_b << 16) |
		((pwrctrl->vsync_dvfs_halt_mask_b & 0x1f) << 11) | /* 5bit */
		(!!pwrctrl->md2_ddr_en_dvfs_halt_mask_b << 10) |
		(!!pwrctrl->md1_ddr_en_dvfs_halt_mask_b << 9) |
		(!!pwrctrl->cpu_md_dvfs_erq_merge_mask_b << 8) |
		(!!pwrctrl->gce_req_mask_b << 7) |
		(!!pwrctrl->vdec_req_mask_b << 6) |
		((pwrctrl->dvfs_halt_mask_b & 0x1f) << 0)); /* 5bit */
	spm_write(SPM_CLK_CON, (spm_read(SPM_CLK_CON) & ~CC_SRCLKENA_MASK_0) |
		(pwrctrl->srclkenai_mask ? CC_SRCLKENA_MASK_0 : 0));

	/* set CPU WFI mask */
	spm_write(MP1_CPU0_WFI_EN, !!pwrctrl->mp1_cpu0_wfi_en);
	spm_write(MP1_CPU1_WFI_EN, !!pwrctrl->mp1_cpu1_wfi_en);
	spm_write(MP1_CPU2_WFI_EN, !!pwrctrl->mp1_cpu2_wfi_en);
	spm_write(MP1_CPU3_WFI_EN, !!pwrctrl->mp1_cpu3_wfi_en);
	spm_write(MP0_CPU0_WFI_EN, !!pwrctrl->mp0_cpu0_wfi_en);
	spm_write(MP0_CPU1_WFI_EN, !!pwrctrl->mp0_cpu1_wfi_en);
	spm_write(MP0_CPU2_WFI_EN, !!pwrctrl->mp0_cpu2_wfi_en);
	spm_write(MP0_CPU3_WFI_EN, !!pwrctrl->mp0_cpu3_wfi_en);
}
/*
 * Program the PCM wakeup timer, the AP wakeup-source unmask, and the SPM
 * ISR mask from pwr_ctrl. Uses the file-scope ramp counters when a ramping
 * timer mode is requested (stress-test style varying wake intervals).
 */
void __spm_set_wakeup_event(const struct pwr_ctrl *pwrctrl)
{
	u32 val, mask, isr;

	/* set PCM timer (set to max when disable) */
	if (pwrctrl->timer_val_ramp_en != 0) {
		/* ramp 1..299 ticks, then wrap */
		val = pcm_timer_ramp_max;
		pcm_timer_ramp_max++;
		if (pcm_timer_ramp_max >= 300)
			pcm_timer_ramp_max = 1;
	} else if (pwrctrl->timer_val_ramp_en_sec != 0) {
		val = pcm_timer_ramp_max * 1600; /* 50ms */
		pcm_timer_ramp_max += 1;
		if (pcm_timer_ramp_max >= 300) /* max 15 sec */
			pcm_timer_ramp_max = 1;
		/* every ~50th call, stretch the interval instead */
		pcm_timer_ramp_max_sec_loop++;
		if (pcm_timer_ramp_max_sec_loop >= 50) {
			pcm_timer_ramp_max_sec_loop = 0;
			/* range 6min to 10min (32000 ticks == 1 s) */
			val = (pcm_timer_ramp_max + 300) * 32000;
		}
	} else {
		/* fixed timer: cust value wins; fall back to timer_val or the max */
		if (pwrctrl->timer_val_cust == 0)
			val = pwrctrl->timer_val ? : PCM_TIMER_MAX;
		else
			val = pwrctrl->timer_val_cust;
	}
	spm_write(PCM_TIMER_VAL, val);
	spm_write(PCM_CON1, spm_read(PCM_CON1) | SPM_REGWR_CFG_KEY | PCM_TIMER_EN_LSB);

	/* unmask AP wakeup source (register holds the inverted mask) */
	if (pwrctrl->wake_src_cust == 0)
		mask = pwrctrl->wake_src;
	else
		mask = pwrctrl->wake_src_cust;
	if (pwrctrl->syspwreq_mask)
		mask &= ~WAKE_SRC_R12_CSYSPWREQ_B;
	spm_write(SPM_WAKEUP_EVENT_MASK, ~mask);
#if 0
	/* unmask MD32 wakeup source */
	spm_write(SPM_SLEEP_MD32_WAKEUP_EVENT_MASK, ~pwrctrl->wake_src_md32);
#endif

	/* unmask SPM ISR (keep TWAM setting) */
	isr = spm_read(SPM_IRQ_MASK) & SPM_TWAM_IRQ_MASK_LSB;
	spm_write(SPM_IRQ_MASK, isr | ISRM_RET_IRQ_AUX);
}
/*
 * Final pre-run setup (flags, reserve data, DCM lock, r0/r7 power I/O)
 * followed by a PCM_KICK pulse to start the loaded firmware.
 */
void __spm_kick_pcm_to_run(const struct pwr_ctrl *pwrctrl)
{
	u32 con0;

	/* init register to match PCM expectation */
	spm_write(SPM_MAS_PAUSE_MASK_B, 0xffffffff);
	spm_write(SPM_MAS_PAUSE2_MASK_B, 0xffffffff);
	spm_write(PCM_REG_DATA_INI, 0);

	/* set PCM flags and data */
	spm_write(SPM_SW_FLAG, pwrctrl->pcm_flags);
	spm_write(SPM_SW_RSV_0, pwrctrl->pcm_reserve);

	/* lock Infra DCM when PCM runs */
	spm_write(SPM_CLK_CON, (spm_read(SPM_CLK_CON) & ~SPM_LOCK_INFRA_DCM_LSB) |
		(pwrctrl->infra_dcm_lock ? SPM_LOCK_INFRA_DCM_LSB : 0));

	/* enable r0 and r7 to control power */
	spm_write(PCM_PWR_IO_EN, (pwrctrl->r0_ctrl_en ? PCM_PWRIO_EN_R0 : 0) |
		(pwrctrl->r7_ctrl_en ? PCM_PWRIO_EN_R7 : 0));

	/* kick PCM to run (only toggle PCM_KICK) */
	con0 = spm_read(PCM_CON0) & ~(IM_KICK_L_LSB | PCM_KICK_L_LSB);
	spm_write(PCM_CON0, con0 | SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB | PCM_KICK_L_LSB);
	spm_write(PCM_CON0, con0 | SPM_REGWR_CFG_KEY | PCM_CK_EN_LSB);
}
/*
 * Snapshot all wakeup-related SPM registers into *wakesta for later
 * decoding by __spm_output_wake_reason().
 */
void __spm_get_wakeup_status(struct wake_status *wakesta)
{
	/* get PC value if PCM assert (pause abort) */
	wakesta->assert_pc = spm_read(PCM_REG_DATA_INI);

	/* get wakeup event */
	wakesta->r12 = spm_read(SPM_SW_RSV_0);	/* F/W-saved copy of r12 */
	wakesta->r12_ext = spm_read(PCM_REG12_EXT_DATA);
	wakesta->raw_sta = spm_read(SPM_WAKEUP_STA);
	wakesta->raw_ext_sta = spm_read(SPM_WAKEUP_EXT_STA);
	wakesta->wake_misc = spm_read(SPM_BSI_D0_SR); /* backup of SLEEP_WAKEUP_MISC */

	/* get sleep time */
	wakesta->timer_out = spm_read(SPM_BSI_D1_SR); /* backup of PCM_TIMER_OUT */

	/* get other SYS and co-clock status */
	wakesta->r13 = spm_read(PCM_REG13_DATA);
	wakesta->idle_sta = spm_read(SUBSYS_IDLE_STA);

	/* get debug flag for PCM execution check */
	wakesta->debug_flag = spm_read(SPM_SW_DEBUG);

	/* get special pattern (0xf0000 or 0x10000) if sleep abort */
	wakesta->event_reg = spm_read(SPM_BSI_D2_SR); /* PCM_EVENT_REG_STA */

	/* get ISR status */
	wakesta->isr = spm_read(SPM_IRQ_STA);
}
/*
 * Post-wakeup cleanup: clear the CPU wakeup event, re-mask all wakeup
 * sources, and acknowledge ISR status (TWAM excepted). Power I/O and the
 * PCM timer are deliberately left alone for vcore-dvfs (see comments).
 */
void __spm_clean_after_wakeup(void)
{
	/* [Vcorefs] can not switch back to POWER_ON_VAL0 here,
	the FW stays in VCORE DVFS which use r0 to Ctrl MEM */
	/* disable r0 and r7 to control power */
	/* spm_write(PCM_PWR_IO_EN, 0); */

	/* clean CPU wakeup event */
	spm_write(SPM_CPU_WAKEUP_EVENT, 0);

	/* [Vcorefs] not disable pcm timer here, due to the
	following vcore dvfs will use it for latency check */
	/* clean PCM timer event */
	/* spm_write(PCM_CON1, SPM_REGWR_CFG_KEY | (spm_read(PCM_CON1) & ~PCM_TIMER_EN_LSB)); */

	/* clean wakeup event raw status (for edge trigger event) */
	spm_write(SPM_WAKEUP_EVENT_MASK, ~0);

	/* clean ISR status (except TWAM) */
	spm_write(SPM_IRQ_MASK, spm_read(SPM_IRQ_MASK) | ISRM_ALL_EXC_TWAM);
	spm_write(SPM_IRQ_STA, ISRC_ALL_EXC_TWAM);
	spm_write(SPM_SW_INT_CLEAR, PCM_SW_INT_ALL);
}
/* spm_print - log via spm_crit2 when suspend is true, spm_debug otherwise */
#define spm_print(suspend, fmt, args...) \
do { \
	if (!suspend) \
		spm_debug(fmt, ##args); \
	else \
		spm_crit2(fmt, ##args); \
} while (0)
  444. wake_reason_t __spm_output_wake_reason(const struct wake_status *wakesta,
  445. const struct pcm_desc *pcmdesc, bool suspend)
  446. {
  447. int i;
  448. char buf[LOG_BUF_SIZE] = { 0 };
  449. wake_reason_t wr = WR_UNKNOWN;
  450. if (wakesta->assert_pc != 0) {
  451. /* add size check for vcoredvfs */
  452. spm_print(suspend, "PCM ASSERT AT %u (%s%s), r13 = 0x%x, debug_flag = 0x%x\n",
  453. wakesta->assert_pc, (wakesta->assert_pc > pcmdesc->size) ? "NOT " : "",
  454. pcmdesc->version, wakesta->r13, wakesta->debug_flag);
  455. return WR_PCM_ASSERT;
  456. }
  457. if (wakesta->r12 & WAKE_SRC_R12_PCM_TIMER) {
  458. if (wakesta->wake_misc & WAKE_MISC_PCM_TIMER) {
  459. strcat(buf, " PCM_TIMER");
  460. wr = WR_PCM_TIMER;
  461. }
  462. if (wakesta->wake_misc & WAKE_MISC_TWAM) {
  463. strcat(buf, " TWAM");
  464. wr = WR_WAKE_SRC;
  465. }
  466. if (wakesta->wake_misc & WAKE_MISC_CPU_WAKE) {
  467. strcat(buf, " CPU");
  468. wr = WR_WAKE_SRC;
  469. }
  470. }
  471. for (i = 1; i < 32; i++) {
  472. if (wakesta->r12 & (1U << i)) {
  473. if ((strlen(buf) + strlen(wakesrc_str[i])) < LOG_BUF_SIZE)
  474. strncat(buf, wakesrc_str[i], strlen(wakesrc_str[i]));
  475. wr = WR_WAKE_SRC;
  476. }
  477. }
  478. BUG_ON(strlen(buf) >= LOG_BUF_SIZE);
  479. spm_print(suspend, "wake up by%s, timer_out = %u, r13 = 0x%x, debug_flag = 0x%x\n",
  480. buf, wakesta->timer_out, wakesta->r13, wakesta->debug_flag);
  481. spm_print(suspend,
  482. "r12 = 0x%x, r12_ext = 0x%x, raw_sta = 0x%x, idle_sta = 0x%x, event_reg = 0x%x, isr = 0x%x\n",
  483. wakesta->r12, wakesta->r12_ext, wakesta->raw_sta, wakesta->idle_sta,
  484. wakesta->event_reg, wakesta->isr);
  485. spm_print(suspend, "raw_ext_sta = 0x%x, wake_misc = 0x%x", wakesta->raw_ext_sta,
  486. wakesta->wake_misc);
  487. return wr;
  488. }
/*
 * Route md_ddr_en / emi_clk_off_req onto GPIO150/GPIO140 for scope probing
 * and enable PCM debug output.
 * NOTE(review): the 0xf00xxxxx constants are raw MMIO addresses
 * (TEST_MODE_CFG and GPIO mode/drive registers per the comments) —
 * board/SoC specific, confirm against the datasheet before reuse.
 */
void __spm_dbgout_md_ddr_en(bool enable)
{
	/* set TEST_MODE_CFG */
	spm_write(0xf0000230, (spm_read(0xf0000230) & ~(0x7fff << 16)) |
		(0x3 << 26) | (0x3 << 21) | (0x3 << 16));

	/* set md_ddr_en to GPIO150 */
	spm_write(0xf0001500, 0x70e);
	spm_write(0xf00057e4, 0x7);

	/* set emi_clk_off_req to GPIO140 */
	spm_write(0xf000150c, 0x3fe);
	spm_write(0xf00057c4, 0x7);

	/* enable debug output */
	spm_write(PCM_DEBUG_CON, !!enable);
}
  503. unsigned int spm_get_cpu_pwr_status(void)
  504. {
  505. unsigned int pwr_stat[2] = { 0 };
  506. unsigned int stat = 0;
  507. unsigned int ret_stat = 0;
  508. int i;
  509. pwr_stat[0] = spm_read(SPM_CPU_PWR_STATUS);
  510. pwr_stat[1] = spm_read(SPM_CPU_PWR_STATUS_2ND);
  511. stat = (pwr_stat[0] & spm_cpu_bitmask_all) & (pwr_stat[1] & spm_cpu_bitmask_all);
  512. for (i = 0; i < nr_cpu_ids; i++)
  513. if (stat & spm_cpu_bitmask[i])
  514. ret_stat |= (1 << i);
  515. return ret_stat;
  516. }
  517. long int spm_get_current_time_ms(void)
  518. {
  519. struct timeval t;
  520. do_gettimeofday(&t);
  521. return ((t.tv_sec & 0xFFF) * 1000000 + t.tv_usec) / 1000;
  522. }
  523. void __spm_check_md_pdn_power_control(struct pwr_ctrl *pwr_ctrl)
  524. {
  525. if (is_md_c2k_conn_power_off())
  526. pwr_ctrl->pcm_flags |= SPM_FLAG_DIS_MD_INFRA_PDN;
  527. else
  528. pwr_ctrl->pcm_flags &= ~SPM_FLAG_DIS_MD_INFRA_PDN;
  529. }
  530. void __spm_sync_vcore_dvfs_power_control(struct pwr_ctrl *dest_pwr_ctrl, const struct pwr_ctrl *src_pwr_ctrl)
  531. {
  532. /* pwr_ctrl for mask/ctrl register */
  533. dest_pwr_ctrl->dvfs_halt_mask_b = src_pwr_ctrl->dvfs_halt_mask_b;
  534. dest_pwr_ctrl->sdio_on_dvfs_req_mask_b = src_pwr_ctrl->sdio_on_dvfs_req_mask_b;
  535. dest_pwr_ctrl->cpu_md_dvfs_erq_merge_mask_b = src_pwr_ctrl->cpu_md_dvfs_erq_merge_mask_b;
  536. dest_pwr_ctrl->md1_ddr_en_dvfs_halt_mask_b = src_pwr_ctrl->md1_ddr_en_dvfs_halt_mask_b;
  537. dest_pwr_ctrl->md2_ddr_en_dvfs_halt_mask_b = src_pwr_ctrl->md2_ddr_en_dvfs_halt_mask_b;
  538. dest_pwr_ctrl->md_srcclkena_0_dvfs_req_mask_b = src_pwr_ctrl->md_srcclkena_0_dvfs_req_mask_b;
  539. dest_pwr_ctrl->md_srcclkena_1_dvfs_req_mask_b = src_pwr_ctrl->md_srcclkena_1_dvfs_req_mask_b;
  540. dest_pwr_ctrl->conn_srcclkena_dvfs_req_mask_b = src_pwr_ctrl->conn_srcclkena_dvfs_req_mask_b;
  541. dest_pwr_ctrl->vsync_dvfs_halt_mask_b = src_pwr_ctrl->vsync_dvfs_halt_mask_b;
  542. dest_pwr_ctrl->emi_boost_dvfs_req_mask_b = src_pwr_ctrl->emi_boost_dvfs_req_mask_b;
  543. dest_pwr_ctrl->emi_bw_dvfs_req_mask = src_pwr_ctrl->emi_bw_dvfs_req_mask;
  544. dest_pwr_ctrl->cpu_md_emi_dvfs_req_prot_dis = src_pwr_ctrl->cpu_md_emi_dvfs_req_prot_dis;
  545. dest_pwr_ctrl->spm_dvfs_req = src_pwr_ctrl->spm_dvfs_req;
  546. dest_pwr_ctrl->spm_dvfs_force_down = src_pwr_ctrl->spm_dvfs_force_down;
  547. dest_pwr_ctrl->cpu_md_dvfs_sop_force_on = src_pwr_ctrl->cpu_md_dvfs_sop_force_on;
  548. #if defined(SPM_VCORE_EN_MT6755)
  549. dest_pwr_ctrl->dvfs_halt_src_chk = src_pwr_ctrl->dvfs_halt_src_chk;
  550. #endif
  551. /* pwr_ctrl pcm_flag */
  552. if (src_pwr_ctrl->pcm_flags_cust != 0) {
  553. if ((src_pwr_ctrl->pcm_flags_cust & SPM_FLAG_DIS_VCORE_DVS) != 0)
  554. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_DIS_VCORE_DVS;
  555. if ((src_pwr_ctrl->pcm_flags_cust & SPM_FLAG_DIS_VCORE_DFS) != 0)
  556. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_DIS_VCORE_DFS;
  557. if ((src_pwr_ctrl->pcm_flags_cust & SPM_FLAG_EN_MET_DBG_FOR_VCORE_DVFS) != 0)
  558. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_EN_MET_DBG_FOR_VCORE_DVFS;
  559. } else {
  560. if ((src_pwr_ctrl->pcm_flags & SPM_FLAG_DIS_VCORE_DVS) != 0)
  561. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_DIS_VCORE_DVS;
  562. if ((src_pwr_ctrl->pcm_flags & SPM_FLAG_DIS_VCORE_DFS) != 0)
  563. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_DIS_VCORE_DFS;
  564. if ((src_pwr_ctrl->pcm_flags & SPM_FLAG_EN_MET_DBG_FOR_VCORE_DVFS) != 0)
  565. dest_pwr_ctrl->pcm_flags |= SPM_FLAG_EN_MET_DBG_FOR_VCORE_DVFS;
  566. }
  567. }
#if defined(SPM_VCORE_EN_MT6755)
/* SPM_SRC2_MASK bits corresponding to each MM DVFS halt requester */
#define MM_DVFS_DISP_HALT_MASK 0x3
#define MM_DVFS_ISP_HALT_MASK 0x4
#define MM_DVFS_GCE_HALT_MASK 0x10
/*
 * Diagnose which MM engine (ISP, DISP, GCE) is holding the DVFS halt: for
 * each one, temporarily clear its SRC2 mask bit, wait, and dump whether the
 * halt released. The original mask is restored before returning.
 * Called from __spm_reset_and_init_pcm() when the F/W ack times out.
 * Returns 0 always (also when disabled via @enable == 0).
 */
int __check_dvfs_halt_source(int enable)
{
	u32 val, orig_val;

	val = spm_read(SPM_SRC2_MASK);
	orig_val = val;	/* saved for restoration at the end */

	if (enable == 0) {
		pr_err("[VcoreFS]dvfs_halt_src_chk is disabled\n");
		return 0;
	}

	pr_err("[VcoreFS]halt_status(1)=0x%x\n", spm_read(CPU_DVFS_REQ));

	/* --- ISP: unmask, settle 50us, re-dump --- */
	if (val & MM_DVFS_ISP_HALT_MASK) {
		pr_err("[VcoreFS]isp_halt[0]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			val, spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
		spm_write(SPM_SRC2_MASK, (val & ~MM_DVFS_ISP_HALT_MASK));
		udelay(50);
		pr_err("[VcoreFS]isp_halt[1]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			spm_read(SPM_SRC2_MASK), spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
	}

	pr_err("[VcoreFS]halt_status(2)=0x%x\n", spm_read(CPU_DVFS_REQ));

	/* --- DISP: same probe, plus an AEE warning since DISP hangs need a dump --- */
	val = spm_read(SPM_SRC2_MASK);
	if (val & MM_DVFS_DISP_HALT_MASK) {
		pr_err("[VcoreFS]disp_halt[0]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			val, spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
		spm_write(SPM_SRC2_MASK, (val & ~MM_DVFS_DISP_HALT_MASK));
		udelay(50);
		pr_err("[VcoreFS]disp_halt[1]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			spm_read(SPM_SRC2_MASK), spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
		aee_kernel_warning_api(__FILE__, __LINE__,
			DB_OPT_DEFAULT | DB_OPT_MMPROFILE_BUFFER | DB_OPT_DISPLAY_HANG_DUMP | DB_OPT_DUMP_DISPLAY,
			"DVFS_HALT_DISP", "DVFS_HALT_DISP");
		/* primary_display_diagnose(); */ /* todo */
	}

	pr_err("[VcoreFS]halt_status(3)=0x%x\n", spm_read(CPU_DVFS_REQ));

	/* --- GCE: same probe --- */
	val = spm_read(SPM_SRC2_MASK);
	if (val & MM_DVFS_GCE_HALT_MASK) {
		pr_err("[VcoreFS]gce_halt[0]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			val, spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
		spm_write(SPM_SRC2_MASK, (val & ~MM_DVFS_GCE_HALT_MASK));
		udelay(50);
		pr_err("[VcoreFS]gce_halt[1]:src2_mask=0x%x r6=0x%x r15=0x%x\n",
			spm_read(SPM_SRC2_MASK), spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
	}

	udelay(200);

	/* restore the caller-visible mask configuration */
	spm_write(SPM_SRC2_MASK, orig_val);
	pr_err("[VcoreFS]restore src_mask=0x%x, r6=0x%x r15=0x%x\n",
		spm_read(SPM_SRC2_MASK), spm_read(PCM_REG6_DATA), spm_read(PCM_REG15_DATA));
	/* BUG(); */

	return 0;
}
#endif
  622. void spm_set_dummy_read_addr(void)
  623. {
  624. u32 rank0_addr, rank1_addr, dram_rank_num;
  625. dram_rank_num = g_dram_info_dummy_read->rank_num;
  626. rank0_addr = g_dram_info_dummy_read->rank_info[0].start;
  627. if (dram_rank_num == 1)
  628. rank1_addr = rank0_addr;
  629. else
  630. rank1_addr = g_dram_info_dummy_read->rank_info[1].start;
  631. spm_crit("dram_rank_num: %d\n", dram_rank_num);
  632. spm_crit("dummy read addr: rank0: 0x%x, rank1: 0x%x\n", rank0_addr, rank1_addr);
  633. spm_write(SPM_PASR_DPD_1, rank0_addr);
  634. spm_write(SPM_PASR_DPD_2, rank1_addr);
  635. }
  636. bool is_md_c2k_conn_power_off(void)
  637. {
  638. u32 md1_pwr_con = 0;
  639. u32 c2k_pwr_con = 0;
  640. u32 conn_pwr_con = 0;
  641. md1_pwr_con = spm_read(MD1_PWR_CON);
  642. c2k_pwr_con = spm_read(C2K_PWR_CON);
  643. conn_pwr_con = spm_read(CONN_PWR_CON);
  644. #if 0
  645. pr_err("md1_pwr_con = 0x%08x, c2k_pwr_con = 0x%08x, conn_pwr_con = 0x%08x\n",
  646. md1_pwr_con, c2k_pwr_con, conn_pwr_con);
  647. #endif
  648. if (!((md1_pwr_con & 0x1F) == 0x12))
  649. return false;
  650. if (!((c2k_pwr_con & 0x1F) == 0x12))
  651. return false;
  652. if (!((conn_pwr_con & 0x1F) == 0x12))
  653. return false;
  654. return true;
  655. }
/* saved PMIC clock-power-down settings, restored by __spm_restore_pmic_ck_pdn() */
static u32 pmic_rg_auxadc_ck_pdn_hwen;
static u32 pmic_rg_efuse_ck_pdn;

/*
 * Save the current AUXADC/EFUSE clock power-down settings, then force the
 * suspend-time values (AUXADC HW power-down off, EFUSE clock gated).
 * Pairs with __spm_restore_pmic_ck_pdn().
 */
void __spm_backup_pmic_ck_pdn(void)
{
	/* PMIC setting 2015/07/31 by Chia-Lin/Kev */
	pmic_read_interface_nolock(MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_ADDR,
		&pmic_rg_auxadc_ck_pdn_hwen,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_MASK,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_SHIFT);
	pmic_config_interface_nolock(MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_ADDR,
		0,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_MASK,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_SHIFT);
	pmic_read_interface_nolock(MT6351_PMIC_RG_EFUSE_CK_PDN_ADDR,
		&pmic_rg_efuse_ck_pdn,
		MT6351_PMIC_RG_EFUSE_CK_PDN_MASK,
		MT6351_PMIC_RG_EFUSE_CK_PDN_SHIFT);
	pmic_config_interface_nolock(MT6351_PMIC_RG_EFUSE_CK_PDN_ADDR,
		1,
		MT6351_PMIC_RG_EFUSE_CK_PDN_MASK,
		MT6351_PMIC_RG_EFUSE_CK_PDN_SHIFT);
}
/*
 * Restore the AUXADC/EFUSE clock power-down settings saved by
 * __spm_backup_pmic_ck_pdn().
 */
void __spm_restore_pmic_ck_pdn(void)
{
	/* PMIC setting 2015/07/31 by Chia-Lin/Kev */
	pmic_config_interface_nolock(MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_ADDR,
		pmic_rg_auxadc_ck_pdn_hwen,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_MASK,
		MT6351_PMIC_RG_AUXADC_CK_PDN_HWEN_SHIFT);
	pmic_config_interface_nolock(MT6351_PMIC_RG_EFUSE_CK_PDN_ADDR,
		pmic_rg_efuse_ck_pdn,
		MT6351_PMIC_RG_EFUSE_CK_PDN_MASK,
		MT6351_PMIC_RG_EFUSE_CK_PDN_SHIFT);
}
/*
 * One-time BSI_TOP block initialization (MT6755 only).
 * NOTE(review): the offset/value pairs are opaque magic from the vendor
 * init sequence — meaning not derivable from this file, confirm against
 * the BSI_TOP register manual before changing.
 */
void __spm_bsi_top_init_setting(void)
{
#ifdef CONFIG_ARCH_MT6755
	/* BSI_TOP init setting */
	spm_write(spm_bsi1cfg + 0x2004, 0x8000A824);
	spm_write(spm_bsi1cfg + 0x2010, 0x20001201);
	spm_write(spm_bsi1cfg + 0x2014, 0x150b0000);
	spm_write(spm_bsi1cfg + 0x2020, 0x0e001841);
	spm_write(spm_bsi1cfg + 0x2024, 0x150b0000);
	spm_write(spm_bsi1cfg + 0x2030, 0x1);
#endif
}
/*
 * Force the PMIC power-good controls on (DIG_IO_PG_FORCE = 1,
 * VIO18_PG_ENB = 1). Pairs with __spm_pmic_pg_force_off().
 */
void __spm_pmic_pg_force_on(void)
{
	pmic_config_interface_nolock(MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_ADDR,
		0x1,
		MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_MASK,
		MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_SHIFT);
	pmic_config_interface_nolock(MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_ADDR,
		0x1,
		MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_MASK,
		MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_SHIFT);
}
/*
 * Revert the PMIC power-good controls (DIG_IO_PG_FORCE = 0,
 * VIO18_PG_ENB = 0). Counterpart of __spm_pmic_pg_force_on().
 */
void __spm_pmic_pg_force_off(void)
{
	pmic_config_interface_nolock(MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_ADDR,
		0x0,
		MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_MASK,
		MT6351_PMIC_STRUP_DIG_IO_PG_FORCE_SHIFT);
	pmic_config_interface_nolock(MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_ADDR,
		0x0,
		MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_MASK,
		MT6351_PMIC_RG_STRUP_VIO18_PG_ENB_SHIFT);
}
MODULE_DESCRIPTION("SPM-Internal Driver v0.1");