mt_spm.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/irqchip/mt-gic.h>
#include <mt-plat/aee.h>
#include <mt-plat/mt_chip.h>
#include <mach/mt_spm_mtcmos_internal.h>
#include "mt_spm_idle.h"
#if !defined(CONFIG_ARCH_MT6580)
#include <irq.h>
#endif
#include <mach/wd_api.h>
#include "mt_spm_internal.h"
#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#endif

void __weak aee_kernel_warning_api(const char *file, const int line, const int db_opt,
				   const char *module, const char *msg, ...)
{
}

#if defined(CONFIG_ARCH_MT6580)
#define ENABLE_DYNA_LOAD_PCM
#endif

#ifdef ENABLE_DYNA_LOAD_PCM	/* for dyna_load_pcm */
/* for request_firmware */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <asm/cacheflush.h>
#include <linux/dma-direction.h>

static struct dentry *spm_dir;
static struct dentry *spm_file;
static struct platform_device *pspmdev;
static int dyna_load_pcm_done;

static char *dyna_load_pcm_path[] = {
	[DYNA_LOAD_PCM_SUSPEND] = "pcm_suspend.bin",
	[DYNA_LOAD_PCM_SODI] = "pcm_sodi.bin",
	[DYNA_LOAD_PCM_DEEPIDLE] = "pcm_deepidle.bin",
	[DYNA_LOAD_PCM_MAX] = "pcm_path_max",
};

MODULE_FIRMWARE(dyna_load_pcm_path[DYNA_LOAD_PCM_SUSPEND]);
MODULE_FIRMWARE(dyna_load_pcm_path[DYNA_LOAD_PCM_SODI]);
MODULE_FIRMWARE(dyna_load_pcm_path[DYNA_LOAD_PCM_DEEPIDLE]);

struct dyna_load_pcm_t dyna_load_pcm[DYNA_LOAD_PCM_MAX];

/* add char device for spm */
#include <linux/cdev.h>
#define SPM_DETECT_MAJOR 159	/* FIXME */
#define SPM_DETECT_DEV_NUM 1
#define SPM_DETECT_DRVIER_NAME "spm"
#define SPM_DETECT_DEVICE_NAME "spm"

struct class *pspmDetectClass = NULL;
struct device *pspmDetectDev = NULL;
static int gSPMDetectMajor = SPM_DETECT_MAJOR;
static struct cdev gSPMDetectCdev;
#endif /* ENABLE_DYNA_LOAD_PCM */
#ifdef CONFIG_OF
#if !defined(CONFIG_ARCH_MT6580)
void __iomem *spm_base;
void __iomem *scp_i2c0_base;
void __iomem *scp_i2c1_base;
void __iomem *scp_i2c2_base;
void __iomem *i2c4_base;
#include <mt_dramc.h>		/* for ucDram_Register_Read() */
#if defined(CONFIG_ARCH_MT6753)
void __iomem *_mcucfg_base;
void __iomem *_mcucfg_phys_base;
#endif
/* device tree + 32 = IRQ number */
u32 spm_irq_0 = 197;		/* 165 */
u32 spm_irq_1 = 198;		/* 166 */
u32 spm_irq_2 = 199;		/* 167 */
u32 spm_irq_3 = 200;		/* 168 */
#else
void __iomem *spm_base;
void __iomem *spm_i2c0_base;
void __iomem *spm_i2c1_base;
void __iomem *spm_i2c2_base;
void __iomem *spm_mcucfg_base;
void __iomem *spm_ddrphy_base;
void __iomem *spm_cksys_base;
/* device tree + 32 = IRQ number */
/* 88 + 32 = 120 */
u32 spm_irq_0 = 120;
u32 spm_irq_1 = 121;
u32 spm_irq_2 = 122;
u32 spm_irq_3 = 123;
#endif /* CONFIG_ARCH_MT6580 */
#endif /* CONFIG_OF */
/*
 * Config and Parameter
 */
#define SPM_MD_DDR_EN_OUT 0

/*
 * Define and Declare
 */
struct spm_irq_desc {
	unsigned int irq;
	irq_handler_t handler;
};

static twam_handler_t spm_twam_handler;

void __attribute__((weak)) mt_gic_cfg_irq2cpu(unsigned int irq, unsigned int cpu, unsigned int set)
{
}

void __attribute__((weak)) spm_deepidle_init(void)
{
}

/*
 * Init and IRQ Function
 */
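/*
 * IRQ0 is shared by the TWAM monitor and the PCM software interrupt.
 * The handler below latches the TWAM signal counters while the ISR status
 * is still asserted, acks the interrupt, and only then invokes any
 * registered twam_handler_t, outside the locked register-access section.
 */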
static irqreturn_t spm_irq0_handler(int irq, void *dev_id)
{
	u32 isr;
	unsigned long flags;
	struct twam_sig twamsig;

	spin_lock_irqsave(&__spm_lock, flags);
	/* get ISR status */
	isr = spm_read(SPM_SLEEP_ISR_STATUS);
	if (isr & ISRS_TWAM) {
		twamsig.sig0 = spm_read(SPM_SLEEP_TWAM_STATUS0);
		twamsig.sig1 = spm_read(SPM_SLEEP_TWAM_STATUS1);
		twamsig.sig2 = spm_read(SPM_SLEEP_TWAM_STATUS2);
		twamsig.sig3 = spm_read(SPM_SLEEP_TWAM_STATUS3);
	}

	/* clean ISR status */
	spm_write(SPM_SLEEP_ISR_MASK, spm_read(SPM_SLEEP_ISR_MASK) | ISRM_ALL_EXC_TWAM);
	spm_write(SPM_SLEEP_ISR_STATUS, isr);
	if (isr & ISRS_TWAM)
		udelay(100);	/* need 3T TWAM clock (32K/26M) */
	spm_write(SPM_PCM_SW_INT_CLEAR, PCM_SW_INT0);
	spin_unlock_irqrestore(&__spm_lock, flags);

	if ((isr & ISRS_TWAM) && spm_twam_handler)
		spm_twam_handler(&twamsig);
	if (isr & (ISRS_SW_INT0 | ISRS_PCM_RETURN))
		spm_err("IRQ0 HANDLER SHOULD NOT BE EXECUTED (0x%x)\n", isr);

	return IRQ_HANDLED;
}
static irqreturn_t spm_irq_aux_handler(u32 irq_id)
{
	u32 isr;
	unsigned long flags;

	spin_lock_irqsave(&__spm_lock, flags);
	isr = spm_read(SPM_SLEEP_ISR_STATUS);
	spm_write(SPM_PCM_SW_INT_CLEAR, (1U << irq_id));
	spin_unlock_irqrestore(&__spm_lock, flags);

	spm_err("IRQ%u HANDLER SHOULD NOT BE EXECUTED (0x%x)\n", irq_id, isr);

	return IRQ_HANDLED;
}

static irqreturn_t spm_irq1_handler(int irq, void *dev_id)
{
	return spm_irq_aux_handler(1);
}

static irqreturn_t spm_irq2_handler(int irq, void *dev_id)
{
	return spm_irq_aux_handler(2);
}

static irqreturn_t spm_irq3_handler(int irq, void *dev_id)
{
	return spm_irq_aux_handler(3);
}
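/*
 * Request all four SPM IRQs with the same low-level/no-suspend flags and,
 * on 32-bit targets, spread them across the possible CPUs so each SPM
 * interrupt line is serviced by a different core.
 */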
static int spm_irq_register(void)
{
	int i, err, r = 0;
#ifdef CONFIG_OF
	struct spm_irq_desc irqdesc[] = {
		{.irq = 0, .handler = spm_irq0_handler,},
		{.irq = 0, .handler = spm_irq1_handler,},
		{.irq = 0, .handler = spm_irq2_handler,},
		{.irq = 0, .handler = spm_irq3_handler,}
	};

	irqdesc[0].irq = SPM_IRQ0_ID;
	irqdesc[1].irq = SPM_IRQ1_ID;
	irqdesc[2].irq = SPM_IRQ2_ID;
	irqdesc[3].irq = SPM_IRQ3_ID;
#else
	struct spm_irq_desc irqdesc[] = {
		{.irq = SPM_IRQ0_ID, .handler = spm_irq0_handler,},
		{.irq = SPM_IRQ1_ID, .handler = spm_irq1_handler,},
		{.irq = SPM_IRQ2_ID, .handler = spm_irq2_handler,},
		{.irq = SPM_IRQ3_ID, .handler = spm_irq3_handler,}
	};
#endif

	for (i = 0; i < ARRAY_SIZE(irqdesc); i++) {
		err = request_irq(irqdesc[i].irq, irqdesc[i].handler,
				  IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, "SPM", NULL);
		if (err) {
			spm_err("FAILED TO REQUEST IRQ%d (%d)\n", i, err);
			r = -EPERM;
		}
/* FIXME: for fpga early porting */
#ifndef CONFIG_ARM64
		/* assign each SPM IRQ to each CPU */
		mt_gic_cfg_irq2cpu(irqdesc[i].irq, 0, 0);
		mt_gic_cfg_irq2cpu(irqdesc[i].irq, i % num_possible_cpus(), 1);
#endif
	}

#if defined(CONFIG_ARCH_MT6580)
	mt_gic_set_priority(SPM_IRQ0_ID);
#endif

	return r;
}
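/*
 * Map the SPM (and related I2C/MCUCFG/DDRPHY) register bases from the
 * device tree, resolve the SPM IRQ numbers, and program the power-on
 * defaults: unlock register writes, reset the PCM, and mask/clear all
 * SPM wake-up interrupt sources.
 */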
static void spm_register_init(void)
{
	unsigned long flags;
#if !defined(CONFIG_ARCH_MT6580)
	unsigned int code = mt_get_chip_hw_code();
#endif
#if defined(CONFIG_ARCH_MT6753)
	struct resource r;
#endif
#ifdef CONFIG_OF
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "mediatek,SLEEP");
	if (!node)
		spm_err("find SLEEP node failed\n");
	spm_base = of_iomap(node, 0);
	if (!spm_base)
		spm_err("base spm_base failed\n");

	spm_irq_0 = irq_of_parse_and_map(node, 0);
	if (!spm_irq_0)
		spm_err("get spm_irq_0 failed\n");
	spm_irq_1 = irq_of_parse_and_map(node, 1);
	if (!spm_irq_1)
		spm_err("get spm_irq_1 failed\n");
	spm_irq_2 = irq_of_parse_and_map(node, 2);
	if (!spm_irq_2)
		spm_err("get spm_irq_2 failed\n");
	spm_irq_3 = irq_of_parse_and_map(node, 3);
	if (!spm_irq_3)
		spm_err("get spm_irq_3 failed\n");

#if defined(CONFIG_ARCH_MT6753)
#define MCUCFG_NODE "mediatek,MCUCFG"
	node = of_find_compatible_node(NULL, NULL, MCUCFG_NODE);
	if (!node) {
		spm_err("error: cannot find node " MCUCFG_NODE);
		BUG();
	}
	if (of_address_to_resource(node, 0, &r)) {
		spm_err("error: cannot get phys addr " MCUCFG_NODE);
		BUG();
	}
	_mcucfg_phys_base = (void *)r.start;
	_mcucfg_base = (void *)of_iomap(node, 0);
	if (!_mcucfg_base) {
		spm_err("error: cannot iomap " MCUCFG_NODE);
		BUG();
	}
#endif

#if !defined(CONFIG_ARCH_MT6580)
	node = of_find_compatible_node(NULL, NULL, "mediatek,SCP_I2C0");
	if (!node)
		spm_err("find SCP_I2C0 node failed\n");
	scp_i2c0_base = of_iomap(node, 0);
	if (!scp_i2c0_base)
		spm_err("base scp_i2c0_base failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,SCP_I2C1");
	if (!node)
		spm_err("find SCP_I2C1 node failed\n");
	scp_i2c1_base = of_iomap(node, 0);
	if (!scp_i2c1_base)
		spm_err("base scp_i2c1_base failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,SCP_I2C2");
	if (!node)
		spm_err("find SCP_I2C2 node failed\n");
	scp_i2c2_base = of_iomap(node, 0);
	if (!scp_i2c2_base)
		spm_err("base scp_i2c2_base failed\n");

	spm_err("spm_base = %p, scp_i2c0_base = %p, scp_i2c1_base = %p, scp_i2c2_base = %p\n",
		spm_base, scp_i2c0_base, scp_i2c1_base, scp_i2c2_base);
	spm_err("spm_irq_0 = %d, spm_irq_1 = %d, spm_irq_2 = %d, spm_irq_3 = %d\n",
		spm_irq_0, spm_irq_1, spm_irq_2, spm_irq_3);
#else
	node = of_find_compatible_node(NULL, NULL, "mediatek,I2C0");
	if (!node)
		spm_err("find I2C0 node failed\n");
	spm_i2c0_base = of_iomap(node, 0);
	if (!spm_i2c0_base)
		spm_err("base spm_i2c0 failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,I2C1");
	if (!node)
		spm_err("find I2C1 node failed\n");
	spm_i2c1_base = of_iomap(node, 0);
	if (!spm_i2c1_base)
		spm_err("base spm_i2c1 failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,I2C2");
	if (!node)
		spm_err("find I2C2 node failed\n");
	spm_i2c2_base = of_iomap(node, 0);
	if (!spm_i2c2_base)
		spm_err("base spm_i2c2 failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,MCUCFG");	/* mcucfg */
	if (!node)
		spm_err("[MCUCFG] find node failed\n");
	spm_mcucfg_base = of_iomap(node, 0);
	if (!spm_mcucfg_base)
		spm_err("[MCUCFG] base failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,TOPCKGEN");	/* cksys */
	if (!node)
		spm_err("[CLK_CKSYS] find node failed\n");
	spm_cksys_base = of_iomap(node, 0);
	if (!spm_cksys_base)
		spm_err("[CLK_CKSYS] base failed\n");

	node = of_find_compatible_node(NULL, NULL, "mediatek,DDRPHY");
	if (!node)
		spm_err("find DDRPHY node failed\n");
	spm_ddrphy_base = of_iomap(node, 0);
	if (!spm_ddrphy_base)
		spm_err("[DDRPHY] base failed\n");

	spm_err("spm_base = %p, spm_i2c0_base = %p, spm_i2c1_base = %p, spm_i2c2_base = %p\n",
		spm_base, spm_i2c0_base, spm_i2c1_base, spm_i2c2_base);
	spm_err("spm_cksys_base = 0x%p, spm_mcucfg_base = 0x%p, spm_ddrphy_base = 0x%p\n",
		spm_cksys_base, spm_mcucfg_base, spm_ddrphy_base);
	spm_err("spm_irq_0 = %d, spm_irq_1 = %d, spm_irq_2 = %d, spm_irq_3 = %d\n",
		spm_irq_0, spm_irq_1, spm_irq_2, spm_irq_3);
#endif /* CONFIG_ARCH_MT6580 */
#endif /* CONFIG_OF */

	spin_lock_irqsave(&__spm_lock, flags);

	/* enable register control */
	spm_write(SPM_POWERON_CONFIG_SET, SPM_REGWR_CFG_KEY | SPM_REGWR_EN);

	/* init power control register */
	spm_write(SPM_POWER_ON_VAL0, 0);
	spm_write(SPM_POWER_ON_VAL1, POWER_ON_VAL1_DEF);
	spm_write(SPM_PCM_PWR_IO_EN, 0);

#ifdef SPM_VCORE_EN
	/* init DVFS status register */
	spm_write(SPM_SLEEP_DVFS_STA,
		  HPM_REQ_STA | /* VRF18_0_STA | */ VCORE_STA_1 /* Vcore 1.15 */);
#endif

	/* reset PCM */
	spm_write(SPM_PCM_CON0, CON0_CFG_KEY | CON0_PCM_SW_RESET);
	spm_write(SPM_PCM_CON0, CON0_CFG_KEY);
	/* PCM reset failed */
	/* BUG_ON(spm_read(SPM_PCM_FSM_STA) != PCM_FSM_STA_DEF); */

	/* init PCM control register */
	spm_write(SPM_PCM_CON0, CON0_CFG_KEY | CON0_IM_SLEEP_DVS);
	spm_write(SPM_PCM_CON1, CON1_CFG_KEY | CON1_EVENT_LOCK_EN |
		  CON1_SPM_SRAM_ISO_B | CON1_SPM_SRAM_SLP_B | CON1_MIF_APBEN);
	spm_write(SPM_PCM_IM_PTR, 0);
	spm_write(SPM_PCM_IM_LEN, 0);

#if !defined(CONFIG_ARCH_MT6580)
	/*
	 * SRCLKENA0: POWER_ON_VAL1 (PWR_IO_EN[7]=0) or
	 *            E1: r7|SRCLKENAI0|SRCLKENAI1|MD1_SRCLKENA (PWR_IO_EN[7]=1)
	 *            E2: r7|SRCLKENAI0 (PWR_IO_EN[7]=1)
	 * CLKSQ0_OFF: POWER_ON_VAL0 (PWR_IO_EN[0]=0) or r0 (PWR_IO_EN[0]=1)
	 * SRCLKENA1: MD2_SRCLKENA
	 * CLKSQ1_OFF: !MD2_SRCLKENA
	 */
	spm_write(SPM_CLK_CON,
		  spm_read(SPM_CLK_CON) | CC_SRCLKENA_MASK_0 | CC_SYSCLK1_EN_0 | CC_SYSCLK1_EN_1 |
		  CC_CLKSQ1_SEL | CC_CXO32K_RM_EN_MD2 | CC_CXO32K_RM_EN_MD1 | CC_MD32_DCM_EN);

	spm_write(SPM_PCM_SRC_REQ,
		  SR_CCIF0_TO_AP_MASK_B | SR_CCIF0_TO_MD_MASK_B | SR_CCIF1_TO_AP_MASK_B |
		  SR_CCIF1_TO_MD_MASK_B);

	spm_write(SPM_AP_STANBY_CON, spm_read(SPM_AP_STANBY_CON) | ASC_SRCCLKENI_MASK);

	if (code == 0x335 || code == 0x337)
		spm_write(SPM_PCM_RESERVE2, spm_read(SPM_PCM_RESERVE2) | (1U << 4));
#else
	/* SRCLKENA: POWER_ON_VAL1 (PWR_IO_EN[7]=0) or POWER_ON_VAL1|r7 (PWR_IO_EN[7]=1) */
	/* CLKSQ: POWER_ON_VAL0 (PWR_IO_EN[0]=0) or r0 (PWR_IO_EN[0]=1) */
	/* SRCLKENAI will trigger 26M-wake/sleep event */
	/* spm_write(SPM_CLK_CON, CC_SRCLKENA_MASK_0 | CC_SYSCLK1_EN_0 | CC_SYSCLK1_EN_1 |
	 *	     CC_CLKSQ1_SEL | CC_CXO32K_RM_EN_MD2 | CC_CXO32K_RM_EN_MD1 | CC_MD32_DCM_EN); */
	spm_write(SPM_CLK_CON, spm_read(SPM_CLK_CON) | CC_SRCLKENA_MASK_0 | CC_CXO32K_RM_EN_MD1);
	/* CC_CLKSQ0_SEL is DONT-CARE in Suspend since PCM_PWR_IO_EN[0]=1 in Suspend */
	spm_write(SPM_PCM_SRC_REQ, 0);
	/* TODO: check if this means "Set SRCLKENI_MASK=1'b1" */
	spm_write(SPM_AP_STANBY_CON, spm_read(SPM_AP_STANBY_CON) | ASC_SRCCLKENI_MASK);
	/*
	 * unmask gce_busy_mask (set to 1'b1); otherwise, gce (cmd-q) cannot
	 * notify SPM to exit EMI self-refresh
	 */
	spm_write(SPM_PCM_MMDDR_MASK, spm_read(SPM_PCM_MMDDR_MASK) | (1U << 4));
#endif

	/* clean ISR status */
	spm_write(SPM_SLEEP_ISR_MASK, ISRM_ALL);
	spm_write(SPM_SLEEP_ISR_STATUS, ISRC_ALL);
	spm_write(SPM_PCM_SW_INT_CLEAR, PCM_SW_INT_ALL);

	/* output md_ddr_en if needed for debug */
#if SPM_MD_DDR_EN_OUT
	__spm_dbgout_md_ddr_en(true);
#endif

	spin_unlock_irqrestore(&__spm_lock, flags);
}
int spm_module_init(void)
{
	int r = 0;
	/* The following setting is moved to LK by WDT init because of the DTS init level issue */
#if !defined(CONFIG_ARCH_MT6580)
	struct wd_api *wd_api;
#endif

	spm_register_init();
	if (spm_irq_register() != 0)
		r = -EPERM;

#ifndef CONFIG_MTK_FPGA
#if defined(CONFIG_PM)
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6580) \
	|| defined(CONFIG_ARCH_MT6735M) || defined(CONFIG_ARCH_MT6753)
	if (spm_fs_init() != 0)
		r = -EPERM;
#endif
#endif
#endif

#if !defined(CONFIG_ARCH_MT6580)
	get_wd_api(&wd_api);
	if (wd_api->wd_spmwdt_mode_config) {
		wd_api->wd_spmwdt_mode_config(WD_REQ_EN, WD_REQ_RST_MODE);
	} else {
		spm_err("FAILED TO GET WD API\n");
		r = -ENODEV;
	}
#endif

#ifndef CONFIG_MTK_FPGA
	spm_sodi_init();
	/* spm_mcdi_init(); */
#if !defined(CONFIG_ARCH_MT6580)
	spm_deepidle_init();
#endif
#endif

	if (spm_golden_setting_cmp(1) != 0) {
		/* r = -EPERM; */
		aee_kernel_warning("SPM Warning", "dram golden setting mismatch");
	}

#if !defined(CONFIG_ARCH_MT6580)
	spm_set_pcm_init_flag();
#endif

#ifdef SPM_VCORE_EN
	spm_go_to_vcore_dvfs(SPM_VCORE_DVFS_EN, 0);
#else
#if defined(CONFIG_ARCH_MT6735)
	/* only for common solution, no DVS */
	spm_go_to_vcore_dvfs(0, 0);
#endif
#endif

	return r;
}
#ifdef ENABLE_DYNA_LOAD_PCM	/* for dyna_load_pcm */
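/*
 * Expected layout of each pcm_*.bin image, as parsed by
 * spm_load_pcm_firmware() below:
 *   [0..1]  u16 firmware_size (number of 32-bit PCM words)
 *   [2.. ]  firmware_size * 4 bytes of PCM code, copied into
 *           dyna_load_pcm[i].buf
 *   [.. ]   the tail of struct pcm_desc, starting at its .size member
 *   [.. ]   a version string, stored in dyna_load_pcm[i].version
 */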
int spm_load_pcm_firmware(struct platform_device *pdev)
{
	const struct firmware *fw;
	int err = 0;
	int i;
	int offset = 0;

	if (!pdev)
		return err;
	if (dyna_load_pcm_done)
		return err;

	for (i = DYNA_LOAD_PCM_SUSPEND; i < DYNA_LOAD_PCM_MAX; i++) {
		u16 firmware_size = 0;
		int copy_size = 0;
		struct pcm_desc *pdesc = &(dyna_load_pcm[i].desc);

		err = request_firmware(&fw, dyna_load_pcm_path[i], &pdev->dev);
		if (err) {
			pr_debug("Failed to load %s, %d.\n", dyna_load_pcm_path[i], err);
			continue;
			/* return -EINVAL; */
		}

		/* Do whatever it takes to load firmware into device. */
		offset = 0;
		copy_size = 2;
		memcpy(&firmware_size, fw->data, copy_size);
		offset += copy_size;

		copy_size = firmware_size * 4;
		memcpy(dyna_load_pcm[i].buf, fw->data + offset, copy_size);
		dmac_map_area((void *)dyna_load_pcm[i].buf, PCM_FIRMWARE_SIZE, DMA_TO_DEVICE);
		offset += copy_size;

		copy_size = sizeof(struct pcm_desc) - offsetof(struct pcm_desc, size);
		memcpy((void *)&(dyna_load_pcm[i].desc.size), fw->data + offset, copy_size);
		offset += copy_size;

		copy_size = fw->size - offset;
		memcpy(dyna_load_pcm[i].version, fw->data + offset, copy_size);
		pdesc->version = dyna_load_pcm[i].version;
		pdesc->base = (u32 *)dyna_load_pcm[i].buf;

		release_firmware(fw);
		dyna_load_pcm[i].ready = 1;
		dyna_load_pcm_done = 1;
	}
	return err;
}
int spm_load_pcm_firmware_nodev(void)
{
	spm_load_pcm_firmware(pspmdev);
	return 0;
}

int spm_load_firmware_status(void)
{
	return dyna_load_pcm_done;
}

static int spm_dbg_show_firmware(struct seq_file *s, void *unused)
{
	int i;
	struct pcm_desc *pdesc = NULL;

	for (i = DYNA_LOAD_PCM_SUSPEND; i < DYNA_LOAD_PCM_MAX; i++) {
		pdesc = &(dyna_load_pcm[i].desc);
		seq_printf(s, "#@# %s\n", dyna_load_pcm_path[i]);
		if (pdesc->version) {
			seq_printf(s, "#@# version = %s\n", pdesc->version);
			seq_printf(s, "#@# base = 0x%p\n", pdesc->base);
			seq_printf(s, "#@# size = %u\n", pdesc->size);
			seq_printf(s, "#@# sess = %u\n", pdesc->sess);
			seq_printf(s, "#@# replace = %u\n", pdesc->replace);
			seq_printf(s, "#@# vec0 = 0x%x\n", pdesc->vec0);
			seq_printf(s, "#@# vec1 = 0x%x\n", pdesc->vec1);
			seq_printf(s, "#@# vec2 = 0x%x\n", pdesc->vec2);
			seq_printf(s, "#@# vec3 = 0x%x\n", pdesc->vec3);
			seq_printf(s, "#@# vec4 = 0x%x\n", pdesc->vec4);
			seq_printf(s, "#@# vec5 = 0x%x\n", pdesc->vec5);
			seq_printf(s, "#@# vec6 = 0x%x\n", pdesc->vec6);
			seq_printf(s, "#@# vec7 = 0x%x\n", pdesc->vec7);
		}
	}
	seq_puts(s, "\n\n");
	return 0;
}
static int spm_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, spm_dbg_show_firmware, &inode->i_private);
}

static const struct file_operations spm_debug_fops = {
	.open = spm_dbg_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int SPM_detect_open(struct inode *inode, struct file *file)
{
	pr_debug("open major %d minor %d (pid %d)\n", imajor(inode), iminor(inode), current->pid);
	spm_load_pcm_firmware_nodev();
	return 0;
}

static int SPM_detect_close(struct inode *inode, struct file *file)
{
	pr_debug("close major %d minor %d (pid %d)\n", imajor(inode), iminor(inode), current->pid);
	return 0;
}

static ssize_t SPM_detect_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	pr_debug(" ++\n");
	pr_debug(" --\n");
	return 0;
}

ssize_t SPM_detect_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	pr_debug(" ++\n");
	pr_debug(" --\n");
	return 0;
}

const struct file_operations gSPMDetectFops = {
	.open = SPM_detect_open,
	.release = SPM_detect_close,
	.read = SPM_detect_read,
	.write = SPM_detect_write,
};
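/*
 * Late init for the dynamic PCM loader: register a dummy "spm" platform
 * device (used as the request_firmware() target), expose a char device
 * and a debugfs "spm/firmware" entry, and mark every PCM slot as not yet
 * loaded.
 */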
int spm_module_late_init(void)
{
	int i = 0;
	dev_t devID = MKDEV(gSPMDetectMajor, 0);
	int cdevErr = -1;
	int ret = -1;

	pspmdev = platform_device_register_simple("spm", 0, NULL, 0);
	if (IS_ERR(pspmdev)) {
		pr_debug("Failed to register platform device.\n");
		return -EINVAL;
	}

	ret = register_chrdev_region(devID, SPM_DETECT_DEV_NUM, SPM_DETECT_DRVIER_NAME);
	if (ret) {
		pr_debug("fail to register chrdev\n");
		return ret;
	}

	cdev_init(&gSPMDetectCdev, &gSPMDetectFops);
	gSPMDetectCdev.owner = THIS_MODULE;

	cdevErr = cdev_add(&gSPMDetectCdev, devID, SPM_DETECT_DEV_NUM);
	if (cdevErr) {
		pr_debug("cdev_add() fails (%d)\n", cdevErr);
		goto err1;
	}

	pspmDetectClass = class_create(THIS_MODULE, SPM_DETECT_DEVICE_NAME);
	if (IS_ERR(pspmDetectClass)) {
		pr_debug("class create fail, error code(%ld)\n", PTR_ERR(pspmDetectClass));
		goto err1;
	}

	pspmDetectDev = device_create(pspmDetectClass, NULL, devID, NULL, SPM_DETECT_DEVICE_NAME);
	if (IS_ERR(pspmDetectDev)) {
		pr_debug("device create fail, error code(%ld)\n", PTR_ERR(pspmDetectDev));
		goto err2;
	}
	pr_debug("driver(major %d) installed success\n", gSPMDetectMajor);

	spm_dir = debugfs_create_dir("spm", NULL);
	if (spm_dir == NULL) {
		pr_debug("Failed to create spm dir in debugfs.\n");
		return -EINVAL;
	}
	spm_file = debugfs_create_file("firmware", S_IRUGO,
				       spm_dir, NULL, &spm_debug_fops);

	for (i = DYNA_LOAD_PCM_SUSPEND; i < DYNA_LOAD_PCM_MAX; i++)
		dyna_load_pcm[i].ready = 0;

	return 0;

err2:
	if (pspmDetectClass) {
		class_destroy(pspmDetectClass);
		pspmDetectClass = NULL;
	}
err1:
	if (cdevErr == 0)
		cdev_del(&gSPMDetectCdev);
	if (ret == 0) {
		unregister_chrdev_region(devID, SPM_DETECT_DEV_NUM);
		gSPMDetectMajor = -1;
	}
	pr_debug("fail\n");
	return -1;
}
late_initcall(spm_module_late_init);
#endif /* ENABLE_DYNA_LOAD_PCM */
/*
 * PLL Request API
 */
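/*
 * MAINPLL on/off requests are reference counted in __spm_mainpll_req;
 * every driver that needs MAINPLL kept on pairs a request with an
 * unrequest. A minimal (hypothetical) client sketch:
 *
 *	spm_mainpll_on_request("my_drv");
 *	... work that must not see MAINPLL gated ...
 *	spm_mainpll_on_unrequest("my_drv");
 */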
void spm_mainpll_on_request(const char *drv_name)
{
	int req;

	req = atomic_inc_return(&__spm_mainpll_req);
	spm_debug("%s request MAINPLL on (%d)\n", drv_name, req);
}
EXPORT_SYMBOL(spm_mainpll_on_request);

void spm_mainpll_on_unrequest(const char *drv_name)
{
	int req;

	req = atomic_dec_return(&__spm_mainpll_req);
	spm_debug("%s unrequest MAINPLL on (%d)\n", drv_name, req);
}
EXPORT_SYMBOL(spm_mainpll_on_unrequest);
/*
 * TWAM Control API
 */
void spm_twam_register_handler(twam_handler_t handler)
{
	spm_twam_handler = handler;
}
EXPORT_SYMBOL(spm_twam_register_handler);
void spm_twam_enable_monitor(const struct twam_sig *twamsig, bool speed_mode,
			     unsigned int window_len)
{
	u32 sig0 = 0, sig1 = 0, sig2 = 0, sig3 = 0;
	unsigned long flags;

	if (twamsig) {
		sig0 = twamsig->sig0 & 0x1f;
		sig1 = twamsig->sig1 & 0x1f;
		sig2 = twamsig->sig2 & 0x1f;
		sig3 = twamsig->sig3 & 0x1f;
	}

	spin_lock_irqsave(&__spm_lock, flags);
	spm_write(SPM_SLEEP_ISR_MASK, spm_read(SPM_SLEEP_ISR_MASK) & ~ISRM_TWAM);
	spm_write(SPM_SLEEP_TWAM_CON, ((sig3 << 27) |
				       (sig2 << 22) |
				       (sig1 << 17) |
				       (sig0 << 12) |
				       (TWAM_MON_TYPE_HIGH << 4) |
				       (TWAM_MON_TYPE_HIGH << 6) |
				       (TWAM_MON_TYPE_HIGH << 8) |
				       (TWAM_MON_TYPE_HIGH << 10) |
				       (speed_mode ? TWAM_CON_SPEED_EN : 0) | TWAM_CON_EN));
	spm_write(SPM_SLEEP_TWAM_WINDOW_LEN, window_len);
	spin_unlock_irqrestore(&__spm_lock, flags);

	spm_crit("enable TWAM for signal %u, %u, %u, %u (%u)\n",
		 sig0, sig1, sig2, sig3, speed_mode);
}
EXPORT_SYMBOL(spm_twam_enable_monitor);

void spm_twam_disable_monitor(void)
{
	unsigned long flags;

	spin_lock_irqsave(&__spm_lock, flags);
	spm_write(SPM_SLEEP_TWAM_CON, spm_read(SPM_SLEEP_TWAM_CON) & ~TWAM_CON_EN);
	spm_write(SPM_SLEEP_ISR_MASK, spm_read(SPM_SLEEP_ISR_MASK) | ISRM_TWAM);
	spm_write(SPM_SLEEP_ISR_STATUS, ISRC_TWAM);
	spin_unlock_irqrestore(&__spm_lock, flags);

	spm_debug("disable TWAM\n");
}
EXPORT_SYMBOL(spm_twam_disable_monitor);
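/*
 * Typical (hypothetical) TWAM usage from a client driver, assuming
 * twam_handler_t is a void callback taking a struct twam_sig *:
 *
 *	static void my_twam_cb(struct twam_sig *ts)
 *	{
 *		pr_debug("twam: %u %u %u %u\n",
 *			 ts->sig0, ts->sig1, ts->sig2, ts->sig3);
 *	}
 *
 *	struct twam_sig sig = { .sig0 = 0, .sig1 = 1, .sig2 = 2, .sig3 = 3 };
 *
 *	spm_twam_register_handler(my_twam_cb);
 *	spm_twam_enable_monitor(&sig, false, window_len);
 *	...
 *	spm_twam_disable_monitor();
 *
 * Each signal index is masked to 5 bits, and the per-window results are
 * delivered to the registered handler from the IRQ0 handler above.
 */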
/*
 * SPM Golden Setting API (MEMPLL Control, DRAMC)
 */
struct ddrphy_golden_cfg {
	u32 addr;
	u32 value;
};

static struct ddrphy_golden_cfg ddrphy_setting[] = {
#if !defined(CONFIG_ARCH_MT6580)
#ifdef CONFIG_OF
	{0x5c0, 0x063c0000},
	{0x5c4, 0x00000000},
#if defined(CONFIG_ARCH_MT6753)
	{0x5c8, 0x0000f410},	/* temp remove mempll2/3 control for golden setting refine */
#else
	{0x5c8, 0x0000fC10},	/* temp remove mempll2/3 control for golden setting refine */
#endif
	{0x5cc, 0x40101000},
#else
	{0xf02135c0, 0x063c0000},
	{0xf02135c4, 0x00000000},
#if defined(CONFIG_ARCH_MT6753)
	{0xf02135c8, 0x0000fC10},	/* temp remove mempll2/3 control for golden setting refine */
#else
	{0xf02135c8, 0x0000fC10},	/* temp remove mempll2/3 control for golden setting refine */
#endif
	{0xf02135cc, 0x40101000},
#endif
#else /* CONFIG_ARCH_MT6580 */
#ifdef CONFIG_OF
	{0x5c0, 0x063c0000},
	{0x5c4, 0x00000000},
	{0x5c8, 0x0000fC10},	/* temp remove mempll2/3 control for golden setting refine */
	{0x5cc, 0x40101000},
#else
	{0xf02085c0, 0x063c0000},
	{0xf02085c4, 0x00000000},
	{0xf02085c8, 0x0000fC10},	/* temp remove mempll2/3 control for golden setting refine */
	{0xf02085cc, 0x40101000},
#endif
#endif /* CONFIG_ARCH_MT6580 */
};
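/*
 * Compare the live DDRPHY/MEMPLL registers against the golden values in
 * ddrphy_setting[]; log every mismatch and return -EPERM so the caller
 * (spm_module_init) can raise a warning.
 */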
int spm_golden_setting_cmp(bool en)
{
	int i, ddrphy_num, r = 0;

	if (!en)
		return r;

	/* compare DRAMC golden setting */
	ddrphy_num = ARRAY_SIZE(ddrphy_setting);
	for (i = 0; i < ddrphy_num; i++) {
#ifdef CONFIG_OF
#if !defined(CONFIG_ARCH_MT6580)
		if (ucDram_Register_Read(ddrphy_setting[i].addr) != ddrphy_setting[i].value) {
			spm_err("dramc setting mismatch addr: 0x%x, val: 0x%x\n",
				ddrphy_setting[i].addr,
				ucDram_Register_Read(ddrphy_setting[i].addr));
			r = -EPERM;
		}
#else
		if (spm_read(spm_ddrphy_base + ddrphy_setting[i].addr) != ddrphy_setting[i].value) {
			spm_err("dramc setting mismatch addr: %p, val: 0x%x\n",
				spm_ddrphy_base + ddrphy_setting[i].addr,
				spm_read(spm_ddrphy_base + ddrphy_setting[i].addr));
			r = -EPERM;
		}
#endif /* CONFIG_ARCH_MT6580 */
#else /* CONFIG_OF */
		if (spm_read(ddrphy_setting[i].addr) != ddrphy_setting[i].value) {
			spm_err("dramc setting mismatch addr: 0x%x, val: 0x%x\n",
				ddrphy_setting[i].addr, spm_read(ddrphy_setting[i].addr));
			r = -EPERM;
		}
#endif /* CONFIG_OF */
	}

	return r;
}
#if !defined(CONFIG_ARCH_MT6580)
/*
 * SPM AP-BSI Protocol Generator
 */
#define SPM_BSI_START (1U << 0)
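/*
 * Program the five BSI shift registers from clk_buf_cfg[], kick the
 * generator by setting SPM_BSI_START, then busy-wait until the hardware
 * clears the start bit.
 */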
void spm_ap_bsi_gen(unsigned int *clk_buf_cfg)
{
	spm_write(SPM_BSI_EN_SR, clk_buf_cfg[BSI_EN_SR]);
	spm_write(SPM_BSI_CLK_SR, clk_buf_cfg[BSI_CLK_SR]);
	spm_write(SPM_BSI_DO_SR, clk_buf_cfg[BSI_D0_SR]);
	spm_write(SPM_BSI_D1_SR, clk_buf_cfg[BSI_D1_SR]);
	spm_write(SPM_BSI_D2_SR, clk_buf_cfg[BSI_D2_SR]);
	spm_write(SPM_BSI_GEN, spm_read(SPM_BSI_GEN) | SPM_BSI_START);

	/* polling until SPM_BSI_START is finished */
	while (spm_read(SPM_BSI_GEN) & SPM_BSI_START)
		;
}
#endif
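/*
 * A CPU is reported as powered on only if its bit is set in both
 * SPM_PWR_STATUS and SPM_PWR_STATUS_2ND.
 */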
unsigned int spm_get_cpu_pwr_status(void)
{
	unsigned int val[2] = {0};
	unsigned int stat = 0;

	val[0] = spm_read(SPM_PWR_STATUS);
	val[1] = spm_read(SPM_PWR_STATUS_2ND);

	stat = val[0] & (CA15_CPU3 | CA15_CPU2 | CA15_CPU1 | CA15_CPU0 |
			 CA7_CPU3 | CA7_CPU2 | CA7_CPU1 | CA7_CPU0);
	stat &= val[1] & (CA15_CPU3 | CA15_CPU2 | CA15_CPU1 | CA15_CPU0 |
			  CA7_CPU3 | CA7_CPU2 | CA7_CPU1 | CA7_CPU0);

	return stat;
}

MODULE_DESCRIPTION("SPM Driver v0.1");