clk-mt6735-pg.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295
  1. /*
  2. * Copyright (c) 2014 MediaTek Inc.
  3. * Author: James Liao <jamesjj.liao@mediatek.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/of.h>
  15. #include <linux/of_address.h>
  16. #include <linux/io.h>
  17. #include <linux/slab.h>
  18. #include <linux/delay.h>
  19. #include <linux/clkdev.h>
  20. #include <linux/ratelimit.h>
  21. #include "clk-mtk-v1.h"
  22. #include "clk-mt6735-pg.h"
  23. #include <dt-bindings/clock/mt6735-clk.h>
  24. #define VLTE_SUPPORT
  25. /* Workaround is handled by ccci */
  26. #ifdef VLTE_SUPPORT
  27. /* #include <mach/mt_gpio.h> */
  28. /* #include <mach/upmu_common.h> */
  29. #endif /* VLTE_SUPPORT */
  30. #if !defined(MT_CCF_DEBUG) || !defined(MT_CCF_BRINGUP)
  31. #define MT_CCF_DEBUG 0
  32. #define MT_CCF_BRINGUP 0
  33. #endif
  34. #define CHECK_PWR_ST 1
  35. #ifndef GENMASK
  36. #define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
  37. #endif
  38. #ifdef CONFIG_ARM64
  39. #define IOMEM(a) ((void __force __iomem *)((a)))
  40. #endif
  41. #define clk_readl(addr) readl(addr)
  42. #define clk_writel(val, addr) \
  43. do { writel(val, addr); wmb(); } while (0) /* sync_write */
  44. #define clk_setl(mask, addr) clk_writel(clk_readl(addr) | (mask), addr)
  45. #define clk_clrl(mask, addr) clk_writel(clk_readl(addr) & ~(mask), addr)
  46. #define mt_reg_sync_writel(v, a) \
  47. do { \
  48. __raw_writel((v), IOMEM((a))); \
  49. mb(); /* for mt_reg_sync_writel() */ \
  50. } while (0)
  51. #define spm_read(addr) __raw_readl(IOMEM(addr))
  52. #define spm_write(addr, val) mt_reg_sync_writel(val, addr)
  53. /*
  54. * MTCMOS
  55. */
  56. #define STA_POWER_DOWN 0
  57. #define STA_POWER_ON 1
  58. #define SUBSYS_PWR_DOWN 0
  59. #define SUBSYS_PWR_ON 1
  60. #define PWR_CLK_DIS (1U << 4)
  61. #define PWR_ON_2ND (1U << 3)
  62. #define PWR_ON (1U << 2)
  63. #define PWR_ISO (1U << 1)
  64. #define PWR_RST_B (1U << 0)
struct subsys;

/*
 * Per-subsystem power control operations.
 * enable/disable return 0 on success; get_state returns
 * SUBSYS_PWR_ON / SUBSYS_PWR_DOWN (1 / 0).
 */
struct subsys_ops {
	int (*enable)(struct subsys *sys);
	int (*disable)(struct subsys *sys);
	int (*get_state)(struct subsys *sys);
};

/* One MTCMOS power domain (an entry of syss[]). */
struct subsys {
	const char *name;		/* stringified subsys id, e.g. "SYS_MD1" */
	uint32_t sta_mask;		/* domain bit in SPM_PWR_STATUS(_2ND) */
	void __iomem *ctl_addr;		/* SPM_*_PWR_CON; filled in by init_clk_scpsys() */
	uint32_t sram_pdn_bits;		/* SRAM power-down request bits in ctl_addr */
	uint32_t sram_pdn_ack_bits;	/* SRAM power-down ack bits (0 = nothing to poll) */
	uint32_t bus_prot_mask;		/* INFRA_TOPAXI_PROTECTEN bits (0 = no bus protect) */
	struct subsys_ops *ops;
};
  80. static struct subsys_ops general_sys_ops;
  81. static struct subsys_ops MD1_sys_ops;
  82. static void __iomem *infracfg_base;
  83. static void __iomem *spm_base;
  84. #define INFRACFG_REG(offset) (infracfg_base + offset)
  85. #define SPM_REG(offset) (spm_base + offset)
  86. /**************************************
  87. * for non-CPU MTCMOS
  88. **************************************/
  89. static DEFINE_SPINLOCK(spm_noncpu_lock);
  90. #define spm_mtcmos_noncpu_lock(flags) \
  91. spin_lock_irqsave(&spm_noncpu_lock, flags)
  92. #define spm_mtcmos_noncpu_unlock(flags) \
  93. spin_unlock_irqrestore(&spm_noncpu_lock, flags)
  94. #define SPM_PWR_STATUS SPM_REG(0x060c) /* correct */
  95. #define SPM_PWR_STATUS_2ND SPM_REG(0x0610) /* correct */
  96. #define SPM_MD_PWR_CON SPM_REG(0x0284) /* correct */
  97. #define SPM_C2K_PWR_CON SPM_REG(0x02d4) /* correct */
  98. #define SPM_CONN_PWR_CON SPM_REG(0x0280) /* correct */
  99. #define SPM_DIS_PWR_CON SPM_REG(0x023c)
  100. #define SPM_MFG_PWR_CON SPM_REG(0x0214)
  101. #define SPM_ISP_PWR_CON SPM_REG(0x0238)
  102. #define SPM_VDE_PWR_CON SPM_REG(0x0210)
  103. #define SPM_VEN_PWR_CON SPM_REG(0x0230)
  104. #define SPM_PCM_IM_PTR SPM_REG(0x0318) /* correct */
  105. #define SPM_PCM_IM_LEN SPM_REG(0x031c) /* correct */
  106. #define SPM_SLEEP_CPU_WAKEUP_EVENT SPM_REG(0x0814) /* correct */
  107. #define SPM_PCM_PASR_DPD_3 SPM_REG(0x0b6c) /* correct */
  108. #define INFRA_TOPAXI_PROTECTEN INFRACFG_REG(0x0220) /* correct */
  109. #define INFRA_TOPAXI_PROTECTSTA1 INFRACFG_REG(0x0228) /* correct */
  110. #define C2K_SPM_CTRL INFRACFG_REG(0x0338) /* correct */
  111. #define SPM_PROJECT_CODE 0xb16
  112. #define PWR_RST_B_BIT BIT(0)
  113. #define PWR_ISO_BIT BIT(1)
  114. #define PWR_ON_BIT BIT(2)
  115. #define PWR_ON_2ND_BIT BIT(3)
  116. #define PWR_CLK_DIS_BIT BIT(4)
  117. #define MD1_PWR_STA_MASK BIT(0)
  118. #define MD2_PWR_STA_MASK BIT(22)
  119. #define CONN_PWR_STA_MASK BIT(1)
  120. #define DIS_PWR_STA_MASK BIT(3)
  121. #define MFG_PWR_STA_MASK BIT(4)
  122. #define ISP_PWR_STA_MASK BIT(5)
  123. #define VDE_PWR_STA_MASK BIT(7)
  124. #define VEN_PWR_STA_MASK BIT(8)
  125. #define SRAM_PDN (0xf << 8) /* VDEC, VENC, ISP, DISP */
  126. #define MFG_SRAM_PDN (0xf << 8)
  127. #define MD_SRAM_PDN (0x1 << 8) /* MD1, C2K */
  128. #define CONN_SRAM_PDN (0x1 << 8)
  129. #define VDE_SRAM_ACK (0x1 << 12)
  130. #define VEN_SRAM_ACK (0xf << 12)
  131. #define ISP_SRAM_ACK (0x3 << 12)
  132. #define DIS_SRAM_ACK (0x1 << 12)
  133. #define MFG_SRAM_ACK (0x1 << 12)
  134. #define DISP_PROT_MASK ((0x1<<1))/* bit 1, 6, 16; if bit6 set, MMSYS PDN, access reg will hang, */
  135. #define MFG_PROT_MASK ((0x1<<14))
  136. #define MD1_PROT_MASK ((0x1<<24) | (0x1<<25) | (0x1<<26) | (0x1<<27) | \
  137. (0x1<<28)) /* bit 24,25,26,27,28 */
  138. #define MD2_PROT_MASK ((0x1<<29) | (0x1<<30) | (0x1<<31)) /* bit 29, 30, 31 */
  139. #define CONN_PROT_MASK ((0x1<<2) | (0x1<<8)) /* bit 2, 8 */
  140. #if defined(CONFIG_ARCH_MT6735M)
  141. /* #define MD_PWRON_BY_CPU */
  142. #elif defined(CONFIG_ARCH_MT6753)
  143. #define MD_PWRON_BY_CPU
  144. #else
  145. #define MD_PWRON_BY_CPU
  146. #endif
/*
 * MTCMOS power-domain table, indexed by enum subsys_id.
 * The ctl_addr fields are assigned at runtime by init_clk_scpsys()
 * once the SPM register base has been mapped.
 */
static struct subsys syss[] = /* NR_SYSS */
{
	[SYS_MD1] = {
		.name = __stringify(SYS_MD1),
		.sta_mask = MD1_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_MD_PWR_CON, */
		.sram_pdn_bits = MD_SRAM_PDN,
		.sram_pdn_ack_bits = 0, /* GENMASK(15, 12), */
		.bus_prot_mask = MD1_PROT_MASK,
		.ops = &MD1_sys_ops, /* MD1 has its own VLTE/SPM-assisted sequence */
	},
	[SYS_MD2] = {
		.name = __stringify(SYS_MD2),
		.sta_mask = MD2_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_C2K_PWR_CON, */
		.sram_pdn_bits = MD_SRAM_PDN,
		.sram_pdn_ack_bits = 0,
		.bus_prot_mask = MD2_PROT_MASK,
		.ops = &general_sys_ops,
	},
	[SYS_CONN] = {
		.name = __stringify(SYS_CONN),
		.sta_mask = CONN_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_CONN_PWR_CON, */
		.sram_pdn_bits = CONN_SRAM_PDN,
		.sram_pdn_ack_bits = 0,
		.bus_prot_mask = 0,
		.ops = &general_sys_ops,
	},
	[SYS_DIS] = {
		.name = __stringify(SYS_DIS),
		.sta_mask = DIS_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_DIS_PWR_CON, */
		.sram_pdn_bits = SRAM_PDN,
		.sram_pdn_ack_bits = DIS_SRAM_ACK,
		.bus_prot_mask = DISP_PROT_MASK,
		.ops = &general_sys_ops,
	},
	[SYS_MFG] = {
		.name = __stringify(SYS_MFG),
		.sta_mask = MFG_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_MFG_PWR_CON, */
		.sram_pdn_bits = SRAM_PDN,
		.sram_pdn_ack_bits = MFG_SRAM_ACK,
		.bus_prot_mask = MFG_PROT_MASK,
		.ops = &general_sys_ops,
	},
	[SYS_ISP] = {
		.name = __stringify(SYS_ISP),
		.sta_mask = ISP_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_ISP_PWR_CON, */
		.sram_pdn_bits = SRAM_PDN,
		.sram_pdn_ack_bits = ISP_SRAM_ACK,
		.bus_prot_mask = 0,
		.ops = &general_sys_ops,
	},
	[SYS_VDE] = {
		.name = __stringify(SYS_VDE),
		.sta_mask = VDE_PWR_STA_MASK,
		/* .ctl_addr = NULL, */ /* SPM_VDE_PWR_CON, */
		.sram_pdn_bits = SRAM_PDN,
		.sram_pdn_ack_bits = VDE_SRAM_ACK,
		.bus_prot_mask = 0,
		.ops = &general_sys_ops,
	},
	[SYS_VEN] = {
		.name = __stringify(SYS_VEN),
		.sta_mask = VEN_PWR_STA_MASK,
		/* .ctl_addr = 0, */ /* SPM_VEN_PWR_CON, */
		.sram_pdn_bits = SRAM_PDN,
		.sram_pdn_ack_bits = VEN_SRAM_ACK,
		.bus_prot_mask = 0,
		.ops = &general_sys_ops,
	},
};
/* Single registered callback set; after_on/before_off hooks fire from
 * enable_subsys()/disable_subsys(). */
static struct pg_callbacks *g_pgcb;

/*
 * Register a power-gate callback set, replacing any previous one.
 * Returns the previously registered set (NULL if none).
 * NOTE(review): no locking around g_pgcb — presumably registration
 * happens before concurrent power-gate use; confirm with callers.
 */
struct pg_callbacks *register_pg_callback(struct pg_callbacks *pgcb)
{
	struct pg_callbacks *old_pgcb = g_pgcb;

	g_pgcb = pgcb;
	return old_pgcb;
}
  229. static struct subsys *id_to_sys(unsigned int id)
  230. {
  231. return id < NR_SYSS ? &syss[id] : NULL;
  232. }
#if MT_CCF_BRINGUP
/** sync from mt_spm_mtcmos.c for bring up */
/*
 * Power the CONN (connectivity) MTCMOS up or down via direct SPM writes.
 * @state: STA_POWER_DOWN or STA_POWER_ON.
 * Returns 0 (err is never set to anything else).
 *
 * Down: bus-protect -> SRAM PDN -> ISO -> clear RST_B / gate clocks ->
 * drop PWR_ON(+2ND) -> poll both status registers until off.
 * Up runs the inverse order.
 */
static int spm_mtcmos_ctrl_connsys(int state)
{
	int err = 0;
	volatile unsigned int val;	/* volatile: keep each register step explicit */
	unsigned long flags;
	int count = 0;

	pr_debug_ratelimited("[CCF] %s: state=%d: S\n", __func__, state);
	spm_mtcmos_noncpu_lock(flags);
	if (state == STA_POWER_DOWN) {
		/* raise bus protection; bounded (1000-iteration) poll for ack */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) | CONN_PROT_MASK);
		while ((spm_read(INFRA_TOPAXI_PROTECTSTA1) & CONN_PROT_MASK)
			!= CONN_PROT_MASK) {
			count++;
			if (count > 1000)
				break;
		}
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) | CONN_SRAM_PDN);
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) | PWR_ISO);
		val = spm_read(SPM_CONN_PWR_CON);
		val = (val & ~PWR_RST_B) | PWR_CLK_DIS;
		spm_write(SPM_CONN_PWR_CON, val);
		spm_write(SPM_CONN_PWR_CON,
			spm_read(SPM_CONN_PWR_CON) & ~(PWR_ON | PWR_ON_2ND));
		/* NOTE(review): unbounded busy-wait; hangs if SPM never acks */
		while ((spm_read(SPM_PWR_STATUS) & CONN_PWR_STA_MASK)
			|| (spm_read(SPM_PWR_STATUS_2ND) & CONN_PWR_STA_MASK))
			; /* nothing */
	} else {
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) | PWR_ON);
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) | PWR_ON_2ND);
		/* NOTE(review): unbounded busy-wait on power-on ack */
		while (!(spm_read(SPM_PWR_STATUS) & CONN_PWR_STA_MASK)
			|| !(spm_read(SPM_PWR_STATUS_2ND) & CONN_PWR_STA_MASK))
			; /* nothing */
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) & ~PWR_CLK_DIS);
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) & ~PWR_ISO);
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) | PWR_RST_B);
		spm_write(SPM_CONN_PWR_CON, spm_read(SPM_CONN_PWR_CON) & ~CONN_SRAM_PDN);
		/* release bus protection and wait for it to clear */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) & ~CONN_PROT_MASK);
		while (spm_read(INFRA_TOPAXI_PROTECTSTA1) & CONN_PROT_MASK)
			; /* nothing */
	}
	spm_mtcmos_noncpu_unlock(flags);
	return err;
}
#endif /* MT_CCF_BRINGUP */
/*
 * Power the MD1 (modem) MTCMOS up or down via SPM registers.
 * @state: STA_POWER_DOWN or STA_POWER_ON.
 * Returns 0 (err is never set to anything else).
 *
 * Down: bus-protect, SRAM PDN, ISO, LTE LS ISO (VLTE), clear reset and
 * gate clocks, drop PWR_ON, poll status off.
 * Up: raise PWR_ON, poll status on, then either finish by direct CPU
 * register writes (MD_PWRON_BY_CPU) or hand off the remainder to the
 * SPM PCM firmware via a wakeup event and mailbox poll.
 */
static int spm_mtcmos_ctrl_mdsys1(int state)
{
	int err = 0;
	volatile unsigned int val;	/* volatile: keep each register step explicit */
	unsigned long flags;
	int count = 0;

	pr_debug_ratelimited("[CCF] %s: state=%d: S\n", __func__, state);
	spm_mtcmos_noncpu_lock(flags);
	if (state == STA_POWER_DOWN) {
		/* raise bus protection; bounded (1000-iteration) poll for ack */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) | MD1_PROT_MASK);
		while ((spm_read(INFRA_TOPAXI_PROTECTSTA1) & MD1_PROT_MASK)
			!= MD1_PROT_MASK) {
			count++;
			if (count > 1000)
				break;
		}
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) | MD_SRAM_PDN);
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) | PWR_ISO);
#ifdef VLTE_SUPPORT
		/* enable LTE LS ISO */
		val = spm_read(C2K_SPM_CTRL);
		val |= 0x40;
		spm_write(C2K_SPM_CTRL, val);
#endif /* VLTE_SUPPORT */
		val = spm_read(SPM_MD_PWR_CON);
		val = (val & ~PWR_RST_B) | PWR_CLK_DIS;
		spm_write(SPM_MD_PWR_CON, val);
		spm_write(SPM_MD_PWR_CON,
			spm_read(SPM_MD_PWR_CON) & ~(PWR_ON | PWR_ON_2ND));
		/* NOTE(review): unbounded busy-wait on power-down ack */
		while ((spm_read(SPM_PWR_STATUS) & MD1_PWR_STA_MASK)
			|| (spm_read(SPM_PWR_STATUS_2ND) & MD1_PWR_STA_MASK))
			; /* nothing */
	} else { /* STA_POWER_ON */
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) | PWR_ON);
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) | PWR_ON_2ND);
		/* NOTE(review): unbounded busy-wait on power-on ack */
		while (!(spm_read(SPM_PWR_STATUS) & MD1_PWR_STA_MASK)
			|| !(spm_read(SPM_PWR_STATUS_2ND) & MD1_PWR_STA_MASK))
			; /* nothing */
#ifdef MD_PWRON_BY_CPU
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) & ~PWR_CLK_DIS);
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) & ~PWR_ISO);
		/* disable LTE LS ISO */
		val = spm_read(C2K_SPM_CTRL);
		val &= ~(0x40);
		spm_write(C2K_SPM_CTRL, val);
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) | PWR_RST_B);
#else
		/* Hand off to SPM firmware: write the 0xbeef mailbox token,
		 * kick a CPU wakeup event, then wait (bounded, ~1ms) for SPM
		 * to clear the token; BUG() with PCM info on timeout. */
		pr_debug("MD power on by SPM\n");
		spm_write(SPM_PCM_PASR_DPD_3, 0xbeef);
		spm_write(SPM_SLEEP_CPU_WAKEUP_EVENT, 0x1);
		while (spm_read(SPM_PCM_PASR_DPD_3)) {
			count++;
			udelay(1);
			if (count > 1000) {
				pr_debug("MD power on: SPM no response\n");
				pr_debug("PCM_IM_PTR : 0x%x (%u)\n", spm_read(SPM_PCM_IM_PTR),
					spm_read(SPM_PCM_IM_LEN));
				BUG();
			}
		}
#endif
		spm_write(SPM_MD_PWR_CON, spm_read(SPM_MD_PWR_CON) & ~MD_SRAM_PDN);
		/* release bus protection and wait for it to clear */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) & ~MD1_PROT_MASK);
		while (spm_read(INFRA_TOPAXI_PROTECTSTA1) & MD1_PROT_MASK)
			; /* nothing */
	}
	spm_mtcmos_noncpu_unlock(flags);
	return err;
}
#if MT_CCF_BRINGUP
/*
 * Power the MD2 (C2K modem) MTCMOS up or down via direct SPM writes.
 * @state: STA_POWER_DOWN or STA_POWER_ON.
 * Returns 0 (err is never set to anything else).
 * Same sequence shape as spm_mtcmos_ctrl_connsys(), using the C2K
 * power-control register and MD2 protection/status masks.
 */
static int spm_mtcmos_ctrl_mdsys2(int state)
{
	int err = 0;
	volatile unsigned int val;	/* volatile: keep each register step explicit */
	unsigned long flags;
	int count = 0;

	pr_debug_ratelimited("[CCF] %s: state=%d: S\n", __func__, state);
	spm_mtcmos_noncpu_lock(flags);
	if (state == STA_POWER_DOWN) {
		/* raise bus protection; bounded (1000-iteration) poll for ack */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) | MD2_PROT_MASK);
		while ((spm_read(INFRA_TOPAXI_PROTECTSTA1) & MD2_PROT_MASK)
			!= MD2_PROT_MASK) {
			count++;
			if (count > 1000)
				break;
		}
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) | MD_SRAM_PDN);
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) | PWR_ISO);
		val = spm_read(SPM_C2K_PWR_CON);
		val = (val & ~PWR_RST_B) | PWR_CLK_DIS;
		spm_write(SPM_C2K_PWR_CON, val);
		spm_write(SPM_C2K_PWR_CON,
			spm_read(SPM_C2K_PWR_CON) & ~(PWR_ON | PWR_ON_2ND));
		/* NOTE(review): unbounded busy-wait on power-down ack */
		while ((spm_read(SPM_PWR_STATUS) & MD2_PWR_STA_MASK)
			|| (spm_read(SPM_PWR_STATUS_2ND) & MD2_PWR_STA_MASK))
			; /* nothing */
	} else { /* STA_POWER_ON */
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) | PWR_ON);
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) | PWR_ON_2ND);
		/* NOTE(review): unbounded busy-wait on power-on ack */
		while (!(spm_read(SPM_PWR_STATUS) & MD2_PWR_STA_MASK)
			|| !(spm_read(SPM_PWR_STATUS_2ND) & MD2_PWR_STA_MASK))
			; /* nothing */
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) & ~PWR_CLK_DIS);
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) & ~PWR_ISO);
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) | PWR_RST_B);
		spm_write(SPM_C2K_PWR_CON, spm_read(SPM_C2K_PWR_CON) & ~MD_SRAM_PDN);
		/* release bus protection and wait for it to clear */
		spm_write(INFRA_TOPAXI_PROTECTEN,
			spm_read(INFRA_TOPAXI_PROTECTEN) & ~MD2_PROT_MASK);
		while (spm_read(INFRA_TOPAXI_PROTECTSTA1) & MD2_PROT_MASK)
			; /* nothing */
	}
	spm_mtcmos_noncpu_unlock(flags);
	return err;
}
#endif /* MT_CCF_BRINGUP */
  399. static void set_bus_protect(int en, uint32_t mask, unsigned long expired)
  400. {
  401. #if MT_CCF_DEBUG
  402. pr_debug("[CCF] %s: en=%d, mask=%u, expired=%lu: S\n", __func__,
  403. en, mask, expired);
  404. #endif /* MT_CCF_DEBUG */
  405. if (!mask)
  406. return;
  407. if (en) {
  408. clk_setl(mask, INFRA_TOPAXI_PROTECTEN);
  409. #if !DUMMY_REG_TEST
  410. while ((clk_readl(INFRA_TOPAXI_PROTECTSTA1) & mask) != mask) {
  411. if (time_after(jiffies, expired)) {
  412. WARN_ON(1);
  413. break;
  414. }
  415. }
  416. #endif /* !DUMMY_REG_TEST */
  417. } else {
  418. clk_clrl(mask, INFRA_TOPAXI_PROTECTEN);
  419. #if !DUMMY_REG_TEST
  420. while (clk_readl(INFRA_TOPAXI_PROTECTSTA1) & mask) {
  421. if (time_after(jiffies, expired)) {
  422. WARN_ON(1);
  423. break;
  424. }
  425. }
  426. #endif /* !DUMMY_REG_TEST */
  427. }
  428. }
/*
 * Generic MTCMOS power-off sequence for @sys (caller holds the clk lock):
 * bus-protect -> SRAM power-down (poll ack) -> ISO -> clear reset,
 * gate clocks -> drop PWR_ON/PWR_ON_2ND -> optional power-ack poll.
 * Every poll is bounded by a ~100 ms jiffies deadline and WARNs on
 * timeout instead of hanging.
 * @wait_power_ack: if set, poll SPM_PWR_STATUS(_2ND) until the domain
 *                  reads off in both registers.
 * @ext_pwr_delay: extra udelay() in microseconds after dropping power.
 * Always returns 0.
 */
static int spm_mtcmos_power_off_general_locked(struct subsys *sys,
	int wait_power_ack, int ext_pwr_delay)
{
	unsigned long expired = jiffies + HZ / 10;	/* ~100 ms poll deadline */
	void __iomem *ctl_addr = sys->ctl_addr;
	/* #if !DUMMY_REG_TEST */
	/* if (sys->sram_pdn_ack_bits) { */
	/* uint32_t sram_pdn_ack = sys->sram_pdn_ack_bits; */
	/* } */
	/* #endif */
#if MT_CCF_DEBUG
	pr_debug_ratelimited("[CCF] %s: sys=%s, wait_power_ack=%d, ext_pwr_delay=%d\n",
		__func__, sys->name, wait_power_ack, ext_pwr_delay);
#endif /* MT_CCF_DEBUG */
	/* BUS_PROTECT */
	if (sys->bus_prot_mask)
		set_bus_protect(1, sys->bus_prot_mask, expired);
	/* SRAM_PDN */
	clk_setl(sys->sram_pdn_bits, ctl_addr);
	/* wait until SRAM_PDN_ACK all 1 */
#if !DUMMY_REG_TEST
	if (sys->sram_pdn_ack_bits) {
		while (((clk_readl(ctl_addr) & sys->sram_pdn_ack_bits) != sys->sram_pdn_ack_bits)) {
			if (time_after(jiffies, expired)) {
				WARN_ON(1);
				break;
			}
		}
	}
#endif /* !DUMMY_REG_TEST */
	/* isolate, clear reset, gate clocks, then drop both power switches */
	clk_setl(PWR_ISO_BIT, ctl_addr);
	clk_clrl(PWR_RST_B_BIT, ctl_addr);
	clk_setl(PWR_CLK_DIS_BIT, ctl_addr);
	clk_clrl(PWR_ON_BIT, ctl_addr);
	clk_clrl(PWR_ON_2ND_BIT, ctl_addr);
	/* extra delay after power off */
	if (ext_pwr_delay > 0)
		udelay(ext_pwr_delay);
	if (wait_power_ack) {
		/* wait until PWR_ACK = 0 */
#if !DUMMY_REG_TEST
		while ((clk_readl(SPM_PWR_STATUS) & sys->sta_mask)
			|| (clk_readl(SPM_PWR_STATUS_2ND) & sys->sta_mask)) {
			if (time_after(jiffies, expired)) {
				WARN_ON(1);
				break;
			}
		}
#endif /* !DUMMY_REG_TEST */
	}
	return 0;
}
  481. static int spm_mtcmos_power_on_general_locked(
  482. struct subsys *sys, int wait_power_ack, int ext_pwr_delay)
  483. {
  484. unsigned long expired = jiffies + HZ / 10;
  485. void __iomem *ctl_addr = sys->ctl_addr;
  486. /* #if !DUMMY_REG_TEST */
  487. /* if (sys->sram_pdn_ack_bits) { */
  488. /* uint32_t sram_pdn_ack = sys->sram_pdn_ack_bits; */
  489. /* } */
  490. /* #endif */
  491. #if MT_CCF_DEBUG
  492. pr_debug_ratelimited("[CCF] %s: sys=%s, wait_power_ack=%d, ext_pwr_delay=%d\n",
  493. __func__, sys->name, wait_power_ack, ext_pwr_delay);
  494. #endif /* MT_CCF_DEBUG */
  495. clk_setl(PWR_ON_BIT, ctl_addr);
  496. clk_setl(PWR_ON_2ND_BIT, ctl_addr);
  497. /* extra delay after power on */
  498. if (ext_pwr_delay > 0)
  499. udelay(ext_pwr_delay);
  500. if (wait_power_ack) {
  501. /* wait until PWR_ACK = 1 */
  502. #if !DUMMY_REG_TEST
  503. while (!(clk_readl(SPM_PWR_STATUS) & sys->sta_mask)
  504. || !(clk_readl(SPM_PWR_STATUS_2ND) & sys->sta_mask)) {
  505. if (time_after(jiffies, expired)) {
  506. WARN_ON(1);
  507. break;
  508. }
  509. }
  510. #endif /* !DUMMY_REG_TEST */
  511. }
  512. clk_clrl(PWR_CLK_DIS_BIT, ctl_addr);
  513. clk_clrl(PWR_ISO_BIT, ctl_addr);
  514. clk_setl(PWR_RST_B_BIT, ctl_addr);
  515. /* SRAM_PDN */
  516. clk_clrl(sys->sram_pdn_bits, ctl_addr);
  517. /* wait until SRAM_PDN_ACK all 0 */
  518. #if !DUMMY_REG_TEST
  519. if (sys->sram_pdn_ack_bits) {
  520. while (sys->sram_pdn_ack_bits && (clk_readl(ctl_addr) & sys->sram_pdn_ack_bits)) {
  521. if (time_after(jiffies, expired)) {
  522. WARN_ON(1);
  523. break;
  524. }
  525. }
  526. }
  527. #endif /* !DUMMY_REG_TEST */
  528. /* BUS_PROTECT */
  529. if (sys->bus_prot_mask)
  530. set_bus_protect(0, sys->bus_prot_mask, expired);
  531. return 0;
  532. }
/* Generic domains: synchronous on/off via the locked SPM sequences. */
static int general_sys_enable_op(struct subsys *sys)
{
	return spm_mtcmos_power_on_general_locked(sys, 1, 0);
}

static int general_sys_disable_op(struct subsys *sys)
{
	return spm_mtcmos_power_off_general_locked(sys, 1, 0);
}

/* MD1 uses its dedicated sequence (VLTE ISO / SPM-assisted power-on). */
static int MD1_sys_enable_op(struct subsys *sys)
{
	return spm_mtcmos_ctrl_mdsys1(STA_POWER_ON);
}

static int MD1_sys_disable_op(struct subsys *sys)
{
	return spm_mtcmos_ctrl_mdsys1(STA_POWER_DOWN);
}

/* A domain counts as "on" only if both status registers report it on. */
static int sys_get_state_op(struct subsys *sys)
{
	unsigned int sta = clk_readl(SPM_PWR_STATUS);
	unsigned int sta_s = clk_readl(SPM_PWR_STATUS_2ND);

	return (sta & sys->sta_mask) && (sta_s & sys->sta_mask);
}

static struct subsys_ops general_sys_ops = {
	.enable = general_sys_enable_op,
	.disable = general_sys_disable_op,
	.get_state = sys_get_state_op,
};

static struct subsys_ops MD1_sys_ops = {
	.enable = MD1_sys_enable_op,
	.disable = MD1_sys_disable_op,
	.get_state = sys_get_state_op,
};
  565. static int subsys_is_on(enum subsys_id id)
  566. {
  567. int r;
  568. struct subsys *sys = id_to_sys(id);
  569. BUG_ON(!sys);
  570. r = sys->ops->get_state(sys);
  571. #if MT_CCF_DEBUG
  572. pr_debug("[CCF] %s:%d, sys=%s, id=%d\n", __func__, r, sys->name, id);
  573. #endif /* MT_CCF_DEBUG */
  574. return r;
  575. }
/*
 * Power on the domain behind @id.
 * In bring-up builds (MT_CCF_BRINGUP) the hand-rolled SPM sequences run
 * for MD1/MD2/CONN and every other id is a no-op, returning 0 early.
 * Otherwise the ops path runs under mtk_clk_lock, skipping the hardware
 * sequence if the domain is already on, then fires the after_on hook
 * outside the lock.
 * Returns 0, or the ops->enable() result (WARNed if nonzero).
 */
static int enable_subsys(enum subsys_id id)
{
	int r;
	unsigned long flags;
	struct subsys *sys = id_to_sys(id);

	BUG_ON(!sys);
#if MT_CCF_BRINGUP
	pr_debug("[CCF] %s: sys=%s, id=%d\n", __func__, sys->name, id);
	switch (id) {
	case SYS_MD1:
#ifdef VLTE_SUPPORT /* Workaround is handled by ccci */
		/* mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,1);
		pmic_config_interface(0x04D6, 0x1, 0x1, 0);
		udelay(200); */
#endif /* VLTE_SUPPORT */
		spm_mtcmos_ctrl_mdsys1(STA_POWER_ON);
		break;
	case SYS_MD2:
		spm_mtcmos_ctrl_mdsys2(STA_POWER_ON);
		break;
	case SYS_CONN:
		spm_mtcmos_ctrl_connsys(STA_POWER_ON);
		break;
	default:
		break;
	}
	return 0;
#endif /* MT_CCF_BRINGUP */
	mtk_clk_lock(flags);
#if CHECK_PWR_ST
	/* already powered: nothing to do */
	if (sys->ops->get_state(sys) == SUBSYS_PWR_ON) {
		mtk_clk_unlock(flags);
		return 0;
	}
#endif /* CHECK_PWR_ST */
	r = sys->ops->enable(sys);
	WARN_ON(r);
	mtk_clk_unlock(flags);
	/* notify the registered listener outside the spinlock */
	if (g_pgcb && g_pgcb->after_on)
		g_pgcb->after_on(id);
	return r;
}
/*
 * Power off the domain behind @id.
 * In bring-up builds (MT_CCF_BRINGUP) the hand-rolled SPM sequences run
 * for MD1/MD2/CONN and every other id is a no-op, returning 0 early.
 * Otherwise the before_off hook fires first (outside the lock, so the
 * listener can quiesce the domain), then the ops path runs under
 * mtk_clk_lock, skipping the sequence if already off.
 * Returns 0, or the ops->disable() result (WARNed if nonzero).
 */
static int disable_subsys(enum subsys_id id)
{
	int r;
	unsigned long flags;
	struct subsys *sys = id_to_sys(id);

	BUG_ON(!sys);
#if MT_CCF_BRINGUP
	pr_debug("[CCF] %s: sys=%s, id=%d\n", __func__, sys->name, id);
	switch (id) {
	case SYS_MD1:
		spm_mtcmos_ctrl_mdsys1(STA_POWER_DOWN);
#ifdef VLTE_SUPPORT /* Workaround is handled by ccci */
		/* pmic_config_interface(0x04D6, 0x0, 0x1, 0); //bit[0] =>1'b0
		mt_set_gpio_out(GPIO_LTE_VSRAM_EXT_POWER_EN_PIN,0); */
#endif /* VLTE_SUPPORT */
		break;
	case SYS_MD2:
		spm_mtcmos_ctrl_mdsys2(STA_POWER_DOWN);
		break;
	case SYS_CONN:
		spm_mtcmos_ctrl_connsys(STA_POWER_DOWN);
		break;
	default:
		break;
	}
	return 0;
#endif /* MT_CCF_BRINGUP */
	/* TODO: check all clocks related to this subsys are off */
	/* could be power off or not */
	if (g_pgcb && g_pgcb->before_off)
		g_pgcb->before_off(id);
	mtk_clk_lock(flags);
#if CHECK_PWR_ST
	/* already off: nothing to do */
	if (sys->ops->get_state(sys) == SUBSYS_PWR_DOWN) {
		mtk_clk_unlock(flags);
		return 0;
	}
#endif /* CHECK_PWR_ST */
	r = sys->ops->disable(sys);
	WARN_ON(r);
	mtk_clk_unlock(flags);
	return r;
}
  661. /*
  662. * power_gate
  663. */
/* clk_hw wrapper tying one CCF clock to an MTCMOS power domain. */
struct mt_power_gate {
	struct clk_hw hw;
	struct clk *pre_clk;	/* optional clock enabled before power-on; may be NULL */
	enum subsys_id pd_id;	/* which syss[] domain this clock gates */
};

#define to_power_gate(_hw) container_of(_hw, struct mt_power_gate, hw)
/* Power on the domain behind this power-gate clock (called via .prepare). */
static int pg_enable(struct clk_hw *hw)
{
	struct mt_power_gate *pg = to_power_gate(hw);

#if MT_CCF_DEBUG
	pr_debug("[CCF] %s: sys=%s, pd_id=%u\n", __func__,
		__clk_get_name(hw->clk), pg->pd_id);
#endif /* MT_CCF_DEBUG */
	return enable_subsys(pg->pd_id);
}
/* Power off the domain behind this power-gate clock (called via .unprepare). */
static void pg_disable(struct clk_hw *hw)
{
	struct mt_power_gate *pg = to_power_gate(hw);

#if MT_CCF_DEBUG
	pr_debug("[CCF] %s: sys=%s, pd_id=%u\n", __func__,
		__clk_get_name(hw->clk), pg->pd_id);
#endif /* MT_CCF_DEBUG */
	disable_subsys(pg->pd_id);
}
/*
 * clk_ops .is_enabled: report whether the domain is powered.
 * In bring-up builds this always claims enabled (the final return is
 * then unreachable) so CCF never tries to gate anything.
 */
static int pg_is_enabled(struct clk_hw *hw)
{
	struct mt_power_gate *pg = to_power_gate(hw);

#if MT_CCF_BRINGUP
	return 1;
#endif /* MT_CCF_BRINGUP */
	return subsys_is_on(pg->pd_id);
}
  696. int pg_prepare(struct clk_hw *hw)
  697. {
  698. int r;
  699. struct mt_power_gate *pg = to_power_gate(hw);
  700. #if MT_CCF_DEBUG
  701. pr_debug("[CCF] %s: sys=%s, pre_sys=%s\n", __func__,
  702. __clk_get_name(hw->clk),
  703. pg->pre_clk ? __clk_get_name(pg->pre_clk) : "");
  704. #endif /* MT_CCF_DEBUG */
  705. if (pg->pre_clk) {
  706. r = clk_prepare_enable(pg->pre_clk);
  707. if (r)
  708. return r;
  709. }
  710. return pg_enable(hw);
  711. }
/*
 * clk_ops .unprepare: power off the domain, then release the
 * prerequisite clock (reverse order of pg_prepare()).
 */
void pg_unprepare(struct clk_hw *hw)
{
	struct mt_power_gate *pg = to_power_gate(hw);

#if MT_CCF_DEBUG
	pr_debug("[CCF] %s: clk=%s, pre_clk=%s\n", __func__,
		__clk_get_name(hw->clk),
		pg->pre_clk ? __clk_get_name(pg->pre_clk) : "");
#endif /* MT_CCF_DEBUG */
	pg_disable(hw);
	if (pg->pre_clk)
		clk_disable_unprepare(pg->pre_clk);
}
/*
 * Power-gate clocks hook prepare/unprepare (non-atomic context, since
 * the sequences poll registers) rather than enable/disable.
 */
static const struct clk_ops mt_power_gate_ops = {
	.prepare = pg_prepare,
	.unprepare = pg_unprepare,
	.is_enabled = pg_is_enabled,
};
  729. struct clk *mt_clk_register_power_gate(
  730. const char *name,
  731. const char *parent_name,
  732. struct clk *pre_clk,
  733. enum subsys_id pd_id)
  734. {
  735. struct mt_power_gate *pg;
  736. struct clk *clk;
  737. struct clk_init_data init;
  738. pg = kzalloc(sizeof(*pg), GFP_KERNEL);
  739. if (!pg)
  740. return ERR_PTR(-ENOMEM);
  741. init.name = name;
  742. init.flags = CLK_IGNORE_UNUSED;
  743. init.parent_names = parent_name ? &parent_name : NULL;
  744. init.num_parents = parent_name ? 1 : 0;
  745. init.ops = &mt_power_gate_ops;
  746. pg->pre_clk = pre_clk;
  747. pg->pd_id = pd_id;
  748. pg->hw.init = &init;
  749. clk = clk_register(NULL, &pg->hw);
  750. if (IS_ERR(clk))
  751. kfree(pg);
  752. return clk;
  753. }
  754. #define pg_md1 "pg_md1"
  755. #define pg_md2 "pg_md2"
  756. #define pg_conn "pg_conn"
  757. #define pg_dis "pg_dis"
  758. #define pg_mfg "pg_mfg"
  759. #define pg_isp "pg_isp"
  760. #define pg_vde "pg_vde"
  761. #define pg_ven "pg_ven"
  762. #define md_sel "md_sel"
  763. #define conn_sel "conn_sel"
  764. #define mm_sel "mm_sel"
  765. #define vdec_sel "vdec_sel"
  766. #define venc_sel "venc_sel"
  767. #define mfg_sel "mfg_sel"
/* Static description of one power-gate clock, consumed at init time. */
struct mtk_power_gate {
	int id;				/* index into clk_onecell_data (SCP_SYS_*) */
	const char *name;
	const char *parent_name;
	const char *pre_clk_name;	/* resolved via __clk_lookup(); may be NULL */
	enum subsys_id pd_id;
};

/* Shorthand initializer for scp_clks[] entries. */
#define PGATE(_id, _name, _parent, _pre_clk, _pd_id) { \
	.id = _id, \
	.name = _name, \
	.parent_name = _parent, \
	.pre_clk_name = _pre_clk, \
	.pd_id = _pd_id, \
}
  782. struct mtk_power_gate scp_clks[] __initdata = {
  783. PGATE(SCP_SYS_MD1, pg_md1, NULL, NULL, SYS_MD1), /* md_sel */
  784. PGATE(SCP_SYS_MD2, pg_md2, NULL, NULL, SYS_MD2), /* md_sel */
  785. PGATE(SCP_SYS_CONN, pg_conn, NULL, NULL, SYS_CONN), /* conn_sel */
  786. PGATE(SCP_SYS_DIS, pg_dis, NULL, mm_sel, SYS_DIS), /* mm_sel */
  787. PGATE(SCP_SYS_MFG, pg_mfg, NULL, mfg_sel, SYS_MFG), /* mfg_sel */
  788. PGATE(SCP_SYS_ISP, pg_isp, NULL, NULL, SYS_ISP), /* pre_clk null? */
  789. PGATE(SCP_SYS_VDE, pg_vde, NULL, vdec_sel, SYS_VDE), /* vdec_sel */
  790. PGATE(SCP_SYS_VEN, pg_ven, NULL, NULL, SYS_VEN), /* venc_sel */
  791. };
  792. static void __init init_clk_scpsys(
  793. void __iomem *infracfg_reg,
  794. void __iomem *spm_reg ,
  795. struct clk_onecell_data *clk_data)
  796. {
  797. int i;
  798. struct clk *clk;
  799. struct clk *pre_clk;
  800. infracfg_base = infracfg_reg;
  801. spm_base = spm_reg;
  802. syss[SYS_MD1].ctl_addr = SPM_MD_PWR_CON;
  803. syss[SYS_MD2].ctl_addr = SPM_C2K_PWR_CON;
  804. syss[SYS_CONN].ctl_addr = SPM_CONN_PWR_CON;
  805. syss[SYS_DIS].ctl_addr = SPM_DIS_PWR_CON;
  806. syss[SYS_MFG].ctl_addr = SPM_MFG_PWR_CON;
  807. syss[SYS_ISP].ctl_addr = SPM_ISP_PWR_CON;
  808. syss[SYS_VDE].ctl_addr = SPM_VDE_PWR_CON;
  809. syss[SYS_VEN].ctl_addr = SPM_VEN_PWR_CON;
  810. for (i = 0; i < ARRAY_SIZE(scp_clks); i++) {
  811. struct mtk_power_gate *pg = &scp_clks[i];
  812. pre_clk = pg->pre_clk_name ?
  813. __clk_lookup(pg->pre_clk_name) : NULL;
  814. clk = mt_clk_register_power_gate(pg->name, pg->parent_name,
  815. pre_clk, pg->pd_id);
  816. if (IS_ERR(clk)) {
  817. pr_err("[CCF] %s: Failed to register clk %s: %ld\n",
  818. __func__, pg->name, PTR_ERR(clk));
  819. continue;
  820. }
  821. if (clk_data)
  822. clk_data->clks[pg->id] = clk;
  823. #if MT_CCF_DEBUG
  824. pr_debug("[CCF] %s: pgate %3d: %s\n", __func__, i, pg->name);
  825. #endif /* MT_CCF_DEBUG */
  826. }
  827. }
  828. /*
  829. * device tree support
  830. */
  831. /* TODO: remove this function */
  832. static struct clk_onecell_data *alloc_clk_data(unsigned int clk_num)
  833. {
  834. int i;
  835. struct clk_onecell_data *clk_data;
  836. clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
  837. if (!clk_data)
  838. return NULL;
  839. clk_data->clks = kcalloc(clk_num, sizeof(struct clk *), GFP_KERNEL);
  840. if (!clk_data->clks) {
  841. kfree(clk_data);
  842. return NULL;
  843. }
  844. clk_data->clk_num = clk_num;
  845. for (i = 0; i < clk_num; ++i)
  846. clk_data->clks[i] = ERR_PTR(-ENOENT);
  847. return clk_data;
  848. }
/* TODO: remove this function */
/*
 * get_reg - map the register space for reg index @index of @np.
 * Under DUMMY_REG_TEST the "registers" are a zeroed kernel page, letting
 * the driver run without real hardware.
 */
static void __iomem *get_reg(struct device_node *np, int index)
{
#if DUMMY_REG_TEST
	return kzalloc(PAGE_SIZE, GFP_KERNEL);
#else
	return of_iomap(np, index);
#endif
}
  858. static void __init mt_scpsys_init(struct device_node *node)
  859. {
  860. struct clk_onecell_data *clk_data;
  861. void __iomem *infracfg_reg;
  862. void __iomem *spm_reg;
  863. int r;
  864. infracfg_reg = get_reg(node, 0);
  865. spm_reg = get_reg(node, 1);
  866. if (!infracfg_reg || !spm_reg) {
  867. pr_err("clk-pg-mt6735: missing reg\n");
  868. return;
  869. }
  870. pr_debug("[CCF] %s: sys: %s, reg: 0x%p, 0x%p\n",
  871. __func__, node->name, infracfg_reg, spm_reg);
  872. clk_data = alloc_clk_data(SCP_NR_SYSS);
  873. init_clk_scpsys(infracfg_reg, spm_reg, clk_data);
  874. r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
  875. if (r)
  876. pr_err("[CCF] %s:could not register clock provide\n", __func__);
  877. #if !MT_CCF_BRINGUP
  878. /* subsys init: per modem owner request, disable modem power first */
  879. disable_subsys(SYS_MD1);
  880. disable_subsys(SYS_MD2);
  881. #endif /* !MT_CCF_BRINGUP */
  882. }
  883. CLK_OF_DECLARE(mtk_pg_regs, "mediatek,mt6735-scpsys", mt_scpsys_init);
  884. #if CLK_DEBUG
  885. /*
  886. * debug / unit test
  887. */
  888. #include <linux/proc_fs.h>
  889. #include <linux/fs.h>
  890. #include <linux/seq_file.h>
  891. #include <linux/uaccess.h>
/* last command written to /proc/test_pg; dispatched on the next read */
static char last_cmd[128] = "null";
  893. static int test_pg_dump_regs(struct seq_file *s, void *v)
  894. {
  895. int i;
  896. for (i = 0; i < NR_SYSS; i++) {
  897. if (!syss[i].ctl_addr)
  898. continue;
  899. seq_printf(s, "%10s: [0x%p]: 0x%08x\n", syss[i].name,
  900. syss[i].ctl_addr, clk_readl(syss[i].ctl_addr));
  901. }
  902. return 0;
  903. }
/*
 * dump_pg_state - print one clock's state (on/off, prepare and enable
 * counts, rate, parent name) to @s, or "[name: NULL]" if the lookup
 * fails.
 * NOTE(review): clk_put() is called on the result of __clk_lookup(),
 * which does not itself take a reference -- confirm this pairing is
 * intended for this kernel version.
 */
static void dump_pg_state(const char *clkname, struct seq_file *s)
{
	struct clk *c = __clk_lookup(clkname);
	struct clk *p = IS_ERR_OR_NULL(c) ? NULL : __clk_get_parent(c);

	if (IS_ERR_OR_NULL(c)) {
		seq_printf(s, "[%17s: NULL]\n", clkname);
		return;
	}

	seq_printf(s, "[%17s: %3s, %3d, %3d, %10ld, %17s]\n",
		__clk_get_name(c),
		__clk_is_enabled(c) ? "ON" : "off",
		__clk_get_prepare_count(c),
		__clk_get_enable_count(c),
		__clk_get_rate(c),
		p ? __clk_get_name(p) : "");

	clk_put(c);
}
  921. static int test_pg_dump_state_all(struct seq_file *s, void *v)
  922. {
  923. static const char * const clks[] = {
  924. pg_md1,
  925. pg_md2,
  926. pg_conn,
  927. pg_dis,
  928. pg_mfg,
  929. pg_isp,
  930. pg_vde,
  931. pg_ven,
  932. };
  933. int i;
  934. pr_debug("\n");
  935. for (i = 0; i < ARRAY_SIZE(clks); i++)
  936. dump_pg_state(clks[i], s);
  937. return 0;
  938. }
/*
 * Clocks exercised by the "1" (prepare_enable) and "2"
 * (disable_unprepare) test commands; .clk caches the lookup made in
 * test_pg_1() for use by test_pg_2().
 */
static struct {
	const char *name;
	struct clk *clk;
} g_clks[] = {
	{.name = pg_md1},
	{.name = pg_vde},
	{.name = pg_ven},
	{.name = pg_mfg},
};
  948. static int test_pg_1(struct seq_file *s, void *v)
  949. {
  950. int i;
  951. pr_debug("\n");
  952. for (i = 0; i < ARRAY_SIZE(g_clks); i++) {
  953. g_clks[i].clk = __clk_lookup(g_clks[i].name);
  954. if (IS_ERR_OR_NULL(g_clks[i].clk)) {
  955. seq_printf(s, "clk_get(%s): NULL\n",
  956. g_clks[i].name);
  957. continue;
  958. }
  959. clk_prepare_enable(g_clks[i].clk);
  960. seq_printf(s, "clk_prepare_enable(%s)\n",
  961. __clk_get_name(g_clks[i].clk));
  962. }
  963. return 0;
  964. }
  965. static int test_pg_2(struct seq_file *s, void *v)
  966. {
  967. int i;
  968. pr_debug("\n");
  969. for (i = 0; i < ARRAY_SIZE(g_clks); i++) {
  970. if (IS_ERR_OR_NULL(g_clks[i].clk)) {
  971. seq_printf(s, "(%s).clk: NULL\n",
  972. g_clks[i].name);
  973. continue;
  974. }
  975. seq_printf(s, "clk_disable_unprepare(%s)\n",
  976. __clk_get_name(g_clks[i].clk));
  977. clk_disable_unprepare(g_clks[i].clk);
  978. clk_put(g_clks[i].clk);
  979. }
  980. return 0;
  981. }
  982. static int test_pg_show(struct seq_file *s, void *v)
  983. {
  984. static const struct {
  985. int (*fn)(struct seq_file *, void *);
  986. const char *cmd;
  987. } cmds[] = {
  988. {.cmd = "dump_regs", .fn = test_pg_dump_regs},
  989. {.cmd = "dump_state", .fn = test_pg_dump_state_all},
  990. {.cmd = "1", .fn = test_pg_1},
  991. {.cmd = "2", .fn = test_pg_2},
  992. };
  993. int i;
  994. pr_debug("last_cmd: %s\n", last_cmd);
  995. for (i = 0; i < ARRAY_SIZE(cmds); i++) {
  996. if (strcmp(cmds[i].cmd, last_cmd) == 0)
  997. return cmds[i].fn(s, v);
  998. }
  999. return 0;
  1000. }
/* proc open: route reads through test_pg_show() via seq_file */
static int test_pg_open(struct inode *inode, struct file *file)
{
	return single_open(file, test_pg_show, NULL);
}
  1005. static ssize_t test_pg_write(
  1006. struct file *file,
  1007. const char __user *buffer,
  1008. size_t count,
  1009. loff_t *data)
  1010. {
  1011. char desc[sizeof(last_cmd)];
  1012. int len = 0;
  1013. pr_debug("count: %zu\n", count);
  1014. len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
  1015. if (copy_from_user(desc, buffer, len))
  1016. return 0;
  1017. desc[len] = '\0';
  1018. strcpy(last_cmd, desc);
  1019. if (last_cmd[len - 1] == '\n')
  1020. last_cmd[len - 1] = 0;
  1021. return count;
  1022. }
  1023. static const struct file_operations test_pg_fops = {
  1024. .owner = THIS_MODULE,
  1025. .open = test_pg_open,
  1026. .read = seq_read,
  1027. .write = test_pg_write,
  1028. .llseek = seq_lseek,
  1029. .release = single_release,
  1030. };
/*
 * debug_init - create /proc/test_pg (at most once).
 * The static 'init' tracks progress: 0 = never attempted, 1 = attempted
 * but proc_create() failed, 2 = fully initialized.  Any non-zero value
 * short-circuits later calls, so a failed creation is never retried.
 */
static int __init debug_init(void)
{
	static int init;
	struct proc_dir_entry *entry;

	pr_debug("init: %d\n", init);

	if (init)
		return 0;

	++init;

	entry = proc_create("test_pg", 0, 0, &test_pg_fops);
	if (!entry)
		return -ENOMEM;

	++init;

	return 0;
}
/* remove /proc/test_pg on module unload */
static void __exit debug_exit(void)
{
	remove_proc_entry("test_pg", NULL);
}
  1049. module_init(debug_init);
  1050. module_exit(debug_exit);
  1051. #endif /* CLK_DEBUG */