mt_cpuidle.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156
  1. /*
  2. * Copyright (c) 2015 MediaTek Inc.
  3. * Author: Cheng-En Chung <cheng-en.chung@mediatek.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <asm/cacheflush.h>
  15. #include <asm/irqflags.h>
  16. #include <asm/neon.h>
  17. #include <asm/psci.h>
  18. #include <asm/suspend.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of.h>
  21. #include <mt-plat/mt_dbg.h>
  22. #include <mt-plat/mt_io.h>
  23. #include <mt-plat/sync_write.h>
  24. #include "mt_cpuidle.h"
  25. #include "mt_spm.h"
  26. #include "smp.h"
  27. #include <mach/irqs.h>
  28. #include <mach/mt_spm_mtcmos.h>
  29. #if defined(CONFIG_MTK_RAM_CONSOLE) || defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
  30. #include <mach/mt_secure_api.h>
  31. #endif
  32. #if defined(CONFIG_TRUSTY) && defined(CONFIG_ARCH_MT6580)
  33. #include <mach/mt_trusty_api.h>
  34. #endif
  35. #define TAG "[Power-Dormant] "
  36. #define dormant_err(fmt, args...) pr_err(TAG fmt, ##args)
  37. #define dormant_warn(fmt, args...) pr_warn(TAG fmt, ##args)
  38. #define dormant_debug(fmt, args...) pr_debug(TAG fmt, ##args)
  39. #if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M) || defined(CONFIG_ARCH_MT6753)
  40. #define CONFIG_ARCH_MT6735_SERIES
  41. #endif
#ifdef CONFIG_MTK_RAM_CONSOLE
/* RAM-console slots written by DORMANT_LOG(); the *_pa variant is used while
 * the MMU is off (see cpu_resume_wrapper). Presumably set by init code not
 * visible in this chunk -- confirm. */
unsigned long *sleep_aee_rec_cpu_dormant_pa;
unsigned long *sleep_aee_rec_cpu_dormant_va;
#endif
#ifdef CONFIG_ARCH_MT6580
/* ioremapped register bases; filled in elsewhere (not in this chunk). */
static unsigned long mcucfg_base;
static unsigned long infracfg_ao_base;
static unsigned long gic_id_base;	/* GIC distributor base */
static unsigned long gic_ci_base;	/* GIC CPU interface base */
#else
static unsigned long gic_id_base;	/* GIC distributor base */
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
static unsigned long biu_base;
#endif
#endif
/* GIC interrupt numbers of the edge-triggered SPM wakeup sources that
 * restore_edge_gic_spm_irq() must re-inject after a GIC power-off.
 * Presumably resolved from DT by init code not shown here -- confirm. */
static unsigned int kp_irq_bit;
static unsigned int conn_wdt_irq_bit;
static unsigned int lowbattery_irq_bit;
static unsigned int md1_wdt_irq_bit;
#ifdef CONFIG_MTK_C2K_SUPPORT
static unsigned int c2k_wdt_irq_bit;
#endif
/* Per-SoC aliases for the SPM "cpu idle status" register and the bit offset
 * of the per-core idle bits inside it, plus the wakeup-latch register. */
#if defined(CONFIG_ARCH_MT6735_SERIES) || defined(CONFIG_ARCH_MT6580)
#define CPUIDLE_CPU_IDLE_STA SPM_SLEEP_TIMER_STA
#define CPUIDLE_CPU_IDLE_STA_OFFSET 16
#define CPUIDLE_SPM_WAKEUP_MISC SPM_SLEEP_WAKEUP_MISC
#elif defined(CONFIG_ARCH_MT6755) || defined(CONFIG_ARCH_MT6797)
#define CPUIDLE_CPU_IDLE_STA CPU_IDLE_STA
#define CPUIDLE_CPU_IDLE_STA_OFFSET 10
#define CPUIDLE_SPM_WAKEUP_MISC SPM_WAKEUP_MISC
#endif
#define MAX_CORES 4
#define MAX_CLUSTER 2
#ifdef CONFIG_ARCH_MT6580
#define MP0_CACHE_CONFIG (mcucfg_base + 0)
#define MP1_CACHE_CONFIG (mcucfg_base + 0x200)
/* L2RSTDISABLE: presumably keeps L2 RAM contents across a core reset (used
 * with IS_DORMANT_INNER_OFF below) -- confirm against MCUCFG manual. */
#define L2RSTDISABLE BIT(4)
#define DMT_BOOTROM_PWR_CTRL ((void *) (infracfg_ao_base + 0x804))
#define DMT_BOOTROM_BOOT_ADDR ((void *) (infracfg_ao_base + 0x800))
#define SW_ROM_PD BIT(31)
#endif
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
#define BIU_CONTROL (biu_base)
#define CMD_QUEUE_EN BIT(0)
#define DCM_EN BIT(1)
#define TLB_ULTRA_EN BIT(8)
#endif
/* MMIO accessors and small masked-or helpers used throughout this file. */
#define reg_read(addr) __raw_readl(IOMEM(addr))
#define reg_write(addr, val) mt_reg_sync_writel(val, addr)
#define _and(a, b) ((a) & (b))
#define _or(a, b) ((a) | (b))
#define _aor(a, b, c) _or(_and(a, b), (c))
/* Per-CPU saved state: progress timestamps and generic-timer registers
 * (filled by mt_save_generic_timer() in mt_cpu_save()). */
struct core_context {
	volatile u64 timestamp[5];
	unsigned long timer_data[8];
};

/* Per-cluster saved state. dbg_data is filled by mt_save_dbg_regs();
 * l2rstdisable keeps the original L2RSTDISABLE bit and l2rstdisable_rfcnt
 * refcounts how many cores are inside the dormant path. */
struct cluster_context {
	struct core_context core[MAX_CORES] ____cacheline_aligned;
	unsigned long dbg_data[40];
	int l2rstdisable;
	int l2rstdisable_rfcnt;
};

/* Whole-chip dormant context; the "poc" part is read with the MMU off on
 * resume (see cpu_resume_wrapper), hence its own cache line. */
struct system_context {
	struct cluster_context cluster[MAX_CLUSTER];
	struct _data_poc {
		void (*cpu_resume_phys)(void);	/* physical cpu_resume address */
		unsigned long l2ctlr;		/* saved L2CTLR value */
	} poc ____cacheline_aligned;
};

struct system_context dormant_data[1];
/* Gate for mt_cpu_dormant(); presumably set by init code not in this chunk. */
static int mt_dormant_initialized;
#define SPM_CORE_ID() core_idx()
/* True if the SPM has latched a wakeup IRQ for this core (two status bits
 * per core, 8 apart, hence the 0x101 mask). */
#define SPM_IS_CPU_IRQ_OCCUR(core_id) \
({ \
	(!!(spm_read(CPUIDLE_SPM_WAKEUP_MISC) & ((0x101<<(core_id))))); \
})
#ifdef CONFIG_MTK_RAM_CONSOLE
/* Record a progress marker for core `cid` into the RAM console. */
#define DORMANT_LOG(cid, pattern) (sleep_aee_rec_cpu_dormant_va[cid] = pattern)
#else
#define DORMANT_LOG(cid, pattern)
#endif
/* Linear core index: assumes the cluster id is 0x000/0x100 style, so >> 6
 * contributes 0 or 4 -- TODO confirm against read_cluster_id(). */
#define core_idx() \
({ \
	((read_cluster_id() >> 6) | read_cpu_id()); \
})
/* Fetch this CPU's core id and cluster id via the platform helpers. */
inline void read_id(int *cpu_id, int *cluster_id)
{
	*cpu_id = read_cpu_id();
	*cluster_id = read_cluster_id();
}
  132. #define system_cluster(system, clusterid) (&((struct system_context *)system)->cluster[clusterid])
  133. #define cluster_core(cluster, cpuid) (&((struct cluster_context *)cluster)->core[cpuid])
  134. void *_get_data(int core_or_cluster)
  135. {
  136. int cpuid, clusterid;
  137. struct cluster_context *cluster;
  138. struct core_context *core;
  139. read_id(&cpuid, &clusterid);
  140. cluster = system_cluster(dormant_data, clusterid);
  141. if (core_or_cluster == 1)
  142. return (void *)cluster;
  143. core = cluster_core(cluster, cpuid);
  144. return (void *)core;
  145. }
#define GET_CORE_DATA() ((struct core_context *)_get_data(0))
#define GET_CLUSTER_DATA() ((struct cluster_context *)_get_data(1))
/* Clear the generic timer enable bit (CNTP_CTL bit 0) before power-down. */
void stop_generic_timer(void)
{
	write_cntpctl(read_cntpctl() & ~1);
}
/* Re-enable the generic timer (CNTP_CTL bit 0) after resume/abort. */
void start_generic_timer(void)
{
	write_cntpctl(read_cntpctl() | 1);
}
  156. #if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
  157. static inline void biu_reconfig(void)
  158. {
  159. int val;
  160. val = reg_read(BIU_CONTROL);
  161. val |= TLB_ULTRA_EN;
  162. val |= DCM_EN;
  163. val |= CMD_QUEUE_EN;
  164. reg_write(BIU_CONTROL, val);
  165. }
  166. #endif
/* GIC write-1-to-set / write-1-to-clear register pair (enable/pending/active). */
struct set_and_clear_regs {
	volatile unsigned int set[32], clear[32];
};
/*
 * Weak no-op fallbacks for the debug-register and banked-register save/restore
 * hooks; presumably overridden at link time by platform debug code (mt_dbg.h
 * is included above) -- confirm.
 */
unsigned int __weak *mt_save_dbg_regs(unsigned int *p, unsigned int cpuid)
{
	return p;
}
void __weak mt_restore_dbg_regs(unsigned int *p, unsigned int cpuid) { }
void __weak mt_copy_dbg_regs(int to, int from) { }
void __weak mt_save_banked_registers(unsigned int *container) { }
void __weak mt_restore_banked_registers(unsigned int *container) { }
/* Memory-mapped layout of the ARM GIC distributor (GICD_*); register offsets
 * are noted on each field. */
struct interrupt_distributor {
	volatile unsigned int control;			/* 0x000 GICD_CTLR */
	const unsigned int controller_type;		/* 0x004 GICD_TYPER */
	const unsigned int implementer;			/* 0x008 GICD_IIDR */
	const char padding1[116];
	volatile unsigned int security[32];		/* 0x080 */
	struct set_and_clear_regs enable;		/* 0x100 */
	struct set_and_clear_regs pending;		/* 0x200 */
	struct set_and_clear_regs active;		/* 0x300 */
	volatile unsigned int priority[256];		/* 0x400 */
	volatile unsigned int target[256];		/* 0x800 */
	volatile unsigned int configuration[64];	/* 0xC00 */
	const char padding3[256];			/* 0xD00 */
	volatile unsigned int non_security_access_control[64];	/* 0xE00 */
	volatile unsigned int software_interrupt;	/* 0xF00 GICD_SGIR */
	volatile unsigned int sgi_clr_pending[4];	/* 0xF10 */
	volatile unsigned int sgi_set_pending[4];	/* 0xF20 */
	const char padding4[176];
	unsigned const int peripheral_id[4];		/* 0xFE0 */
	unsigned const int primecell_id[4];		/* 0xFF0 */
};
  199. static void restore_gic_spm_irq(struct interrupt_distributor *id, long wakeup_sta, int wake_src, int *irq_bit)
  200. {
  201. int i, j;
  202. if (reg_read(wakeup_sta) & wake_src) {
  203. i = *irq_bit / GIC_PRIVATE_SIGNALS;
  204. j = *irq_bit % GIC_PRIVATE_SIGNALS;
  205. id->pending.set[i] |= (1 << j);
  206. }
  207. }
/*
 * Re-inject the edge-triggered SPM wakeup sources into the GIC.
 * An edge latched by the SPM while the GIC was powered off would otherwise be
 * lost, so the matching GICD pending bits are set by hand. The distributor is
 * disabled around the writes and its control value restored afterwards.
 */
static void restore_edge_gic_spm_irq(unsigned long gic_distributor_address)
{
	struct interrupt_distributor *id = (struct interrupt_distributor *) gic_distributor_address;
	unsigned int backup;

	backup = id->control;
	id->control = 0;
	/* Set the pending bit for spm wakeup source that is edge triggerd */
#if defined(CONFIG_ARCH_MT6580) || defined(CONFIG_ARCH_MT6735_SERIES)
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_KP, &kp_irq_bit);
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_CONN_WDT, &conn_wdt_irq_bit);
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_LOW_BAT, &lowbattery_irq_bit);
#if defined(CONFIG_ARCH_MT6735_SERIES)
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_MD_WDT, &md1_wdt_irq_bit);
#else
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_MD1_WDT, &md1_wdt_irq_bit);
#endif /* #if defined(CONFIG_ARCH_MT6735_SERIES) */
#ifdef CONFIG_MTK_C2K_SUPPORT
	restore_gic_spm_irq(id, (long) SPM_SLEEP_ISR_RAW_STA, WAKE_SRC_C2K_WDT, &c2k_wdt_irq_bit);
#endif /* #ifdef CONFIG_MTK_C2K_SUPPORT */
#elif defined(CONFIG_ARCH_MT6755)
	restore_gic_spm_irq(id, (long) SPM_WAKEUP_STA, WAKE_SRC_R12_KP_IRQ_B, &kp_irq_bit);
	restore_gic_spm_irq(id, (long) SPM_WAKEUP_STA, WAKE_SRC_R12_CONN_WDT_IRQ_B, &conn_wdt_irq_bit);
	/* restore_gic_spm_irq(id, (long) SPM_WAKEUP_STA, WAKE_SRC_R12_LOWBATTERY_IRQ_B, &lowbattery_irq_bit); */
	restore_gic_spm_irq(id, (long) SPM_WAKEUP_STA, WAKE_SRC_R12_MD1_WDT_B, &md1_wdt_irq_bit);
#ifdef CONFIG_MTK_C2K_SUPPORT
	restore_gic_spm_irq(id, (long) SPM_WAKEUP_STA, WAKE_SRC_R12_C2K_WDT_IRQ_B, &c2k_wdt_irq_bit);
#endif /* #ifdef CONFIG_MTK_C2K_SUPPORT */
#endif /* #if defined(CONFIG_ARCH_MT6580) || defined(CONFIG_ARCH_MT6735_SERIES) */
	id->control = backup;
}
#if defined(CONFIG_ARCH_MT6580)
/* Memory-mapped layout of the ARM GIC CPU interface (GICC_*). */
struct cpu_interface {
	volatile unsigned int control;			/* 0x00 GICC_CTLR */
	volatile unsigned int priority_mask;		/* 0x04 GICC_PMR */
	volatile unsigned int binary_point;		/* 0x08 GICC_BPR */
	volatile unsigned const int interrupt_ack;	/* 0x0c GICC_IAR */
	volatile unsigned int end_of_interrupt;		/* 0x10 GICC_EOIR */
	volatile unsigned const int running_priority;	/* 0x14 */
	volatile unsigned const int highest_pending;	/* 0x18 */
	volatile unsigned int aliased_binary_point;	/* 0x1c */
	volatile unsigned const int aliased_interrupt_ack;	/* 0x20 */
	volatile unsigned int alias_end_of_interrupt;	/* 0x24 */
	volatile unsigned int aliased_highest_pending;	/* 0x28 */
};
/* Backing store for the GIC save/restore helpers below. Single instance:
 * on MT6580 only one copy is kept (shared dist state saved by the last cpu). */
struct gic_cpu_context {
	unsigned int gic_cpu_if_regs[32];	/* GIC context local to the CPU */
	unsigned int gic_dist_if_pvt_regs[32];	/* GIC SGI/PPI context local to the CPU */
	unsigned int gic_dist_if_regs[512];	/* GIC distributor context to be saved by the last cpu. */
};
struct gic_cpu_context gic_data[1];
#define gic_data_base() ((struct gic_cpu_context *)&gic_data[0])
/*
 * Saves the GIC CPU interface context.
 * Requires 5 words of memory (control, priority mask, binary points and
 * aliased highest-pending) -- note: an older comment said 3 words.
 */
static void save_gic_interface(u32 *pointer, unsigned long gic_interface_address)
{
	struct cpu_interface *ci = (struct cpu_interface *) gic_interface_address;

	pointer[0] = ci->control;
	pointer[1] = ci->priority_mask;
	pointer[2] = ci->binary_point;
	pointer[3] = ci->aliased_binary_point;
	pointer[4] = ci->aliased_highest_pending;
}
/*
 * Saves this CPU's banked parts of the distributor (SGI/PPI state).
 * Consumes up to 29 words of `pointer`: 21 words of state plus an 8-word
 * slot for the 4 SGI set-pending words (the extra 4 words appear reserved
 * -- confirm against the restore side, which reads only 4).
 * As a side effect the pending SGIs are cleared on this cpuif so they do
 * not terminate the upcoming wfi early.
 */
static void save_gic_distributor_private(u32 *pointer,
unsigned long gic_distributor_address)
{
	struct interrupt_distributor *id =
	    (struct interrupt_distributor *) gic_distributor_address;
	unsigned int *ptr = 0x0;

	/* Save SGI,PPI enable status */
	*pointer = id->enable.set[0];
	++pointer;
	/* Save SGI,PPI priority status */
	pointer = copy_words(pointer, id->priority, 8);
	/* Save SGI,PPI target status */
	pointer = copy_words(pointer, id->target, 8);
	/* Save just the PPI configurations (SGIs are not configurable) */
	*pointer = id->configuration[1];
	++pointer;
	/* Save SGI,PPI security status */
	*pointer = id->security[0];
	++pointer;
	/* Save SGI Non-security status (PPI is read-only) */
	*pointer = id->non_security_access_control[0] & 0x0ffff;
	++pointer;
	/* Save SGI,PPI pending status */
	*pointer = id->pending.set[0];
	++pointer;
	/*
	 * IPIs are different and can be replayed just by saving
	 * and restoring the set/clear pending registers
	 */
	ptr = pointer;
	copy_words(pointer, id->sgi_set_pending, 4);
	pointer += 8;
	/*
	 * Clear the pending SGIs on this cpuif so that they don't
	 * interfere with the wfi later on.
	 */
	copy_words(id->sgi_clr_pending, ptr, 4);
}
/*
 * Saves the shared (non-banked) parts of the distributor: per-SPI enable,
 * priority, target, configuration, security, pending and non-secure access
 * state, followed by the control register. Space needed grows with the SPI
 * count reported by GICD_TYPER; gic_dist_if_regs[512] is the backing store.
 * (An older comment claimed a non-zero return on pending SPI; the function
 * is void.)
 */
static void save_gic_distributor_shared(u32 *pointer,
unsigned long gic_distributor_address)
{
	struct interrupt_distributor *id =
	    (struct interrupt_distributor *) gic_distributor_address;
	unsigned num_spis, *saved_pending;

	/* Calculate how many SPIs the GIC supports */
	num_spis = 32 * (id->controller_type & 0x1f);
	/* TODO: add nonsecure stuff */
	/* Save rest of GIC configuration */
	if (num_spis) {
		pointer =
		    copy_words(pointer, id->enable.set + 1, num_spis / 32);
		pointer = copy_words(pointer, id->priority + 8, num_spis / 4);
		pointer = copy_words(pointer, id->target + 8, num_spis / 4);
		pointer =
		    copy_words(pointer, id->configuration + 2, num_spis / 16);
		pointer = copy_words(pointer, id->security + 1, num_spis / 32);
		saved_pending = pointer;
		pointer =
		    copy_words(pointer, id->pending.set + 1, num_spis / 32);
		pointer =
		    copy_words(pointer, id->non_security_access_control + 1,
			       num_spis / 16);
	}
	/* Save control register */
	*pointer = id->control;
}
/* Restore the GIC CPU interface saved by save_gic_interface(); the control
 * register is deliberately written last, after mask/binary-point setup. */
static void restore_gic_interface(u32 *pointer, unsigned long gic_interface_address)
{
	struct cpu_interface *ci = (struct cpu_interface *) gic_interface_address;

	ci->priority_mask = pointer[1];
	ci->binary_point = pointer[2];
	ci->aliased_binary_point = pointer[3];
	ci->aliased_highest_pending = pointer[4];
	/* Restore control register last */
	ci->control = pointer[0];
}
/*
 * Restore this CPU's banked distributor state in the exact order it was
 * saved by save_gic_distributor_private(). The distributor is disabled for
 * the duration and its previous control value written back at the end;
 * replaying the SGI set-pending words re-raises any saved IPIs.
 */
static void restore_gic_distributor_private(u32 *pointer,
unsigned long gic_distributor_address)
{
	struct interrupt_distributor *id =
	    (struct interrupt_distributor *) gic_distributor_address;
	unsigned tmp;

	/* First disable the distributor so we can write to its config registers */
	tmp = id->control;
	id->control = 0;
	/* Restore SGI,PPI enable status */
	id->enable.set[0] = *pointer;
	++pointer;
	/* Restore SGI,PPI priority status */
	copy_words(id->priority, pointer, 8);
	pointer += 8;
	/* Restore SGI,PPI target status */
	copy_words(id->target, pointer, 8);
	pointer += 8;
	/* Restore just the PPI configurations (SGIs are not configurable) */
	id->configuration[1] = *pointer;
	++pointer;
	/* Restore SGI,PPI security status */
	id->security[0] = *pointer;
	++pointer;
	/* restore SGI Non-security status (PPI is read-only) */
	id->non_security_access_control[0] =
	    (id->non_security_access_control[0] & 0x0ffff0000) | (*pointer);
	++pointer;
	/* Restore SGI,PPI pending status */
	id->pending.set[0] = *pointer;
	++pointer;
	/*
	 * Restore pending SGIs
	 */
	copy_words(id->sgi_set_pending, pointer, 4);
	pointer += 4;
	id->control = tmp;
}
/*
 * Restore the shared distributor state in the order written by
 * save_gic_distributor_shared(), then re-inject edge-triggered SPM wakeup
 * IRQs before finally re-enabling the distributor from the saved control
 * word (callers run with interrupts masked).
 */
static void restore_gic_distributor_shared(u32 *pointer,
unsigned long gic_distributor_address)
{
	struct interrupt_distributor *id = (struct interrupt_distributor *) gic_distributor_address;
	unsigned num_spis;

	/* First disable the distributor so we can write to its config registers */
	id->control = 0;
	/* Calculate how many SPIs the GIC supports */
	num_spis = 32 * ((id->controller_type) & 0x1f);
	/* Restore rest of GIC configuration */
	if (num_spis) {
		copy_words(id->enable.set + 1, pointer, num_spis / 32);
		pointer += num_spis / 32;
		copy_words(id->priority + 8, pointer, num_spis / 4);
		pointer += num_spis / 4;
		copy_words(id->target + 8, pointer, num_spis / 4);
		pointer += num_spis / 4;
		copy_words(id->configuration + 2, pointer, num_spis / 16);
		pointer += num_spis / 16;
		copy_words(id->security + 1, pointer, num_spis / 32);
		pointer += num_spis / 32;
		copy_words(id->pending.set + 1, pointer, num_spis / 32);
		pointer += num_spis / 32;
		copy_words(id->non_security_access_control + 1, pointer,
			   num_spis / 16);
		pointer += num_spis / 16;
		restore_edge_gic_spm_irq(gic_distributor_address);
	}
	/* We assume the I and F bits are set in the CPSR so that we will not respond to interrupts! */
	/* Restore control register */
	id->control = *pointer;
}
/* Save this CPU's GIC state: CPU interface plus banked SGI/PPI distributor
 * registers, into the shared gic_data slot. */
static void gic_cpu_save(void)
{
	save_gic_interface(gic_data_base()->gic_cpu_if_regs, gic_ci_base);
	/*
	 * TODO:
	 * Is it safe for the secondary cpu to save its context
	 * while the GIC distributor is on. Should be as its
	 * banked context and the cpu itself is the only one
	 * who can change it. Still have to consider cases e.g
	 * SGIs/Localtimers becoming pending.
	 */
	/* Save distributor interface private context */
	save_gic_distributor_private(gic_data_base()->gic_dist_if_pvt_regs,
				     gic_id_base);
}
/* Restore this CPU's GIC state: banked distributor first, then the CPU
 * interface (mirror of gic_cpu_save()). */
static void gic_cpu_restore(void)
{
	/*restores the private context */
	restore_gic_distributor_private(gic_data_base()->gic_dist_if_pvt_regs,
					gic_id_base);
	/* Restore GIC context */
	restore_gic_interface(gic_data_base()->gic_cpu_if_regs,
			      gic_ci_base);
}
/* Save the shared (non-banked) distributor state; called by the last core. */
static void gic_dist_save(void)
{
	/* Save distributor interface global context */
	save_gic_distributor_shared(gic_data_base()->gic_dist_if_regs,
				    gic_id_base);
}
/* Restore the shared (non-banked) distributor state on the first core up. */
static void gic_dist_restore(void)
{
	/*restores the global context */
	restore_gic_distributor_shared(gic_data_base()->gic_dist_if_regs,
				       gic_id_base);
}
/* Serialize L2RSTDISABLE read-modify-write against MCDI on each cluster. */
DEFINE_SPINLOCK(mp0_l2rstd_lock);
DEFINE_SPINLOCK(mp1_l2rstd_lock);

/*
 * Program MP0's L2RSTDISABLE for dormant entry: keep L2 contents unless the
 * inner domain is fully powered off. The first core in (refcount 0 -> 1)
 * records the original bit so the last core out can restore it.
 */
static inline void mp0_l2rstdisable(int flags)
{
	unsigned int read_back;
	int reg_val;

	spin_lock(&mp0_l2rstd_lock); /* avoid MCDI racing on */
	read_back = reg_read(MP0_CACHE_CONFIG);
	reg_val = _aor(read_back, ~L2RSTDISABLE,
		       IS_DORMANT_INNER_OFF(flags) ? 0 : L2RSTDISABLE);
	reg_write(MP0_CACHE_CONFIG, reg_val);
	if (GET_CLUSTER_DATA()->l2rstdisable_rfcnt++ == 0)
		GET_CLUSTER_DATA()->l2rstdisable = read_back & L2RSTDISABLE;
	spin_unlock(&mp0_l2rstd_lock);
}
/* MP1 counterpart of mp0_l2rstdisable(); same refcounted save of the
 * original L2RSTDISABLE bit under the MP1 lock. */
static inline void mp1_l2rstdisable(int flags)
{
	unsigned int read_back;
	int reg_val;

	spin_lock(&mp1_l2rstd_lock); /* avoid MCDI racing on */
	read_back = reg_read(MP1_CACHE_CONFIG);
	reg_val =
	    _aor(read_back, ~L2RSTDISABLE,
		 IS_DORMANT_INNER_OFF(flags) ? 0 : L2RSTDISABLE);
	reg_write(MP1_CACHE_CONFIG, reg_val);
	if (GET_CLUSTER_DATA()->l2rstdisable_rfcnt++ == 0)
		GET_CLUSTER_DATA()->l2rstdisable = read_back & L2RSTDISABLE;
	spin_unlock(&mp1_l2rstd_lock);
}
/*
 * Drop one refcount; the last core out (refcount hits 0) writes the
 * L2RSTDISABLE bit saved by mp0_l2rstdisable() back into MP0_CACHE_CONFIG.
 * `flags` is currently unused here.
 */
static inline void mp0_l2rstdisable_restore(int flags)
{
	unsigned int read_back;
	int reg_val;

	spin_lock(&mp0_l2rstd_lock); /* avoid MCDI racing on */
	GET_CLUSTER_DATA()->l2rstdisable_rfcnt--;
	if (GET_CLUSTER_DATA()->l2rstdisable_rfcnt == 0) {
		read_back = reg_read(MP0_CACHE_CONFIG);
		reg_val = _aor(read_back, ~L2RSTDISABLE, GET_CLUSTER_DATA()->l2rstdisable);
		reg_write(MP0_CACHE_CONFIG, reg_val);
	}
	spin_unlock(&mp0_l2rstd_lock); /* avoid MCDI racing on */
}
/* MP1 counterpart of mp0_l2rstdisable_restore(); `flags` unused here too. */
static inline void mp1_l2rstdisable_restore(int flags)
{
	unsigned int read_back;
	int reg_val;

	spin_lock(&mp1_l2rstd_lock); /* avoid MCDI racing on */
	GET_CLUSTER_DATA()->l2rstdisable_rfcnt--;
	if (GET_CLUSTER_DATA()->l2rstdisable_rfcnt == 0) {
		read_back = reg_read(MP1_CACHE_CONFIG);
		reg_val = _aor(read_back, ~L2RSTDISABLE,
			       GET_CLUSTER_DATA()->l2rstdisable);
		reg_write(MP1_CACHE_CONFIG, reg_val);
	}
	spin_unlock(&mp1_l2rstd_lock); /* avoid MCDI racing on */
}
/* Per-cluster save step (MT6580 only): stash/adjust this cluster's
 * L2RSTDISABLE according to `flags`. */
static void mt_cluster_save(int flags)
{
	if (read_cluster_id() == 0)
		mp0_l2rstdisable(flags);
	else
		mp1_l2rstdisable(flags);
}
#endif
/* Per-cluster restore step: redo BIU config (MT6735/M) and put back this
 * cluster's L2RSTDISABLE (MT6580). */
static void mt_cluster_restore(int flags)
{
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
	biu_reconfig();
#endif
#if defined(CONFIG_ARCH_MT6580)
	if (read_cluster_id() == 0)
		mp0_l2rstdisable_restore(flags);
	else
		mp1_l2rstdisable_restore(flags);
#endif
}
/*
 * Per-cpu save before power-down: save then stop the generic timer; if the
 * SPM idle status shows every other core of this cluster already down
 * (i.e. we are the last core), also save the cluster's debug registers.
 */
void mt_cpu_save(void)
{
	struct core_context *core;
	struct cluster_context *cluster;
	unsigned int sleep_sta;
	int cpuid, clusterid;

	read_id(&cpuid, &clusterid);
	core = GET_CORE_DATA();
	mt_save_generic_timer((unsigned int *)core->timer_data, 0x0);
	stop_generic_timer();

	/* 4-bit mask of cores already idle in this cluster, from the SPM */
	if (clusterid == 0)
		sleep_sta = (spm_read(CPUIDLE_CPU_IDLE_STA) >> CPUIDLE_CPU_IDLE_STA_OFFSET) & 0x0f;
	else
		sleep_sta = (spm_read(CPUIDLE_CPU_IDLE_STA) >> (CPUIDLE_CPU_IDLE_STA_OFFSET + 4)) & 0x0f;

	if ((sleep_sta | (1 << cpuid)) == 0x0f) { /* last core */
		cluster = GET_CLUSTER_DATA();
		mt_save_dbg_regs((unsigned int *)cluster->dbg_data, cpuid + (clusterid * 4));
	}
}
/*
 * Per-cpu restore after wakeup: if we are the first core of the cluster to
 * come back (all cores still flagged idle by the SPM), restore the debug
 * registers saved by mt_cpu_save(); otherwise copy them from any core that
 * is already awake. Finally restart the generic timer.
 */
void mt_cpu_restore(void)
{
	struct core_context *core;
	struct cluster_context *cluster;
	unsigned int sleep_sta;
	int cpuid, clusterid;

	read_id(&cpuid, &clusterid);
	core = GET_CORE_DATA();

	/* 4-bit mask of idle cores in this cluster, from the SPM */
	if (clusterid == 0)
		sleep_sta = (spm_read(CPUIDLE_CPU_IDLE_STA) >> CPUIDLE_CPU_IDLE_STA_OFFSET) & 0x0f;
	else
		sleep_sta = (spm_read(CPUIDLE_CPU_IDLE_STA) >> (CPUIDLE_CPU_IDLE_STA_OFFSET + 4)) & 0x0f;
	sleep_sta = (sleep_sta | (1 << cpuid));

	if (sleep_sta == 0x0f) { /* first core */
		cluster = GET_CLUSTER_DATA();
		mt_restore_dbg_regs((unsigned int *)cluster->dbg_data, cpuid + (clusterid * 4));
	} else {
		/* lowest core id not flagged idle = a core already running */
		int any = __builtin_ffs(~sleep_sta) - 1;

		mt_copy_dbg_regs(cpuid + (clusterid * 4), any + (clusterid * 4));
	}
	mt_restore_generic_timer((unsigned int *)core->timer_data, 0x0);
}
/*
 * Save everything needed to survive the dormant power-down: per-cpu state
 * always; on MT6580 also the cluster L2 setting and, when the GIC domain is
 * going off, the GIC cpu-interface and distributor state.
 */
void mt_platform_save_context(int flags)
{
	mt_cpu_save();
#if defined(CONFIG_ARCH_MT6580)
	mt_cluster_save(flags);
	if (IS_DORMANT_GIC_OFF(flags)) {
		gic_cpu_save();
		gic_dist_save();
	}
#endif
}
/*
 * Inverse of mt_platform_save_context(). On non-MT6580 SoCs the GIC state
 * survives (or is restored by firmware), so only the edge-triggered SPM
 * wakeup IRQs need to be re-injected into the distributor.
 */
void mt_platform_restore_context(int flags)
{
	mt_cluster_restore(flags);
	mt_cpu_restore();
#if defined(CONFIG_ARCH_MT6580)
	if (IS_DORMANT_GIC_OFF(flags)) {
		gic_dist_restore();
		gic_cpu_restore();
	}
#else
	if (IS_DORMANT_GIC_OFF(flags))
		restore_edge_gic_spm_irq(gic_id_base);
#endif
}
#if !defined(CONFIG_ARM64) && !defined(CONFIG_ARCH_MT6580)
/*
 * cpu_suspend() callback: power this core down via PSCI, resuming at
 * cpu_resume. Presumably a successful suspend never returns through this
 * path (the core restarts in cpu_resume), so reaching BUG() means the
 * PSCI call failed or cpu_suspend is not provided -- confirm.
 */
int mt_cpu_dormant_psci(unsigned long flags)
{
	int ret = 1;
	int cpuid, clusterid;
	struct psci_power_state pps = {
		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
		.affinity_level = 1,
	};

	read_id(&cpuid, &clusterid);
	if (psci_ops.cpu_suspend) {
		DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x203);
		ret = psci_ops.cpu_suspend(pps, virt_to_phys(cpu_resume));
	}
	BUG();
	return ret;
}
#endif
#if defined(CONFIG_ARCH_MT6580)
/*
 * cpu_suspend() callback for MT6580: disable the D-cache (flushing to the
 * level implied by INNER_OFF), then wfi and wait for the SPM to cut power.
 * Returns 2 ("dormant break") if a wakeup IRQ is already latched before
 * wfi, 1 ("dormant abort") if execution falls through the wfi.
 * NOTE(review): amp()/smp() presumably leave/rejoin coherency around the
 * wfi -- confirm against their definitions.
 */
int mt_cpu_dormant_reset(unsigned long flags)
{
	int ret = 1; /* dormant abort */
	int cpuid, clusterid;

	read_id(&cpuid, &clusterid);
	disable_dcache_safe(!!IS_DORMANT_INNER_OFF(flags));
	if ((unlikely(IS_DORMANT_BREAK_CHECK(flags)) &&
	     unlikely(SPM_IS_CPU_IRQ_OCCUR(SPM_CORE_ID())))) {
		ret = 2; /* dormant break */
		goto _break0;
	}
	amp();
	DORMANT_LOG(clusterid * 4 + cpuid, 0x301);
	wfi();
	smp();
	DORMANT_LOG(clusterid * 4 + cpuid, 0x302);
_break0:
	__enable_dcache();
	DORMANT_LOG(clusterid * 4 + cpuid, 0x303);
	return ret;
}
/*
 * Read a kernel variable while the MMU is still off: takes the (virtual)
 * address of `va` and lets mt_get_data_nommu() perform the actual load
 * (presumably after a VA->PA translation -- confirm in its definition).
 */
#define get_data_nommu(va) \
({ \
	register int data = 0; \
	register unsigned long pva = (unsigned long)(void *)(&(va)); \
	mt_get_data_nommu(data, pva); \
	data; \
})
/*
 * First code run out of reset (MMU and caches off): log progress, restore
 * the saved L2CTLR latency bits, then jump to the physical cpu_resume.
 * NOTE(review): __naked functions are documented to allow only asm bodies;
 * this relies on the mt_* macros expanding to register-safe asm -- confirm.
 */
__naked void cpu_resume_wrapper(void)
{
	register int val;

#ifdef CONFIG_MTK_RAM_CONSOLE
	reg_write(get_data_nommu(sleep_aee_rec_cpu_dormant_pa), 0x401);
#endif
	/*
	 * restore L2 SRAM latency:
	 * This register can only be written when the L2 memory system is
	 * idle. ARM recommends that you write to this register after a
	 * powerup reset before the MMU is enabled and before any AXI4 or
	 * ACP traffic has begun.
	 */
	val = get_data_nommu(dormant_data[0].poc.l2ctlr);
	if (val) {
		val &= 0x3ffff;
		mt_restore_l2ctlr(val);
	}
	/* jump to cpu_resume() */
	mt_goto_cpu_resume(&(dormant_data[0].poc.cpu_resume_phys));
}
#endif
/*
 * Undo dormant preparations when entry is aborted before power-down:
 * notify the TEE (cpu0 only), restore this cluster's L2RSTDISABLE, redo
 * the BIU config (MT6735/M) and restart the generic timer stopped by
 * mt_cpu_save(). NOTE(review): `index` is the dormant `flags` word -- the
 * caller passes flags; consider renaming.
 */
static int mt_cpu_dormant_abort(unsigned long index)
{
#if defined(CONFIG_ARCH_MT6580)
	int cpuid, clusterid;

	read_id(&cpuid, &clusterid);
#ifdef CONFIG_TRUSTONIC_TEE_SUPPORT
	if (cpuid == 0)
		mt_secure_call(MC_FC_SLEEP_CANCELLED, 0, 0, 0);
#elif defined(CONFIG_TRUSTY) && defined(CONFIG_ARCH_MT6580)
	if (cpuid == 0)
		mt_trusty_call(SMC_FC_CPU_DORMANT_CANCEL, 0, 0, 0);
#endif
	/* restore l2rstdisable setting */
	if (read_cluster_id() == 0)
		mp0_l2rstdisable_restore(index);
	else
		mp1_l2rstdisable_restore(index);
#endif
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
	biu_reconfig();
#endif
	start_generic_timer();
	return 0;
}
/*
 * mt_cpu_dormant - enter CPU dormant (core power-down) idle state.
 * @flags: dormant-mode flags, interpreted by IS_DORMANT_BREAK_CHECK(),
 *         IS_DORMANT_INNER_OFF() and the platform save/restore helpers.
 *
 * Must be called with IRQs disabled (enforced by BUG_ON below).
 *
 * Return value (masked to the low 8 bits):
 *   MT_CPU_DORMANT_BYPASS     - driver not initialised, nothing done
 *   MT_CPU_DORMANT_RESET      - woke up through a full dormant reset
 *   MT_CPU_DORMANT_ABORT      - entry aborted, state rolled back
 *   MT_CPU_DORMANT_BREAK_V(x) - a pending IRQ broke the attempt at stage x
 */
int mt_cpu_dormant(unsigned long flags)
{
	int ret;
	int cpuid, clusterid;

	if (!mt_dormant_initialized)
		return MT_CPU_DORMANT_BYPASS;

	read_id(&cpuid, &clusterid);
	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x101);
	BUG_ON(!irqs_disabled());

	/* to mark as cpu clobs vfp register.*/
	kernel_neon_begin();

	/* dormant break: bail out early if an IRQ is already pending */
	if (IS_DORMANT_BREAK_CHECK(flags) && SPM_IS_CPU_IRQ_OCCUR(SPM_CORE_ID())) {
		ret = MT_CPU_DORMANT_BREAK_V(IRQ_PENDING_1);
		goto dormant_exit;
	}

	mt_platform_save_context(flags);
	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x102);

	/* dormant break: re-check after the (possibly long) context save */
	if (IS_DORMANT_BREAK_CHECK(flags) && SPM_IS_CPU_IRQ_OCCUR(SPM_CORE_ID())) {
		mt_cpu_dormant_abort(flags);
		ret = MT_CPU_DORMANT_BREAK_V(IRQ_PENDING_2);
		goto dormant_exit;
	}

	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x103);

	/*
	 * Actually power down. Three mutually exclusive paths:
	 *  - 32-bit non-MT6580: suspend through the PSCI finisher
	 *  - ARM64 non-MT6580:  cpu_suspend(2)
	 *  - MT6580: publish the physical resume address to the secure world
	 *    (or the bootrom boot-address register) and suspend through
	 *    mt_cpu_dormant_reset.
	 */
#if !defined(CONFIG_ARM64) && !defined(CONFIG_ARCH_MT6580)
	ret = cpu_suspend(flags, mt_cpu_dormant_psci);
#elif !defined(CONFIG_ARCH_MT6580)
	ret = cpu_suspend(2);
#else
	dormant_data[0].poc.cpu_resume_phys = (void (*)(void))(long)virt_to_phys(cpu_resume);
#ifdef CONFIG_TRUSTONIC_TEE_SUPPORT
	mt_secure_call(MC_FC_MTK_SLEEP, virt_to_phys(cpu_resume), cpuid, 0);
#elif defined(CONFIG_TRUSTY) && defined(CONFIG_ARCH_MT6580)
	mt_trusty_call(SMC_FC_CPU_DORMANT, virt_to_phys(cpu_resume), cpuid, 0);
#else
	writel_relaxed(virt_to_phys(cpu_resume), DMT_BOOTROM_BOOT_ADDR);
#endif
	ret = cpu_suspend(flags, mt_cpu_dormant_reset);
#endif

	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x601);

#if defined(CONFIG_ARCH_MT6580)
	/*
	 * ARM erratum 802022 workaround after an inner-off wakeup: point the
	 * boot vector at the workaround entry, then power each secondary CPU
	 * on and off once so it executes it.
	 */
	if (IS_DORMANT_INNER_OFF(flags)) {
		reg_write(DMT_BOOTROM_BOOT_ADDR, virt_to_phys(cpu_wake_up_errata_802022));
#ifdef CONFIG_TRUSTONIC_TEE_SUPPORT
		mt_secure_call(MC_FC_SET_RESET_VECTOR, virt_to_phys(cpu_wake_up_errata_802022), 1, 0);
		if (num_possible_cpus() == 4) {
			mt_secure_call(MC_FC_SET_RESET_VECTOR, virt_to_phys(cpu_wake_up_errata_802022), 2, 0);
			mt_secure_call(MC_FC_SET_RESET_VECTOR, virt_to_phys(cpu_wake_up_errata_802022), 3, 0);
		}
#elif defined(CONFIG_TRUSTY) && defined(CONFIG_ARCH_MT6580)
		mt_trusty_call(SMC_FC_CPU_ON, virt_to_phys(cpu_wake_up_errata_802022), 1, 1);
		if (num_possible_cpus() == 4) {
			mt_trusty_call(SMC_FC_CPU_ON, virt_to_phys(cpu_wake_up_errata_802022), 2, 1);
			mt_trusty_call(SMC_FC_CPU_ON, virt_to_phys(cpu_wake_up_errata_802022), 3, 1);
		}
#endif
		/* Cycle secondaries on then off (reverse order for cpu2/cpu3). */
		spm_mtcmos_ctrl_cpu1(STA_POWER_ON, 1);
		if (num_possible_cpus() == 4) {
			spm_mtcmos_ctrl_cpu2(STA_POWER_ON, 1);
			spm_mtcmos_ctrl_cpu3(STA_POWER_ON, 1);
			spm_mtcmos_ctrl_cpu3(STA_POWER_DOWN, 1);
			spm_mtcmos_ctrl_cpu2(STA_POWER_DOWN, 1);
		}
		spm_mtcmos_ctrl_cpu1(STA_POWER_DOWN, 1);
#ifdef CONFIG_TRUSTONIC_TEE_SUPPORT
		mt_secure_call(MC_FC_ERRATA_808022, 0, 0, 0);
#elif defined(CONFIG_TRUSTY) && defined(CONFIG_ARCH_MT6580)
		mt_trusty_call(SMC_FC_CPU_ERRATA_802022, 0, 0, 0);
#endif
	}
#endif

	/* Map the cpu_suspend() result onto the driver's return codes. */
	switch (ret) {
	case 0: /* back from dormant reset */
		mt_platform_restore_context(flags);
		ret = MT_CPU_DORMANT_RESET;
		break;
	case 1: /* back from dormant abort, */
		mt_cpu_dormant_abort(flags);
		ret = MT_CPU_DORMANT_ABORT;
		break;
	case 2:
		mt_cpu_dormant_abort(flags);
		ret = MT_CPU_DORMANT_BREAK_V(IRQ_PENDING_3);
		break;
	default: /* back from dormant break, do nothing for return */
		dormant_err("EOPNOTSUPP\n");
		break;
	}

	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x602);
	/*
	 * NOTE(review): FIQs are only re-enabled on this resume path; the
	 * early "goto dormant_exit" breaks above skip local_fiq_enable().
	 * That looks intentional (FIQ state untouched before suspend on
	 * those paths) but is worth confirming.
	 */
	local_fiq_enable();

dormant_exit:
	kernel_neon_end();
	DORMANT_LOG(clusterid * MAX_CORES + cpuid, 0x0);

	return ret & 0x0ff;
}
  796. static unsigned long get_dts_node_address(char *node_compatible, int index)
  797. {
  798. unsigned long node_address = 0;
  799. struct device_node *node;
  800. if (!node_compatible)
  801. return 0;
  802. node = of_find_compatible_node(NULL, NULL, node_compatible);
  803. if (!node) {
  804. dormant_err("error: cannot find node [%s]\n", node_compatible);
  805. BUG();
  806. }
  807. node_address = (unsigned long)of_iomap(node, index);
  808. if (!node_address) {
  809. dormant_err("error: cannot iomap [%s]\n", node_compatible);
  810. BUG();
  811. }
  812. of_node_put(node);
  813. return node_address;
  814. }
  815. static u32 get_dts_node_irq_bit(char *node_compatible, const int int_size, int int_offset)
  816. {
  817. struct device_node *node;
  818. u32 node_interrupt[int_size];
  819. unsigned int irq_bit;
  820. if (!node_compatible)
  821. return 0;
  822. node = of_find_compatible_node(NULL, NULL, node_compatible);
  823. if (!node) {
  824. dormant_err("error: cannot find node [%s]\n", node_compatible);
  825. BUG();
  826. }
  827. if (of_property_read_u32_array(node, "interrupts", node_interrupt, int_size)) {
  828. dormant_err("error: cannot property_read [%s]\n", node_compatible);
  829. BUG();
  830. }
  831. /* irq[0] = 0 => spi */
  832. irq_bit = ((1 - node_interrupt[int_offset]) << 5) + node_interrupt[int_offset+1];
  833. of_node_put(node);
  834. dormant_debug("compatible = %s, irq_bit = %u\n", node_compatible, irq_bit);
  835. return irq_bit;
  836. }
/*
 * Per-SoC tables of the DT nodes this driver needs: register base addresses
 * (mapped into the file-scope *_base globals) and wakeup-IRQ bit positions
 * (the *_irq_bit globals). Exactly one pair of these functions is compiled,
 * selected by CONFIG_ARCH_*. The (compatible, size, offset) triples must
 * match each SoC's device tree.
 */
#ifdef CONFIG_ARCH_MT6580
static void get_dts_nodes_address(void)
{
	mcucfg_base = get_dts_node_address("mediatek,mt6580-mcucfg", 0);
	infracfg_ao_base = get_dts_node_address("mediatek,INFRACFG_AO", 0);
	gic_id_base = get_dts_node_address("arm,cortex-a7-gic", 0);
	gic_ci_base = get_dts_node_address("arm,cortex-a7-gic", 1);
}

static void get_dts_nodes_irq_bit(void)
{
	kp_irq_bit = get_dts_node_irq_bit("mediatek,mt6580-keypad", 3, 0);
	conn_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mt6580-consys", 6, 3);
	/* NOTE(review): mt6735 auxadc compatible on MT6580 — looks like a
	 * shared-IP binding rather than a typo; confirm against the MT6580 DT. */
	lowbattery_irq_bit = get_dts_node_irq_bit("mediatek,mt6735-auxadc", 3, 0);
	md1_wdt_irq_bit = get_dts_node_irq_bit("mediatek,ap_ccif0", 6, 3);
}
#elif defined(CONFIG_ARCH_MT6735_SERIES)
static void get_dts_nodes_address(void)
{
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6735M)
	biu_base = get_dts_node_address("mediatek,mt6735-mcu_biu", 0);
#endif
	gic_id_base = get_dts_node_address("mediatek,mt6735-gic", 0);
}

static void get_dts_nodes_irq_bit(void)
{
	kp_irq_bit = get_dts_node_irq_bit("mediatek,mt6735-keypad", 3, 0);
	conn_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mt6735-consys", 6, 3);
	lowbattery_irq_bit = get_dts_node_irq_bit("mediatek,mt6735-auxadc", 3, 0);
	md1_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mdcldma", 9, 6);
#ifdef CONFIG_MTK_C2K_SUPPORT
	c2k_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mdc2k", 3, 0);
#endif
}
#elif defined(CONFIG_ARCH_MT6755)
static void get_dts_nodes_address(void)
{
	gic_id_base = get_dts_node_address("mediatek,mt6735-gic", 0);
}

static void get_dts_nodes_irq_bit(void)
{
	kp_irq_bit = get_dts_node_irq_bit("mediatek,mt6755-keypad", 3, 0);
	conn_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mt6755-consys", 6, 3);
	/* NULL compatible => helper returns 0 (wakeup source not present). */
	lowbattery_irq_bit = get_dts_node_irq_bit(NULL, 3, 0);
	md1_wdt_irq_bit = get_dts_node_irq_bit("mediatek,mdcldma", 9, 6);
#ifdef CONFIG_MTK_C2K_SUPPORT
	c2k_wdt_irq_bit = get_dts_node_irq_bit("mediatek,ap2c2k_ccif", 6, 3);
#endif
}
#elif defined(CONFIG_ARCH_MT6797)
static void get_dts_nodes_address(void)
{
	gic_id_base = get_dts_node_address("arm,gic-v3", 0);
}

static void get_dts_nodes_irq_bit(void)
{
	kp_irq_bit = get_dts_node_irq_bit("mediatek,mt6797-keypad", 3, 0);
	conn_wdt_irq_bit = get_dts_node_irq_bit(NULL, 6, 3);
	lowbattery_irq_bit = get_dts_node_irq_bit("mediatek,auxadc", 3, 0);
	md1_wdt_irq_bit = get_dts_node_irq_bit(NULL, 9, 6);
#ifdef CONFIG_MTK_C2K_SUPPORT
	c2k_wdt_irq_bit = get_dts_node_irq_bit("mediatek,ap2c2k_ccif", 6, 3);
#endif
}
#endif
/*
 * mt_dormant_dts_map - resolve all DT-derived state the driver needs:
 * register base mappings first, then wakeup-IRQ bit positions.
 * Always returns 0 (the helpers BUG() on any failure).
 */
static int mt_dormant_dts_map(void)
{
	get_dts_nodes_address();
	get_dts_nodes_irq_bit();
	return 0;
}
  907. int mt_cpu_dormant_init(void)
  908. {
  909. int cpuid, clusterid;
  910. read_id(&cpuid, &clusterid);
  911. if (mt_dormant_initialized == 1)
  912. return MT_CPU_DORMANT_BYPASS;
  913. mt_dormant_dts_map();
  914. #if defined(CONFIG_ARCH_MT6580)
  915. /* enable bootrom power down mode */
  916. reg_write(DMT_BOOTROM_PWR_CTRL, reg_read(DMT_BOOTROM_PWR_CTRL) | SW_ROM_PD);
  917. mt_save_l2ctlr(dormant_data[0].poc.l2ctlr);
  918. #endif
  919. #ifdef CONFIG_MTK_RAM_CONSOLE
  920. sleep_aee_rec_cpu_dormant_va = aee_rr_rec_cpu_dormant();
  921. sleep_aee_rec_cpu_dormant_pa = aee_rr_rec_cpu_dormant_pa();
  922. BUG_ON(!sleep_aee_rec_cpu_dormant_va || !sleep_aee_rec_cpu_dormant_pa);
  923. #if !defined(CONFIG_ARCH_MT6580)
  924. #if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
  925. kernel_smc_msg(0, 2, (long) sleep_aee_rec_cpu_dormant_pa);
  926. #endif
  927. #endif
  928. dormant_debug("init aee_rec_cpu_dormant: va:%p pa:%p\n",
  929. sleep_aee_rec_cpu_dormant_va, sleep_aee_rec_cpu_dormant_pa);
  930. #endif
  931. mt_dormant_initialized = 1;
  932. return 0;
  933. }