/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

#if defined(CONFIG_SMP) && defined(CONFIG_PM)
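
/*
 * DO_SMC issues a Secure Monitor Call with a DSB on either side so that
 * all outstanding memory transactions complete before and after entering
 * the secure world. As at the call sites below, the monitor service index
 * is loaded into r12 and the arguments into the low registers beforehand.
 */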
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm

ppa_zero_params:
	.word		0x0

ppa_por_params:
	.word		1, 0

#ifdef CONFIG_ARCH_OMAP4
/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power-down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power
 * states. Post WFI, the CPU transitions to the DORMANT or OFF power state
 * and on wake-up follows a full CPU reset path via ROM code to the CPU
 * restore code. The restore function pointer is stored at
 * CPUx_WAKEUP_NS_PA_ADDR_OFFSET. It returns to the caller for the CPU
 * INACTIVE and ON power states, or in case the CPU fails to transition
 * to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * its stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved to avoid possible stack corruption.
 */
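
/*
 * Call-side sketch (illustrative, not part of this file): the MPUSS PM
 * code reaches this finisher through the generic ARM cpu_suspend() path,
 * roughly (simplified from omap-mpuss-lowpower.c):
 *
 *	#include <asm/suspend.h>
 *
 *	int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 *	{
 *		unsigned int save_state;	// 0..3, see @cpu_state above
 *		// ... program power domains, publish the wakeup address ...
 *		cpu_suspend(save_state, omap4_finish_suspend);
 *		// Execution resumes here on wake-up (via omap4_cpu_resume
 *		// and the generic cpu_resume for the OFF/DORMANT cases).
 *	}
 */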
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No low-power state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * the SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb
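
	/*
	 * C-equivalent sketch of the read-modify-write above
	 * (illustrative only):
	 *
	 *	unsigned int sctlr;
	 *
	 *	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	 *	sctlr &= ~(1 << 2);		// clear SCTLR.C
	 *	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
	 *	isb();
	 */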
	/*
	 * Invalidate the L1 data cache. Even though only an invalidate
	 * is necessary, the exported flush API is used here. Doing a
	 * clean on an already-clean cache is almost a NOP.
	 */
	bl	v7_flush_dcache_all

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
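
	/*
	 * GP-device sketch (illustrative): the scu_gp_set path above is
	 * the assembly counterpart of the generic SCU helper from
	 * <asm/smp_scu.h>, roughly:
	 *
	 *	// mode comes from SAR RAM: SCU_PM_DORMANT or SCU_PM_POWEROFF
	 *	scu_power_mode(omap4_get_scu_base(), mode);
	 *
	 * On HS devices the SCU power status register is secure, so the
	 * same update is requested through the OMAP4_MON_SCU_PWR_INDEX
	 * monitor service instead.
	 */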
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they use spinlocks. We are out of coherency here with the data
	 * cache disabled. The spinlock implementation uses exclusive
	 * load/store instructions, which can fail without the data cache
	 * being enabled. OMAP4 hardware doesn't support an external
	 * exclusive monitor which could overcome this, so taking the
	 * lock here could deadlock the CPU.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif
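
/*
 * C sketch of the lock-free L2X0 maintenance above (illustrative):
 * clean and invalidate all 16 ways, then issue a cache sync, polling
 * each register until the hardware reports completion:
 *
 *	void __iomem *l2_base = omap4_get_l2cache_base();
 *
 *	writel_relaxed(0xffff, l2_base + L2X0_CLEAN_INV_WAY);
 *	while (readl_relaxed(l2_base + L2X0_CLEAN_INV_WAY) & 0xffff)
 *		;	// wait until every way bit clears
 *	writel_relaxed(0, l2_base + L2X0_CACHE_SYNC);
 *	while (readl_relaxed(l2_base + L2X0_CACHE_SYNC) & 0x1)
 *		;	// wait for the sync to drain
 *
 * The cache-l2x0.c helpers perform the same sequence but under a
 * spinlock, which is unsafe here with the data cache disabled.
 */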
do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU ends up here when it failed to enter the OFF/DORMANT
	 * state, or when no low-power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in the SCU
	 * power status so that the CPU is back in coherency.
	 * In non-coherent mode the CPU can lock up and lead to
	 * system deadlock.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in SAR RAM while entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
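
/*
 * Setup-side sketch (illustrative): before entering OFF/DORMANT, the PM
 * code publishes the physical resume address for the wakeup ROM, roughly
 * (simplified from omap-mpuss-lowpower.c):
 *
 *	writel_relaxed(virt_to_phys(omap4_cpu_resume),
 *		       sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET);
 *
 * The ROM reads this slot on wake-up and jumps here in non-secure mode.
 */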
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init, and for CPU1 a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1, and the PPA version must
	 * be 1.4.0 or newer.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r3, ppa_zero_params		@ Pointer to parameters
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register; r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
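
	/*
	 * Save-side sketch (illustrative): the SAR RAM slots consumed
	 * below are filled at PM init time, roughly (simplified from
	 * omap-mpuss-lowpower.c):
	 *
	 *	writel_relaxed(l2x0_saved_regs.aux_ctrl,
	 *		       sar_base + L2X0_AUXCTRL_OFFSET);
	 *	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
	 *		       sar_base + L2X0_PREFETCH_CTRL_OFFSET);
	 */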
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r3, ppa_por_params
	str	r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX	@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
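
/*
 * When CONFIG_OMAP4_ERRATA_I688 is enabled, a real omap_bus_sync()
 * implementation is provided elsewhere in the platform code (a write to
 * strongly-ordered memory that drains the interconnect write buffers).
 * This stub keeps the call in omap_do_wfi() below working when the
 * erratum workaround is compiled out.
 */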
#ifndef CONFIG_OMAP4_ERRATA_I688
ENTRY(omap_bus_sync)
	ret	lr
ENDPROC(omap_bus_sync)
#endif
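
/*
 * Caller sketch (illustrative): omap_do_wfi() is declared in common.h
 * and is also used directly from C idle paths, e.g. a default idle hook
 * along the lines of:
 *
 *	void omap_default_idle(void)
 *	{
 *		omap_do_wfi();
 *	}
 */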
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}

	/* Drain interconnect write buffers. */
	bl	omap_bus_sync

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in an idle, low-power state. The CPU can speculatively
	 * prefetch instructions, so add NOPs after the WFI. Sixteen
	 * NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ENDPROC(omap_do_wfi)