/*
 * Copyright (c) 2015 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  14. #include <linux/errno.h>
  15. #include <asm/cacheflush.h>
  16. #include <mt-plat/sync_write.h>
  17. #include <mach/mt_spm_mtcmos.h>
  18. #include "mt-smp.h"
  19. #include "smp.h"
  20. #include "hotplug.h"
  21. atomic_t hotplug_cpu_count = ATOMIC_INIT(1);
  22. static inline void cpu_enter_lowpower(unsigned int cpu)
  23. {
  24. if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0)
  25. && (cpu_online(7) == 0)) || ((cpu == 5) && (cpu_online(4) == 0)
  26. && (cpu_online(6) == 0)
  27. && (cpu_online(7) == 0))
  28. || ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0)
  29. && (cpu_online(7) == 0)) || ((cpu == 7) && (cpu_online(4) == 0)
  30. && (cpu_online(5) == 0)
  31. && (cpu_online(6) == 0))) {
  32. __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2
  33. ();
  34. /* Switch the processor from SMP mode to AMP mode by clearing
  35. the ACTLR SMP bit */
  36. __switch_to_amp();
  37. /* Execute an ISB instruction to ensure that all of the CP15
  38. register changes from the previous steps have been
  39. committed */
  40. isb();
  41. /* Execute a DSB instruction to ensure that all cache, TLB
  42. and branch predictor maintenance operations issued by any
  43. processor in the multiprocessor device before the SMP bit
  44. was cleared have completed */
  45. mb();
  46. /* Disable snoop requests and DVM message requests */
  47. REG_WRITE((void *)CCI400_SI3_SNOOP_CONTROL,
  48. readl((void *)CCI400_SI3_SNOOP_CONTROL) &
  49. ~(SNOOP_REQ | DVM_MSG_REQ));
  50. while (readl((void *)CCI400_STATUS) & CHANGE_PENDING)
  51. ;
  52. /* Disable CA15L snoop function */
  53. REG_WRITE((void *)MP1_AXI_CONFIG,
  54. readl((void *)MP1_AXI_CONFIG) | ACINACTM);
  55. } else {
  56. __disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2
  57. ();
  58. /* Execute a CLREX instruction */
  59. __asm__ __volatile__("clrex");
  60. /* Switch the processor from SMP mode to AMP mode by
  61. clearing the ACTLR SMP bit */
  62. __switch_to_amp();
  63. }
  64. }
  65. static inline void cpu_leave_lowpower(unsigned int cpu)
  66. {
  67. if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0)
  68. && (cpu_online(7) == 0)) || ((cpu == 5) && (cpu_online(4) == 0)
  69. && (cpu_online(6) == 0)
  70. && (cpu_online(7) == 0))
  71. || ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0)
  72. && (cpu_online(7) == 0)) || ((cpu == 7) && (cpu_online(4) == 0)
  73. && (cpu_online(5) == 0)
  74. && (cpu_online(6) == 0))) {
  75. /* Enable CA15L snoop function */
  76. REG_WRITE((void *)MP1_AXI_CONFIG,
  77. readl((void *)MP1_AXI_CONFIG) & ~ACINACTM);
  78. /* Enable snoop requests and DVM message requests */
  79. REG_WRITE((void *)CCI400_SI3_SNOOP_CONTROL,
  80. readl((void *)CCI400_SI3_SNOOP_CONTROL) |
  81. (SNOOP_REQ | DVM_MSG_REQ));
  82. while (readl((void *)CCI400_STATUS) & CHANGE_PENDING)
  83. ;
  84. }
  85. /* Set the ACTLR.SMP bit to 1 for SMP mode */
  86. __switch_to_smp();
  87. /* Enable dcache */
  88. __enable_dcache();
  89. }
  90. static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
  91. {
  92. /* Just enter wfi for now. TODO: Properly shut off the cpu. */
  93. for (;;) {
  94. /* Execute an ISB instruction to ensure that all of the CP15
  95. register changes from the previous steps have been
  96. committed */
  97. isb();
  98. /* Execute a DSB instruction to ensure that all cache, TLB and
  99. branch predictor maintenance operations issued by any
  100. processor in the multiprocessor device before the SMP bit
  101. was cleared have completed */
  102. mb();
  103. /*
  104. * here's the WFI
  105. */
  106. __asm__ __volatile__("wfi");
  107. if (pen_release == cpu) {
  108. /*
  109. * OK, proper wakeup, we're done
  110. */
  111. break;
  112. }
  113. /*
  114. * Getting here, means that we have come out of WFI without
  115. * having been woken up - this shouldn't happen
  116. *
  117. * Just note it happening - when we're woken, we can report
  118. * its occurrence.
  119. */
  120. (*spurious)++;
  121. }
  122. }
  123. /*
  124. * mt_cpu_kill:
  125. * @cpu:
  126. * Return TBD.
  127. */
  128. int mt_cpu_kill(unsigned int cpu)
  129. {
  130. HOTPLUG_INFO("mt_cpu_kill, cpu: %d\n", cpu);
  131. #ifdef CONFIG_HOTPLUG_WITH_POWER_CTRL
  132. switch (cpu) {
  133. case 1:
  134. spm_mtcmos_ctrl_cpu1(STA_POWER_DOWN, 1);
  135. break;
  136. case 2:
  137. spm_mtcmos_ctrl_cpu2(STA_POWER_DOWN, 1);
  138. break;
  139. case 3:
  140. spm_mtcmos_ctrl_cpu3(STA_POWER_DOWN, 1);
  141. break;
  142. case 4:
  143. spm_mtcmos_ctrl_cpu4(STA_POWER_DOWN, 1);
  144. break;
  145. case 5:
  146. spm_mtcmos_ctrl_cpu5(STA_POWER_DOWN, 1);
  147. break;
  148. case 6:
  149. spm_mtcmos_ctrl_cpu6(STA_POWER_DOWN, 1);
  150. break;
  151. case 7:
  152. spm_mtcmos_ctrl_cpu7(STA_POWER_DOWN, 1);
  153. break;
  154. default:
  155. break;
  156. }
  157. #endif
  158. atomic_dec(&hotplug_cpu_count);
  159. return 1;
  160. }
  161. /*
  162. * mt_cpu_die: shutdown a CPU
  163. * @cpu:
  164. */
  165. void mt_cpu_die(unsigned int cpu)
  166. {
  167. int spurious = 0;
  168. HOTPLUG_INFO("mt_cpu_die, cpu: %d\n", cpu);
  169. /*
  170. * we're ready for shutdown now, so do it
  171. */
  172. cpu_enter_lowpower(cpu);
  173. platform_do_lowpower(cpu, &spurious);
  174. /*
  175. * bring this CPU back into the world of cache
  176. * coherency, and then restore interrupts
  177. */
  178. cpu_leave_lowpower(cpu);
  179. if (spurious)
  180. HOTPLUG_INFO(
  181. "spurious wakeup call, cpu: %d, spurious: %d\n",
  182. cpu, spurious);
  183. }
  184. /*
  185. * mt_cpu_disable:
  186. * @cpu:
  187. * Return error code.
  188. */
  189. int mt_cpu_disable(unsigned int cpu)
  190. {
  191. /*
  192. * we don't allow CPU 0 to be shutdown (it is still too special
  193. * e.g. clock tick interrupts)
  194. */
  195. HOTPLUG_INFO("mt_cpu_disable, cpu: %d\n", cpu);
  196. return cpu == 0 ? -EPERM : 0;
  197. }