/* platmcpm.c */
  1. /*
  2. * Copyright (c) 2013-2014 Linaro Ltd.
  3. * Copyright (c) 2013-2014 Hisilicon Limited.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. */
  9. #include <linux/delay.h>
  10. #include <linux/io.h>
  11. #include <linux/memblock.h>
  12. #include <linux/of_address.h>
  13. #include <asm/cputype.h>
  14. #include <asm/cp15.h>
  15. #include <asm/mcpm.h>
  16. #include "core.h"
  17. /* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x]
  18. * 1 -- unreset; 0 -- reset
  19. */
  20. #define CORE_RESET_BIT(x) (1 << x)
  21. #define NEON_RESET_BIT(x) (1 << (x + 4))
  22. #define CORE_DEBUG_RESET_BIT(x) (1 << (x + 9))
  23. #define CLUSTER_L2_RESET_BIT (1 << 8)
  24. #define CLUSTER_DEBUG_RESET_BIT (1 << 13)
  25. /*
  26. * bits definition in SC_CPU_RESET_STATUS[x]
  27. * 1 -- reset status; 0 -- unreset status
  28. */
  29. #define CORE_RESET_STATUS(x) (1 << x)
  30. #define NEON_RESET_STATUS(x) (1 << (x + 4))
  31. #define CORE_DEBUG_RESET_STATUS(x) (1 << (x + 9))
  32. #define CLUSTER_L2_RESET_STATUS (1 << 8)
  33. #define CLUSTER_DEBUG_RESET_STATUS (1 << 13)
  34. #define CORE_WFI_STATUS(x) (1 << (x + 16))
  35. #define CORE_WFE_STATUS(x) (1 << (x + 20))
  36. #define CORE_DEBUG_ACK(x) (1 << (x + 24))
  37. #define SC_CPU_RESET_REQ(x) (0x520 + (x << 3)) /* reset */
  38. #define SC_CPU_RESET_DREQ(x) (0x524 + (x << 3)) /* unreset */
  39. #define SC_CPU_RESET_STATUS(x) (0x1520 + (x << 3))
  40. #define FAB_SF_MODE 0x0c
  41. #define FAB_SF_INVLD 0x10
  42. /* bits definition in FB_SF_INVLD */
  43. #define FB_SF_INVLD_START (1 << 8)
  44. #define HIP04_MAX_CLUSTERS 4
  45. #define HIP04_MAX_CPUS_PER_CLUSTER 4
  46. #define POLL_MSEC 10
  47. #define TIMEOUT_MSEC 1000
/* register windows, mapped from DT in hip04_mcpm_init() */
static void __iomem *sysctrl, *fabric;
/* per-CPU use count; non-zero means the CPU is up or being powered up */
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
/* protects hip04_cpu_table and serializes the power up/down sequences */
static DEFINE_SPINLOCK(boot_lock);
static u32 fabric_phys_addr;
/*
 * "boot-method" property of the hisilicon,hip04-bootwrapper DT node:
 * [0]: bootwrapper physical address
 * [1]: bootwrapper size
 * [2]: relocation address
 * [3]: relocation size
 */
static u32 hip04_boot_method[4];
  59. static bool hip04_cluster_is_down(unsigned int cluster)
  60. {
  61. int i;
  62. for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++)
  63. if (hip04_cpu_table[cluster][i])
  64. return false;
  65. return true;
  66. }
  67. static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
  68. {
  69. unsigned long data;
  70. if (!fabric)
  71. BUG();
  72. data = readl_relaxed(fabric + FAB_SF_MODE);
  73. if (on)
  74. data |= 1 << cluster;
  75. else
  76. data &= ~(1 << cluster);
  77. writel_relaxed(data, fabric + FAB_SF_MODE);
  78. do {
  79. cpu_relax();
  80. } while (data != readl_relaxed(fabric + FAB_SF_MODE));
  81. }
/*
 * MCPM power_up handler: release @cpu of @cluster from reset so it can
 * enter the kernel through the MCPM entry point.
 *
 * Returns 0 on success, -ENODEV when the sysctrl registers were never
 * mapped, -EINVAL for an out-of-range cpu/cluster.
 */
static int hip04_mcpm_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned long data;
	void __iomem *sys_dreq, *sys_status;

	if (!sysctrl)
		return -ENODEV;
	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
		return -EINVAL;

	spin_lock_irq(&boot_lock);

	/* non-zero use count: the CPU is already up (or being brought up) */
	if (hip04_cpu_table[cluster][cpu])
		goto out;

	sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
	sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
	if (hip04_cluster_is_down(cluster)) {
		/* first CPU of the cluster: deassert cluster debug reset */
		data = CLUSTER_DEBUG_RESET_BIT;
		writel_relaxed(data, sys_dreq);
		/* poll until the status register shows the reset released */
		do {
			cpu_relax();
			data = readl_relaxed(sys_status);
		} while (data & CLUSTER_DEBUG_RESET_STATUS);
	}

	/* deassert core, NEON and debug resets for this CPU ... */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sys_dreq);
	/* ... and wait until the status bits no longer match the request */
	do {
		cpu_relax();
	} while (data == readl_relaxed(sys_status));
	/*
	 * We may fail to power up core again without this delay.
	 * It's not mentioned in document. It's found by test.
	 */
	udelay(20);
out:
	/* count this request even when the CPU was already up */
	hip04_cpu_table[cluster][cpu]++;
	spin_unlock_irq(&boot_lock);

	return 0;
}
/*
 * MCPM power_down handler, executed on the CPU that is going down.
 * Drops this CPU's use count; the last CPU of a cluster additionally
 * flushes the whole cache hierarchy and removes the cluster from the
 * fabric snoop filter. Ends in wfi() unless a concurrent power_up()
 * request raced ahead of us.
 */
static void hip04_mcpm_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool skip_wfi = false, last_man = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	__mcpm_cpu_going_down(cpu, cluster);

	spin_lock(&boot_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	hip04_cpu_table[cluster][cpu]--;
	if (hip04_cpu_table[cluster][cpu] == 1) {
		/* A power_up request went ahead of us. */
		skip_wfi = true;
	} else if (hip04_cpu_table[cluster][cpu] > 1) {
		pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
		BUG();
	}

	last_man = hip04_cluster_is_down(cluster);
	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		spin_unlock(&boot_lock);
		/* Since it's Cortex A15, disable L2 prefetching. */
		asm volatile(
		"mcr p15, 1, %0, c15, c0, 3 \n\t"
		"isb \n\t"
		"dsb "
		: : "r" (0x400) );
		/* last man: flush all cache levels before killing coherency */
		v7_exit_coherency_flush(all);
		hip04_set_snoop_filter(cluster, 0);
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		spin_unlock(&boot_lock);
		/* not last man: flush only to the level of unification */
		v7_exit_coherency_flush(louis);
	}

	__mcpm_cpu_down(cpu, cluster);

	if (!skip_wfi)
		wfi();
}
/*
 * MCPM wait_for_powerdown handler: poll until @cpu of @cluster has
 * entered WFI, then assert its resets and wait for the reset status
 * to confirm.
 *
 * Returns 0 on success, -EBUSY if the CPU gained a use count (was
 * powered up again) meanwhile, -ETIMEDOUT if either poll loop expires.
 */
static int hip04_mcpm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned int data, tries, count;
	int ret = -ETIMEDOUT;

	BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);

	count = TIMEOUT_MSEC / POLL_MSEC;
	spin_lock_irq(&boot_lock);
	for (tries = 0; tries < count; tries++) {
		/* a concurrent power_up bumped the use count: give up */
		if (hip04_cpu_table[cluster][cpu]) {
			ret = -EBUSY;
			goto err;
		}
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_WFI_STATUS(cpu))
			break;
		/* drop the lock while sleeping so power_up can proceed */
		spin_unlock_irq(&boot_lock);
		/* Wait for clean L2 when the whole cluster is down. */
		msleep(POLL_MSEC);
		spin_lock_irq(&boot_lock);
	}
	if (tries >= count)
		goto err;

	/* the CPU is in WFI: assert core, NEON and debug resets */
	data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \
	       CORE_DEBUG_RESET_BIT(cpu);
	writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
	/* wait until the status register reports the core in reset */
	for (tries = 0; tries < count; tries++) {
		cpu_relax();
		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
		if (data & CORE_RESET_STATUS(cpu))
			break;
	}
	if (tries >= count)
		goto err;

	spin_unlock_irq(&boot_lock);
	return 0;
err:
	spin_unlock_irq(&boot_lock);
	return ret;
}
  198. static void hip04_mcpm_powered_up(void)
  199. {
  200. unsigned int mpidr, cpu, cluster;
  201. mpidr = read_cpuid_mpidr();
  202. cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
  203. cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
  204. spin_lock(&boot_lock);
  205. if (!hip04_cpu_table[cluster][cpu])
  206. hip04_cpu_table[cluster][cpu] = 1;
  207. spin_unlock(&boot_lock);
  208. }
/*
 * Early MCPM power-up hook, entered with the MMU off on a CPU coming
 * out of reset. Only the cluster affinity level (r0 != 0) does work:
 * it sets this cluster's bit in the fabric snoop filter and spins
 * until the bit reads back, mirroring hip04_set_snoop_filter().
 * Written as position-independent __naked asm since virtual addresses
 * are not valid yet; the "2:" literal pool holds the link-time address
 * of itself plus &fabric_phys_addr, so "adr/sub" computes the
 * phys-virt offset used to reach the fabric_phys_addr variable.
 */
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
	/* nothing to do at CPU level */
"	cmp	r0, #0 \n"
"	bxeq	lr \n"
	/* calculate fabric phys address */
"	adr	r2, 2f \n"
"	ldmia	r2, {r1, r3} \n"
"	sub	r0, r2, r1 \n"
"	ldr	r2, [r0, r3] \n"
	/* get cluster id from MPIDR */
"	mrc	p15, 0, r0, c0, c0, 5 \n"
"	ubfx	r1, r0, #8, #8 \n"
	/* 1 << cluster id */
"	mov	r0, #1 \n"
"	mov	r3, r0, lsl r1 \n"
	/* snoop filter bit already set: nothing to do */
"	ldr	r0, [r2, #"__stringify(FAB_SF_MODE)"] \n"
"	tst	r0, r3 \n"
"	bxne	lr \n"
	/* set the bit and wait until the readback confirms it */
"	orr	r1, r0, r3 \n"
"	str	r1, [r2, #"__stringify(FAB_SF_MODE)"] \n"
"1:	ldr	r0, [r2, #"__stringify(FAB_SF_MODE)"] \n"
"	tst	r0, r3 \n"
"	beq	1b \n"
"	bx	lr \n"

"	.align	2 \n"
"2:	.word	. \n"
"	.word	fabric_phys_addr \n"
	);
}
/* MCPM backend operations for HiP04, registered by hip04_mcpm_init(). */
static const struct mcpm_platform_ops hip04_mcpm_ops = {
	.power_up		= hip04_mcpm_power_up,
	.power_down		= hip04_mcpm_power_down,
	.wait_for_powerdown	= hip04_mcpm_wait_for_powerdown,
	.powered_up		= hip04_mcpm_powered_up,
};
  245. static bool __init hip04_cpu_table_init(void)
  246. {
  247. unsigned int mpidr, cpu, cluster;
  248. mpidr = read_cpuid_mpidr();
  249. cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
  250. cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
  251. if (cluster >= HIP04_MAX_CLUSTERS ||
  252. cpu >= HIP04_MAX_CPUS_PER_CLUSTER) {
  253. pr_err("%s: boot CPU is out of bound!\n", __func__);
  254. return false;
  255. }
  256. hip04_set_snoop_filter(cluster, 1);
  257. hip04_cpu_table[cluster][cpu] = 1;
  258. return true;
  259. }
/*
 * Probe the bootwrapper, system controller and fabric from the device
 * tree, map their registers, register the MCPM platform ops and
 * publish the MCPM entry point to secondary cores via the relocation
 * area. Returns 0 on success or a negative errno; on failure every
 * mapping and the memblock reservation are rolled back in reverse
 * order through the goto chain.
 */
static int __init hip04_mcpm_init(void)
{
	struct device_node *np, *np_sctl, *np_fab;
	struct resource fab_res;
	void __iomem *relocation;
	int ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper");
	if (!np)
		goto err;
	ret = of_property_read_u32_array(np, "boot-method",
					 &hip04_boot_method[0], 4);
	if (ret)
		goto err;
	np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	if (!np_sctl)
		goto err;
	np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric");
	if (!np_fab)
		goto err;

	/* keep the bootwrapper memory out of the kernel's allocator */
	ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]);
	if (ret)
		goto err;

	relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]);
	if (!relocation) {
		pr_err("failed to map relocation space\n");
		ret = -ENOMEM;
		goto err_reloc;
	}
	sysctrl = of_iomap(np_sctl, 0);
	if (!sysctrl) {
		pr_err("failed to get sysctrl base\n");
		ret = -ENOMEM;
		goto err_sysctrl;
	}
	ret = of_address_to_resource(np_fab, 0, &fab_res);
	if (ret) {
		pr_err("failed to get fabric base phys\n");
		goto err_fabric;
	}
	fabric_phys_addr = fab_res.start;
	/* flush to RAM: hip04_mcpm_power_up_setup() reads this with MMU off */
	sync_cache_w(&fabric_phys_addr);
	fabric = of_iomap(np_fab, 0);
	if (!fabric) {
		pr_err("failed to get fabric base\n");
		ret = -ENOMEM;
		goto err_fabric;
	}

	if (!hip04_cpu_table_init()) {
		ret = -EINVAL;
		goto err_table;
	}
	ret = mcpm_platform_register(&hip04_mcpm_ops);
	if (ret) {
		goto err_table;
	}
	/*
	 * Fill the instruction address that is used after secondary core
	 * out of reset.
	 */
	writel_relaxed(hip04_boot_method[0], relocation);
	writel_relaxed(0xa5a5a5a5, relocation + 4);	/* magic number */
	writel_relaxed(virt_to_phys(mcpm_entry_point), relocation + 8);
	writel_relaxed(0, relocation + 12);
	iounmap(relocation);

	mcpm_sync_init(hip04_mcpm_power_up_setup);
	mcpm_smp_set_ops();
	pr_info("HiP04 MCPM initialized\n");
	return ret;	/* ret is 0 here: mcpm_platform_register() succeeded */
err_table:
	iounmap(fabric);
err_fabric:
	iounmap(sysctrl);
err_sysctrl:
	iounmap(relocation);
err_reloc:
	memblock_free(hip04_boot_method[0], hip04_boot_method[1]);
err:
	return ret;
}
early_initcall(hip04_mcpm_init);