/*
 * arch/arm/kernel/topology.c
 *
 * Copyright (C) 2011 Linaro Limited.
 * Written by: Vincent Guittot
 *
 * based on arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/topology.h>
/*
 * cpu capacity scale management
 */

/*
 * cpu capacity table
 * This per cpu data structure describes the relative capacity of each core.
 * On a heterogeneous system, cores don't have the same computation capacity
 * and we reflect that difference in the cpu_capacity field so the scheduler
 * can take this difference into account during load balance. A per cpu
 * structure is preferred because each CPU updates its own cpu_capacity field
 * during the load balance except for idle cores. One idle core is selected
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale);

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

unsigned long arch_get_max_cpu_capacity(int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}
#ifdef CONFIG_OF
struct cpu_efficiency {
	const char *compatible;
	unsigned long efficiency;
};
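
/*
 * Map a cpu-map node back to a logical CPU number by matching the node's
 * "cpu" phandle against each possible CPU's device-tree node. Returns -1
 * when no match is found.
 */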
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}
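
/*
 * Parse a core node of the cpu-map: either the core contains threadN
 * children (SMT) or it maps directly to a CPU via its "cpu" phandle.
 * Fills in the socket/core/thread ids of the matching cpu_topology entry.
 */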
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].socket_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].socket_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}
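
/*
 * Recursively parse a cluster node of the cpu-map. A cpu-map of the form
 * below (an illustrative two-cluster big.LITTLE layout, not taken from a
 * particular DT) matches the clusterN/coreN naming this parser looks for:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 */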
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	int core_id = 0;
	static int cluster_id __initdata;
	struct device_node *c;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}
/*
 * Table of the relative efficiency of each processor.
 * The efficiency value must fit in 20 bits and the final
 * cpu_scale value must be in the range
 * 0 < cpu_scale < SCHED_CAPACITY_SCALE.
 * Processors that are not listed in the table use the default
 * SCHED_CAPACITY_SCALE value for cpu_scale.
 */
static const struct cpu_efficiency table_efficiency[] = {
	{"arm,cortex-a15", 3891},
	{"arm,cortex-a17", 3276},
	{"arm,cortex-a12", 3276},
	{"arm,cortex-a53", 2520},
	{"arm,cortex-a7", 2048},
	{NULL, },
};

static unsigned long *__cpu_capacity;
#define cpu_capacity(cpu)	__cpu_capacity[cpu]

static unsigned long max_cpu_perf, min_cpu_perf;
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].socket_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
/*
 * Iterate over every possible CPU's device-tree node and compute its raw
 * performance from its clock-frequency and the matching table_efficiency
 * entry. The minimum and maximum performance across all CPUs are recorded
 * as well.
 */
static void parse_dt_cpu_capacity(void)
{
	const struct cpu_efficiency *cpu_eff;
	struct device_node *cn = NULL;
	int cpu = 0, i = 0;

	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
				 GFP_NOWAIT);

	min_cpu_perf = ULONG_MAX;
	max_cpu_perf = 0;

	for_each_possible_cpu(cpu) {
		const u32 *rate;
		int len;
		unsigned long cpu_perf;

		/* too early to use cpu->of_node */
		cn = of_get_cpu_node(cpu, NULL);
		if (!cn) {
			pr_err("missing device node for CPU %d\n", cpu);
			continue;
		}

		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
			if (of_device_is_compatible(cn, cpu_eff->compatible))
				break;

		if (cpu_eff->compatible == NULL)
			continue;

		rate = of_get_property(cn, "clock-frequency", &len);
		if (!rate || len != 4) {
			pr_err("%s missing clock-frequency property\n",
			       cn->full_name);
			continue;
		}

		cpu_perf = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
		cpu_capacity(cpu) = cpu_perf;
		max_cpu_perf = max(max_cpu_perf, cpu_perf);
		min_cpu_perf = min(min_cpu_perf, cpu_perf);
		i++;
	}

	if (i < num_possible_cpus()) {
		max_cpu_perf = 0;
		min_cpu_perf = 0;
	}
}
/*
 * Look for a custom capacity of a CPU in the cpu_capacity table during boot.
 * Updating all CPUs is O(n^2) on a heterogeneous system, but the function
 * returns immediately for SMP systems or when the set of per-cpu efficiency
 * and clock-frequency data is incomplete.
 */
static void update_cpu_capacity(unsigned int cpu)
{
	unsigned long capacity = cpu_capacity(cpu);

	if (!capacity || !max_cpu_perf) {
		cpu_capacity(cpu) = 0;
		return;
	}

	capacity *= SCHED_CAPACITY_SCALE;
	capacity /= max_cpu_perf;

	set_capacity_scale(cpu, capacity);

	printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n",
	       cpu, arch_scale_cpu_capacity(NULL, cpu));
}
/*
 * Scheduler load-tracking scale-invariance
 *
 * Provides the scheduler with a scale-invariance correction factor that
 * compensates for frequency scaling.
 */
static DEFINE_PER_CPU(atomic_long_t, cpu_freq_capacity);
static DEFINE_PER_CPU(atomic_long_t, cpu_max_freq);

/* cpufreq callback function setting current cpu frequency */
void arch_scale_set_curr_freq(int cpu, unsigned long freq)
{
	unsigned long max = atomic_long_read(&per_cpu(cpu_max_freq, cpu));
	unsigned long curr;

	if (!max)
		return;

	curr = (freq * SCHED_CAPACITY_SCALE) / max;

	atomic_long_set(&per_cpu(cpu_freq_capacity, cpu), curr);
}

/* cpufreq callback function setting max cpu frequency */
void arch_scale_set_max_freq(int cpu, unsigned long freq)
{
	atomic_long_set(&per_cpu(cpu_max_freq, cpu), freq);
}

unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
	unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));

	if (!curr)
		return SCHED_CAPACITY_SCALE;

	return curr;
}
#else
/*
 * Without CONFIG_OF there is nothing to parse: parse_dt_topology() must
 * still return an int (its result is checked in init_cpu_topology()), and
 * parse_dt_cpu_capacity() needs a stub as well.
 */
static inline int parse_dt_topology(void) { return 0; }
static inline void parse_dt_cpu_capacity(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
/*
 * cpu topology table
 */
struct cputopo_arm cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

/*
 * The current assumption is that we can power gate each core independently.
 * This will be superseded by DT binding once available.
 */
const struct cpumask *cpu_corepower_mask(int cpu)
{
	return &cpu_topology[cpu].thread_sibling;
}
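
/*
 * Update the core_sibling and thread_sibling masks of every CPU that shares
 * a socket (and, for threads, a core) with the newly described CPU.
 */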
static void update_siblings_masks(unsigned int cpuid)
{
	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->socket_id != cpu_topo->socket_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
	smp_wmb();
}
/*
 * store_cpu_topology is called at boot when only one CPU is running, and
 * with cpu_hotplug.lock held once several CPUs have booted, which prevents
 * simultaneous write access to the cpu_topology array.
 */
void store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
	unsigned int mpidr;

	mpidr = read_cpuid_mpidr();

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->socket_id != -1)
		goto topology_populated;

	/* create cpu topology mapping */
	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
		/*
		 * This is a multiprocessor system:
		 * the multiprocessor format and multiprocessor mode fields are set.
		 */
		if (mpidr & MPIDR_MT_BITMASK) {
			/* core performance interdependency */
			cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
		} else {
			/* largely independent cores */
			cpuid_topo->thread_id = -1;
			cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		}
	} else {
		/*
		 * This is a uniprocessor system:
		 * either we are in multiprocessor format but on a uniprocessor
		 * system, or in the old uniprocessor format.
		 */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = 0;
		cpuid_topo->socket_id = -1;
	}

	cpuid_topo->partno = read_cpuid_part();

topology_populated:
	update_siblings_masks(cpuid);
	update_cpu_capacity(cpuid);

	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
	       cpuid, cpu_topology[cpuid].thread_id,
	       cpu_topology[cpuid].core_id,
	       cpu_topology[cpuid].socket_id, mpidr);
}
static inline int cpu_corepower_flags(void)
{
	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
}

static struct sched_domain_topology_level arm_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
#ifdef CONFIG_SCHED_HMP
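/*
 * Split the possible CPUs into a "fast" and a "slow" mask, either from the
 * CONFIG_HMP_*_CPU_MASK options or by comparing each CPU's computed
 * capacity against the minimum seen across the system.
 */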
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	unsigned int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/* check by capacity */
	for_each_possible_cpu(cpu) {
		if (cpu_capacity(cpu) > min_cpu_perf)
			cpumask_set_cpu(cpu, fast);
		else
			cpumask_set_cpu(cpu, slow);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores, so place all cores in the
	 * slow mask; this keeps the system running with all cores treated
	 * equally.
	 */
	cpumask_setall(slow);
	cpumask_clear(fast);
}
struct cpumask hmp_fast_cpu_mask;
struct cpumask hmp_slow_cpu_mask;

void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
{
	struct hmp_domain *domain;

	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);

	/*
	 * Initialize hmp_domains
	 * Must be ordered with respect to compute capacity.
	 * Fastest domain at head of list.
	 */
	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
		domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
		cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
		cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
		list_add(&domain->hmp_domains, hmp_domains_list);
	}
	domain = kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
	cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
	cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
	list_add(&domain->hmp_domains, hmp_domains_list);
}
#endif /* CONFIG_SCHED_HMP */
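
/*
 * Reset every CPU's topology entry to "unknown" (-1 ids), make each CPU its
 * own sibling, and restore the default capacity before (re)parsing the DT.
 */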
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	/* init core mask and capacity */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->socket_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);

		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
	}
	smp_wmb();
}
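
/*
 * Non-zero once arch_build_cpu_topology_domain() has already run
 * init_cpu_topology(), so a later call becomes a no-op.
 */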
static int cpu_topology_init;

/*
 * init_cpu_topology is called at boot when only one CPU is running,
 * which prevents simultaneous write access to the cpu_topology array.
 */
void __init init_cpu_topology(void)
{
	if (cpu_topology_init)
		return;

	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_dt_topology())
		reset_cpu_topology();

	parse_dt_cpu_capacity();

	/* Set scheduler topology descriptor */
	set_sched_topology(arm_topology);
}

#ifdef CONFIG_MTK_CPU_TOPOLOGY
void __init arch_build_cpu_topology_domain(void)
{
	init_cpu_topology();
	cpu_topology_init = 1;
}
#endif
/*
 * Extra CPU and cluster helpers
 */
int arch_cpu_is_big(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

	switch (arm_cputopo->partno) {
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A15:
		return 1;
	default:
		return 0;
	}
}
int arch_cpu_is_little(unsigned int cpu)
{
	return !arch_cpu_is_big(cpu);
}
int arch_is_smp(void)
{
	static int __arch_smp = -1;

	if (__arch_smp != -1)
		return __arch_smp;

	__arch_smp = (max_cpu_perf != min_cpu_perf) ? 0 : 1;

	return __arch_smp;
}

int arch_get_nr_clusters(void)
{
	static int __arch_nr_clusters = -1;
	int max_id = 0;
	unsigned int cpu;

	if (__arch_nr_clusters != -1)
		return __arch_nr_clusters;

	/* assume socket ids are monotonically increasing without gaps */
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id > max_id)
			max_id = arm_cputopo->socket_id;
	}
	__arch_nr_clusters = max_id + 1;
	return __arch_nr_clusters;
}

int arch_is_multi_cluster(void)
{
	return arch_get_nr_clusters() > 1 ? 1 : 0;
}

int arch_get_cluster_id(unsigned int cpu)
{
	struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

	return arm_cputopo->socket_id < 0 ? 0 : arm_cputopo->socket_id;
}

void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id)
{
	unsigned int cpu;

	cpumask_clear(cpus);
	for_each_possible_cpu(cpu) {
		struct cputopo_arm *arm_cputopo = &cpu_topology[cpu];

		if (arm_cputopo->socket_id == cluster_id)
			cpumask_set_cpu(cpu, cpus);
	}
}

int arch_better_capacity(unsigned int cpu)
{
	BUG_ON(cpu >= num_possible_cpus());
	return cpu_capacity(cpu) > min_cpu_perf;
}