smp.c
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#ifdef CONFIG_TRUSTY
#include <linux/irqdomain.h>
#endif

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>
#ifdef CONFIG_MTPROF
#include "mt_sched_mon.h"
#endif
#include <mt-plat/mtk_ram_console.h>
#include <hotplug.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
#ifdef CONFIG_TRUSTY
	IPI_CUSTOM_FIRST,
	IPI_CUSTOM_LAST = 15,
#endif
};

#ifdef CONFIG_TRUSTY
struct irq_domain *ipi_custom_irq_domain;
#endif

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
};
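
/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * platform normally supplies its own struct smp_operations and registers it,
 * e.g. via smp_set_ops() from early machine setup.  The my_plat_* callback
 * names below are hypothetical.
 */
#if 0
static struct smp_operations my_plat_smp_ops __initdata = {
	.smp_init_cpus		= my_plat_smp_init_cpus,
	.smp_prepare_cpus	= my_plat_smp_prepare_cpus,
	.smp_secondary_init	= my_plat_secondary_init,
	.smp_boot_secondary	= my_plat_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_kill		= my_plat_cpu_kill,
	.cpu_die		= my_plat_cpu_die,
	.cpu_disable		= my_plat_cpu_disable,
#endif
};

static void __init my_plat_register_smp_ops(void)
{
	/* must run before the boot CPU calls smp_init_cpus() below */
	smp_set_ops(&my_plat_smp_ops);
}
#endif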
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_idmap(pgd);

	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}
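
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * __cpu_up() is not called directly by drivers; the generic hotplug core
 * invokes it from cpu_up().  Kernel code that wants to offline/online a CPU
 * would typically do something like the following (cpu_down() requires
 * CONFIG_HOTPLUG_CPU).
 */
#if 0
static int example_toggle_cpu(unsigned int cpu)
{
	int err;

	err = cpu_down(cpu);	/* ends up in __cpu_disable()/__cpu_die() */
	if (err)
		return err;

	return cpu_up(cpu);	/* ends up in __cpu_up() above */
}
#endif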
/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
void __attribute__((weak)) gic_set_primask(void)
{
}
#endif

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	aee_rr_rec_hoplug(cpu, 51, 0);
	idle_task_exit();
	aee_rr_rec_hoplug(cpu, 52, 0);

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
	gic_set_primask();
#endif
	local_irq_disable();
	aee_rr_rec_hoplug(cpu, 53, 0);

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();
	aee_rr_rec_hoplug(cpu, 54, 0);

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);
	aee_rr_rec_hoplug(cpu, 55, 0);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();
	aee_rr_rec_hoplug(cpu, 56, 0);

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/* fetch our CPU number before it is used by the trace hooks below */
	cpu = smp_processor_id();
	aee_rr_rec_hoplug(cpu, 1, 0);

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
	aee_rr_rec_hoplug(cpu, 2, 0);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	aee_rr_rec_hoplug(cpu, 3, 0);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	aee_rr_rec_hoplug(cpu, 4, 0);

	cpu_init();
	aee_rr_rec_hoplug(cpu, 5, 0);

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	aee_rr_rec_hoplug(cpu, 6, 0);
	trace_hardirqs_off();
	aee_rr_rec_hoplug(cpu, 7, 0);

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);
	aee_rr_rec_hoplug(cpu, 8, 0);

	notify_cpu_starting(cpu);
	aee_rr_rec_hoplug(cpu, 9, 0);

	calibrate_delay();
	aee_rr_rec_hoplug(cpu, 10, 0);

	smp_store_cpu_info(cpu);
	aee_rr_rec_hoplug(cpu, 11, 0);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	aee_rr_rec_hoplug(cpu, 12, 0);
	complete(&cpu_running);
	aee_rr_rec_hoplug(cpu, 13, 0);

	local_irq_enable();
	aee_rr_rec_hoplug(cpu, 14, 0);
	local_fiq_enable();
	aee_rr_rec_hoplug(cpu, 15, 0);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
	aee_rr_rec_hoplug(cpu, 16, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}
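
/*
 * Note (added for clarity): the BogoMIPS figure printed above is
 * loops_per_jiffy * HZ / 500000, split into an integer part and two decimal
 * digits.  For example, with HZ = 100 a CPU whose loops_per_jiffy is 2000000
 * contributes 2000000 / (500000/100) = 400.00 BogoMIPS to the total.
 */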
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
	S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
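
/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * arch_send_* helpers above are the back-ends of the generic cross-call API.
 * Kernel code normally uses smp_call_function_single() / smp_call_function(),
 * which end up raising IPI_CALL_FUNC_SINGLE / IPI_CALL_FUNC here.
 */
#if 0
static void example_remote_fn(void *info)
{
	pr_info("running on CPU%u\n", smp_processor_id());
}

static void example_cross_call(unsigned int target_cpu)
{
	/* run example_remote_fn on target_cpu and wait for it to finish */
	smp_call_function_single(target_cpu, example_remote_fn, NULL, 1);
}
#endif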
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
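
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * arch_irq_work_raise() is driven by the generic irq_work layer.  A typical
 * user queues work from a context that cannot sleep, and the callback then
 * runs from the IPI_IRQ_WORK case in handle_IPI() below.
 */
#if 0
static void example_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU%u\n", smp_processor_id());
}

static struct irq_work example_irq_work;

static void example_raise_irq_work(void)
{
	init_irq_work(&example_irq_work, example_irq_work_fn);
	irq_work_queue(&example_irq_work);	/* raises IPI_IRQ_WORK on this CPU */
}
#endif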
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}
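
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * register_ipi_completion() lets low-level code have a target CPU signal a
 * completion from IPI context.  The waiter registers the completion, arranges
 * for the returned IPI number to be raised on that CPU by hardware-specific
 * means (here assumed to be the GIC's gic_send_sgi() helper), and waits until
 * ipi_complete() above releases it.
 */
#if 0
static void example_wait_for_cpu(unsigned int cpu)
{
	struct completion done;
	int ipi_nr;

	init_completion(&done);
	ipi_nr = register_ipi_completion(&done, cpu);
	gic_send_sgi(cpu, ipi_nr);	/* assumption: GIC SGI helper is available */
	wait_for_completion(&done);
}
#endif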
static cpumask_t backtrace_mask;
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpu_clear(this_cpu, backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
}

/*
 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
 */
static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
{
	if (cpu_isset(cpu, backtrace_mask)) {
		raw_spin_lock(&backtrace_lock);
		pr_warning("IPI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		raw_spin_unlock(&backtrace_lock);
		cpu_clear(cpu, backtrace_mask);
	}
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
		mt_trace_ISR_end(ipinr);
#endif
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		tick_receive_broadcast();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		generic_smp_call_function_interrupt();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		generic_smp_call_function_single_interrupt();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		ipi_cpu_stop(cpu);
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		irq_work_run();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		ipi_complete(cpu);
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
#ifdef CONFIG_MTPROF
		mt_trace_ISR_start(ipinr);
#endif
		ipi_cpu_backtrace(cpu, regs);
#ifdef CONFIG_MTPROF
		mt_trace_ISR_end(ipinr);
#endif
		break;

	default:
#ifdef CONFIG_TRUSTY
		if (ipinr >= IPI_CUSTOM_FIRST && ipinr <= IPI_CUSTOM_LAST)
			handle_domain_irq(ipi_custom_irq_domain, ipinr, regs);
		else
#endif
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
			       cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

#ifdef CONFIG_TRUSTY
static void custom_ipi_enable(struct irq_data *data)
{
	/*
	 * Always trigger a new ipi on enable. This only works for clients
	 * that then clear the ipi before unmasking interrupts.
	 */
	smp_cross_call(cpumask_of(smp_processor_id()), data->irq);
}

static void custom_ipi_disable(struct irq_data *data)
{
}

static struct irq_chip custom_ipi_chip = {
	.name		= "CustomIPI",
	.irq_enable	= custom_ipi_enable,
	.irq_disable	= custom_ipi_disable,
};

static void handle_custom_ipi_irq(unsigned int irq, struct irq_desc *desc)
{
	if (!desc->action) {
		pr_crit("CPU%u: Unknown IPI message 0x%x, no custom handler\n",
			smp_processor_id(), irq);
		return;
	}

	if (!cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled))
		return; /* IPIs may not be maskable in hardware */

	handle_percpu_devid_irq(irq, desc);
}

static int __init smp_custom_ipi_init(void)
{
	int ipinr;

	/* alloc descs for these custom ipis/irqs before using them */
	irq_alloc_descs(IPI_CUSTOM_FIRST, 0,
			IPI_CUSTOM_LAST - IPI_CUSTOM_FIRST + 1, 0);

	for (ipinr = IPI_CUSTOM_FIRST; ipinr <= IPI_CUSTOM_LAST; ipinr++) {
		irq_set_percpu_devid(ipinr);
		irq_set_chip_and_handler(ipinr, &custom_ipi_chip,
					 handle_custom_ipi_irq);
		set_irq_flags(ipinr, IRQF_VALID | IRQF_NOAUTOEN);
	}

	ipi_custom_irq_domain = irq_domain_add_legacy(NULL,
			IPI_CUSTOM_LAST - IPI_CUSTOM_FIRST + 1,
			IPI_CUSTOM_FIRST, IPI_CUSTOM_FIRST,
			&irq_domain_simple_ops,
			&custom_ipi_chip);

	return 0;
}
core_initcall(smp_custom_ipi_init);
#endif
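
/*
 * Illustrative sketch (not part of the original file, compiled out): a client
 * of the CustomIPI range (e.g. the Trusty driver) would claim one of the
 * IPI_CUSTOM_FIRST..IPI_CUSTOM_LAST numbers as a per-CPU IRQ.  Note that
 * custom_ipi_enable() above re-raises the IPI on enable, so the handler must
 * tolerate a possibly spurious first interrupt.
 */
#if 0
static DEFINE_PER_CPU(int, example_ipi_dev);

static irqreturn_t example_ipi_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_claim_custom_ipi(unsigned int ipinr)
{
	int ret;

	ret = request_percpu_irq(ipinr, example_ipi_handler,
				 "example-ipi", &example_ipi_dev);
	if (ret)
		return ret;

	/* must be repeated on every CPU that wants to receive this IPI */
	enable_percpu_irq(ipinr, 0);
	return 0;
}
#endif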
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}
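
/*
 * Note (added for clarity): cpufreq_scale(ref, ref_freq, new_freq) returns
 * roughly ref * new_freq / ref_freq, so loops_per_jiffy tracks the CPU clock.
 * For example, a reference loops_per_jiffy of 2000000 captured at 500 MHz
 * becomes about 4000000 once the CPU runs at 1 GHz.
 */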
static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif