irq.c

/*
 * Based on arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}
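/*
 * Illustration (not from the original source): with prec == 3 the
 * seq_printf() above produces the error-interrupt summary line seen at the
 * bottom of /proc/interrupts, e.g.:
 *
 *	Err:          0
 */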
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return;

	handle_arch_irq = handle_irq;
}
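/*
 * Usage sketch (not part of this file): the root interrupt controller
 * driver registers its low-level entry point here while it is probed from
 * irqchip_init(); e.g. the GIC driver is expected to do something like
 *
 *	set_handle_irq(gic_handle_irq);
 *
 * from its init path.  Only the first registration takes effect.
 */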
void __init init_IRQ(void)
{
	irqchip_init();
	if (!handle_arch_irq)
		panic("No interrupt controller found.");
}
#ifdef CONFIG_HOTPLUG_CPU

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
#include <linux/slab.h>
#include <linux/bitmap.h>

/* like cpumask_equal(), but compares only the first num_possible_cpus() bits */
static inline bool mt_cpumask_equal(const struct cpumask *src1p,
				    const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
			    num_possible_cpus());
}
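/*
 * Per-CPU lists of IRQs that must be migrated away when the CPU goes
 * offline.  struct thread_safe_list and struct per_cpu_irq_desc are defined
 * in an MTK header elsewhere; judging from how they are used below, their
 * layout is roughly (a sketch, not the authoritative definition):
 *
 *	struct per_cpu_irq_desc {
 *		struct irq_desc		*desc;
 *		struct list_head	list;
 *	};
 *
 *	struct thread_safe_list {
 *		spinlock_t		lock;
 *		struct list_head	list;
 *	};
 */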
struct thread_safe_list irq_need_migrate_list[CONFIG_NR_CPUS];

void fixup_update_irq_need_migrate_list(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = irq_data_get_irq_chip(d);

	/* if this IRQ is not a per-cpu IRQ => force it to target all CPUs */
	if (!irqd_is_per_cpu(d)) {
		/* set the gic affinity to target all and update the smp affinity */
		if (!c->irq_set_affinity)
			pr_err("IRQ%u: unable to set affinity\n", d->irq);
		else if (c->irq_set_affinity(d, cpu_possible_mask, true) == IRQ_SET_MASK_OK)
			cpumask_copy(d->affinity, cpu_possible_mask);
	}
}
bool check_consistency_of_irq_settings(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct per_cpu_irq_desc *node;
	struct list_head *pos, *temp;
	cpumask_t per_cpu_list_affinity, gic_target_affinity, tmp_affinity;
	bool ret = true;
	int cpu;

	/* if this IRQ is a per-cpu IRQ: only check the gic setting */
	if (irqd_is_per_cpu(d))
		goto check_gic;

	/* gather the setting recorded in the per-cpu irq-need-migrate lists */
	cpumask_clear(&per_cpu_list_affinity);
	rcu_read_lock();
	for_each_cpu(cpu, cpu_possible_mask)
		list_for_each_safe(pos, temp, &(irq_need_migrate_list[cpu].list)) {
			node = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
			if (node->desc == desc) {
				cpumask_set_cpu(cpu, &per_cpu_list_affinity);
				break;
			}
		}
	rcu_read_unlock();

	/* compare with the smp affinity setting */
	if (mt_cpumask_equal(d->affinity, cpu_possible_mask)) {
		/*
		 * if the smp affinity is set to all CPUs
		 * AND this IRQ is not found in any per-cpu list -> success
		 */
		ret = cpumask_empty(&per_cpu_list_affinity);
	} else if (!mt_cpumask_equal(&per_cpu_list_affinity, d->affinity)) {
		/* the smp affinity should be the same as the per-cpu lists */
		ret = false;
	}

	/* print out to error logs */
	if (!ret) {
		pr_err("[IRQ] IRQ %d: smp affinity is not consistent with per-cpu list\n", d->irq);
		cpumask_xor(&tmp_affinity, &per_cpu_list_affinity, d->affinity);
		/* iterate over cpus with an inconsistent setting */
		for_each_cpu(cpu, &tmp_affinity)
			if (cpumask_test_cpu(cpu, d->affinity))
				pr_err("[IRQ] @CPU%u: smp affinity is set, but per-cpu list is not set\n", cpu);
			else
				pr_err("[IRQ] @CPU%u: smp affinity is not set, but per-cpu list is set\n", cpu);
	}

check_gic:
	if (mt_is_secure_irq(d)) {
		/* no need to check WDT */
		ret = true;
		goto out;
	}

	/* read back the GIC target registers and compare with the smp affinity */
	cpumask_clear(&gic_target_affinity);
	if (!mt_get_irq_gic_targets(d, &gic_target_affinity)) {
		/* failed to get the GICD_ITARGETSR setting */
		pr_err("[IRQ] unable to get GICD_ITARGETSR setting of IRQ %d\n", d->irq);
		ret = false;
	} else if (!mt_cpumask_equal(&gic_target_affinity, d->affinity)) {
		pr_err("[IRQ] IRQ %d: smp affinity is not consistent with GICD_ITARGETSR\n", d->irq);
		cpumask_xor(&tmp_affinity, &gic_target_affinity, d->affinity);
		/* iterate over cpus with an inconsistent setting */
		for_each_cpu(cpu, &tmp_affinity)
			if (cpumask_test_cpu(cpu, d->affinity))
				pr_err("[IRQ] @CPU%u: smp affinity is set, but gic reg is not set\n", cpu);
			else
				pr_err("[IRQ] @CPU%u: smp affinity is not set, but gic reg is set\n", cpu);
		ret = false;
	}

out:
	if (!ret)
		pr_err("[IRQ] IRQ %d: the affinity setting is INCONSISTENT\n", d->irq);
	return ret;
}
void dump_irq_need_migrate_list(const struct cpumask *mask)
{
	struct per_cpu_irq_desc *node;
	struct list_head *pos, *temp;
	int cpu;

	rcu_read_lock();
	for_each_cpu(cpu, mask) {
		pr_debug("[IRQ] dump per-cpu irq-need-migrate list of CPU%u\n", cpu);
		list_for_each_safe(pos, temp, &(irq_need_migrate_list[cpu].list)) {
			node = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
			pr_debug("[IRQ] IRQ %d\n", (node->desc->irq_data).irq);
		}
	}
	rcu_read_unlock();
}
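/*
 * Locking scheme used by the list helpers (as written in this file): writers
 * take the per-CPU irq_need_migrate_list[cpu].lock spinlock and modify the
 * list with list_add_rcu()/list_del_rcu(), while readers such as
 * dump_irq_need_migrate_list() above and migrate_irqs() below only hold
 * rcu_read_lock() and never take the spinlock.
 */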
static void del_from_irq_need_migrate_list(struct irq_desc *desc,
					   const struct cpumask *cpumask_to_del)
{
	struct per_cpu_irq_desc *node, *next;
	int cpu;

	for_each_cpu(cpu, cpumask_to_del) {
		spin_lock(&(irq_need_migrate_list[cpu].lock));
		list_for_each_entry_safe(node, next,
					 &(irq_need_migrate_list[cpu].list), list) {
			if (node->desc != desc)
				continue;
			pr_debug("[IRQ] list_del to cpu %d\n", cpu);
			list_del_rcu(&node->list);
			kfree(node);
			break;
		}
		spin_unlock(&(irq_need_migrate_list[cpu].lock));
	}
}
/* returns false on error */
static bool add_to_irq_need_migrate_list(struct irq_desc *desc,
					 const struct cpumask *cpumask_to_add)
{
	struct per_cpu_irq_desc *node;
	int cpu;
	bool ret = true;

	for_each_cpu(cpu, cpumask_to_add) {
		spin_lock(&(irq_need_migrate_list[cpu].lock));
		node = kmalloc(sizeof(struct per_cpu_irq_desc), GFP_ATOMIC);
		if (node == NULL) {
			spin_unlock(&(irq_need_migrate_list[cpu].lock));
			ret = false;
			break;
		}
		node->desc = desc;
		pr_debug("[IRQ] list_add to cpu %d\n", cpu);
		list_add_rcu(&node->list, &(irq_need_migrate_list[cpu].list));
		spin_unlock(&(irq_need_migrate_list[cpu].lock));
	}

	/* on failure, delete whatever we have already added */
	if (!ret) {
		pr_err("[IRQ] kmalloc failed: cannot add node into CPU%d per-cpu IRQ list\n", cpu);
		del_from_irq_need_migrate_list(desc, cpumask_to_add);
	}

	return ret;
}
/*
 * must be invoked before the cpumask_copy() of irq_desc so that the
 * original smp affinity is still visible here
 * returns true on success
 */
bool update_irq_need_migrate_list(struct irq_desc *desc, const struct cpumask *new_affinity)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	cpumask_t need_update_affinity, tmp_affinity;

	pr_debug("[IRQ] update per-cpu list (IRQ %d)\n", d->irq);

	/* find out which per-cpu irq-need-migrate lists need to be updated */
	cpumask_xor(&need_update_affinity, d->affinity, new_affinity);

	/* return if there is no need to update the per-cpu irq-need-migrate lists */
	if (cpumask_empty(&need_update_affinity))
		return true;

	/* special cases */
	if (mt_cpumask_equal(new_affinity, cpu_possible_mask)) {
		/*
		 * case 1: the new affinity targets all cpus
		 * remove this IRQ from the per-cpu irq-need-migrate lists of the old affinity
		 */
		del_from_irq_need_migrate_list(desc, d->affinity);
		return true;
	} else if (mt_cpumask_equal(d->affinity, cpu_possible_mask)) {
		/*
		 * case 2: the old affinity targets all cpus
		 * add this IRQ to the per-cpu irq-need-migrate lists of the new affinity
		 */
		return add_to_irq_need_migrate_list(desc, new_affinity);
	}

	/* needs to be updated AND is in the new affinity -> list_add */
	cpumask_and(&tmp_affinity, &need_update_affinity, new_affinity);
	if (!add_to_irq_need_migrate_list(desc, &tmp_affinity))
		return false;

	/* needs to be updated AND is in the old affinity -> list_del */
	cpumask_and(&tmp_affinity, &need_update_affinity, d->affinity);
	del_from_irq_need_migrate_list(desc, &tmp_affinity);

	return true;
}
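/*
 * Worked example (illustrative, assuming a 4-CPU system where all CPUs are
 * possible): if d->affinity is {CPU0, CPU1} and new_affinity is {CPU1, CPU2},
 * the xor gives {CPU0, CPU2}; neither mask equals cpu_possible_mask, so the
 * IRQ is added to CPU2's irq-need-migrate list ({CPU0, CPU2} & new affinity)
 * and removed from CPU0's list ({CPU0, CPU2} & old affinity), while CPU1's
 * list is left untouched.
 */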
/* update the smp affinity and the per-cpu irq-need-migrate lists */
void update_affinity_settings(struct irq_desc *desc, const struct cpumask *new_affinity,
			      bool update_smp_affinity)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	bool need_fix = false;

	need_fix = !update_irq_need_migrate_list(desc, new_affinity);

	if (update_smp_affinity)
		cpumask_copy(d->affinity, new_affinity);

	if (need_fix)
		fixup_update_irq_need_migrate_list(desc);

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN_DEBUG
	/* verify the consistency of the IRQ settings after updating */
	BUG_ON(!check_consistency_of_irq_settings(desc));
#endif
}
#endif /* CONFIG_MTK_IRQ_NEW_DESIGN */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
		cpumask_copy(d->affinity, affinity);
#else
		update_affinity_settings(desc, affinity, true);
#endif

	return ret;
}
/*
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
	unsigned int i;
#endif
	struct irq_desc *desc;
	unsigned long flags;
#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
	struct list_head *pos, *temp;
#endif

	local_irq_save(flags);

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
	rcu_read_lock();
	list_for_each_safe(pos, temp, &(irq_need_migrate_list[smp_processor_id()].list)) {
		struct per_cpu_irq_desc *ptr = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
		bool affinity_broken;

		desc = ptr->desc;
		pr_debug("[IRQ] CPU%u is going down, IRQ%u needs to be migrated\n",
			 smp_processor_id(), (desc->irq_data).irq);

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    (desc->irq_data).irq, smp_processor_id());
	}
	rcu_read_unlock();
#else
	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, smp_processor_id());
	}
#endif

	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */