/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <linux/ratelimit.h>

#include <asm/hardware/cache-l2x0.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

unsigned long irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
	show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}

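/*
 * Illustrative note (not from the original comments): arch_show_interrupts()
 * is the arch hook called by the generic /proc/interrupts code after the
 * per-IRQ rows, so the FIQ list, the IPI list and an error-counter row of
 * the rough form "Err:          0" appear at the bottom of /proc/interrupts.
 */
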
/*
 * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
 * not come via this function. Instead, they should provide their
 * own 'handler'. Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
	__handle_domain_irq(NULL, irq, false, regs);
}

/*
 * asm_do_IRQ is the interface to be used from assembly code.
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	handle_IRQ(irq, regs);
}

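/*
 * Illustrative sketch only (not part of the original file): a platform that
 * does C-based first-level decoding would typically read its interrupt
 * controller's pending register and feed each decoded number to handle_IRQ().
 * All names below (my_soc_*) are hypothetical placeholders.
 *
 *	static void __exception_irq_entry my_soc_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 irqnr;
 *
 *		while ((irqnr = my_soc_read_pending_irq()) != MY_SOC_NO_IRQ)
 *			handle_IRQ(irqnr, regs);
 *	}
 */
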
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;
	/* Order is clear bits in "clr" then set bits in "set" */
	irq_modify_status(irq, clr, set & ~clr);
}
EXPORT_SYMBOL_GPL(set_irq_flags);

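/*
 * Illustrative sketch only: legacy irqchip/platform code usually marks a
 * newly mapped IRQ as usable with something like
 *
 *	irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
 *	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 *
 * which clears IRQ_NOREQUEST and IRQ_NOPROBE so the line can be requested
 * and probed; "my_chip" is a placeholder for the driver's struct irq_chip.
 */
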
void __init init_IRQ(void)
{
	int ret;

	if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
		irqchip_init();
	else
		machine_desc->init_irq();

	if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
	    (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
		outer_cache.write_sec = machine_desc->l2c_write_sec;
		ret = l2x0_of_init(machine_desc->l2c_aux_val,
				   machine_desc->l2c_aux_mask);
		if (ret)
			pr_err("L2C: failed to init: %d\n", ret);
	}
}

#ifdef CONFIG_MULTI_IRQ_HANDLER
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return;

	handle_arch_irq = handle_irq;
}
#endif

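/*
 * Illustrative sketch only: with CONFIG_MULTI_IRQ_HANDLER the irqchip driver
 * (or machine code) registers its top-level handler during early init, e.g.
 *
 *	set_handle_irq(my_soc_handle_irq);
 *
 * where my_soc_handle_irq is a hypothetical void (*)(struct pt_regs *)
 * handler. Only the first registration sticks, since set_handle_irq()
 * above returns early once handle_arch_irq is already set.
 */
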
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
	return nr_irqs;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
#include <linux/slab.h>
#include <linux/bitmap.h>

static inline bool mt_cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
	return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), num_possible_cpus());
}

struct thread_safe_list irq_need_migrate_list[CONFIG_NR_CPUS];

void fixup_update_irq_need_migrate_list(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = irq_data_get_irq_chip(d);

	/* if this IRQ is not a per-cpu IRQ => force it to target all CPUs */
	if (!irqd_is_per_cpu(d)) {
		/* set the gic affinity to target all CPUs and update the smp affinity */
		if (!c->irq_set_affinity)
			pr_err("IRQ%u: unable to set affinity\n", d->irq);
		else if (c->irq_set_affinity(d, cpu_possible_mask, true) == IRQ_SET_MASK_OK)
			cpumask_copy(d->affinity, cpu_possible_mask);
	}
}

bool check_consistency_of_irq_settings(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct per_cpu_irq_desc *node;
	struct list_head *pos, *temp;
	cpumask_t per_cpu_list_affinity, gic_target_affinity, tmp_affinity;
	bool ret = true;
	int cpu;

	/* if this IRQ is a per-cpu IRQ: only check the gic setting */
	if (irqd_is_per_cpu(d))
		goto check_gic;

	/* get the setting in the per-cpu irq-need-migrate lists */
	cpumask_clear(&per_cpu_list_affinity);
	rcu_read_lock();
	for_each_cpu(cpu, cpu_possible_mask)
		list_for_each_safe(pos, temp, &(irq_need_migrate_list[cpu].list)) {
			node = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
			if (node->desc == desc) {
				cpumask_set_cpu(cpu, &per_cpu_list_affinity);
				break;
			}
		}
	rcu_read_unlock();

	/* compare with the smp affinity setting */
	if (mt_cpumask_equal(d->affinity, cpu_possible_mask)) {
		/*
		 * if the smp affinity is set to all CPUs
		 * AND this IRQ is not found in any per-cpu list -> success
		 */
		ret = cpumask_empty(&per_cpu_list_affinity);
	} else if (!mt_cpumask_equal(&per_cpu_list_affinity, d->affinity)) {
		/* the smp affinity should be the same as the per-cpu lists */
		ret = false;
	}

	/* print out error logs */
	if (!ret) {
		pr_err("[IRQ] IRQ %d: smp affinity is not consistent with per-cpu list\n", d->irq);
		cpumask_xor(&tmp_affinity, &per_cpu_list_affinity, d->affinity);
		/* iterate over cpus with inconsistent settings */
		for_each_cpu(cpu, &tmp_affinity)
			if (cpumask_test_cpu(cpu, d->affinity))
				pr_err("[IRQ] @CPU%u: smp affinity is set, but per-cpu list is not set\n", cpu);
			else
				pr_err("[IRQ] @CPU%u: smp affinity is not set, but per-cpu list is set\n", cpu);
	}

check_gic:
	if (mt_is_secure_irq(d)) {
		/* no need to check WDT */
		ret = true;
		goto out;
	}

	/* get the gic target setting and compare it with the smp affinity */
	cpumask_clear(&gic_target_affinity);
	if (!mt_get_irq_gic_targets(d, &gic_target_affinity)) {
		/* failed to get the GICD_ITARGETSR setting */
		pr_err("[IRQ] unable to get GICD_ITARGETSR setting of IRQ %d\n", d->irq);
		ret = false;
	} else if (!mt_cpumask_equal(&gic_target_affinity, d->affinity)) {
		pr_err("[IRQ] IRQ %d: smp affinity is not consistent with GICD_ITARGETSR\n", d->irq);
		cpumask_xor(&tmp_affinity, &gic_target_affinity, d->affinity);
		/* iterate over cpus with inconsistent settings */
		for_each_cpu(cpu, &tmp_affinity)
			if (cpumask_test_cpu(cpu, d->affinity))
				pr_err("[IRQ] @CPU%u: smp affinity is set, but gic reg is not set\n", cpu);
			else
				pr_err("[IRQ] @CPU%u: smp affinity is not set, but gic reg is set\n", cpu);
		ret = false;
	}

out:
	if (!ret)
		pr_err("[IRQ] IRQ %d: the affinity setting is INCONSISTENT\n", d->irq);
	return ret;
}

void dump_irq_need_migrate_list(const struct cpumask *mask)
{
	struct per_cpu_irq_desc *node;
	struct list_head *pos, *temp;
	int cpu;

	rcu_read_lock();
	for_each_cpu(cpu, mask) {
		pr_debug("[IRQ] dump per-cpu irq-need-migrate list of CPU%u\n", cpu);
		list_for_each_safe(pos, temp, &(irq_need_migrate_list[cpu].list)) {
			node = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
			pr_debug("[IRQ] IRQ %d\n", (node->desc->irq_data).irq);
		}
	}
	rcu_read_unlock();
}

static void del_from_irq_need_migrate_list(struct irq_desc *desc, const struct cpumask *cpumask_to_del)
{
	struct per_cpu_irq_desc *node, *next;
	int cpu;

	for_each_cpu(cpu, cpumask_to_del) {
		spin_lock(&(irq_need_migrate_list[cpu].lock));
		list_for_each_entry_safe(node, next,
					 &(irq_need_migrate_list[cpu].list), list) {
			if (node->desc != desc)
				continue;
			pr_debug("[IRQ] list_del to cpu %d\n", cpu);
			list_del_rcu(&node->list);
			kfree(node);
			break;
		}
		spin_unlock(&(irq_need_migrate_list[cpu].lock));
	}
}

/* returns false on error */
static bool add_to_irq_need_migrate_list(struct irq_desc *desc, const struct cpumask *cpumask_to_add)
{
	struct per_cpu_irq_desc *node;
	int cpu;
	bool ret = true;

	for_each_cpu(cpu, cpumask_to_add) {
		spin_lock(&(irq_need_migrate_list[cpu].lock));
		node = kmalloc(sizeof(struct per_cpu_irq_desc), GFP_ATOMIC);
		if (node == NULL) {
			spin_unlock(&(irq_need_migrate_list[cpu].lock));
			ret = false;
			break;
		}
		node->desc = desc;
		pr_debug("[IRQ] list_add to cpu %d\n", cpu);
		list_add_rcu(&node->list, &(irq_need_migrate_list[cpu].list));
		spin_unlock(&(irq_need_migrate_list[cpu].lock));
	}

	/* on failure, delete whatever we have already added */
	if (!ret) {
		pr_err("[IRQ] kmalloc failed: cannot add node into CPU%d per-cpu IRQ list\n", cpu);
		del_from_irq_need_migrate_list(desc, cpumask_to_add);
	}

	return ret;
}

/*
 * Must be invoked before cpumask_copy() updates the irq_desc affinity,
 * so that the original smp affinity is still available here.
 * Returns true on success.
 */
bool update_irq_need_migrate_list(struct irq_desc *desc, const struct cpumask *new_affinity)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	cpumask_t need_update_affinity, tmp_affinity;

	pr_debug("[IRQ] update per-cpu list (IRQ %d)\n", d->irq);

	/* find the per-cpu irq-need-migrate lists that need to be updated */
	cpumask_xor(&need_update_affinity, d->affinity, new_affinity);

	/* return if no per-cpu irq-need-migrate list needs to be updated */
	if (cpumask_empty(&need_update_affinity))
		return true;

	/* special cases */
	if (mt_cpumask_equal(new_affinity, cpu_possible_mask)) {
		/*
		 * case 1: the new affinity targets all cpus
		 * remove this IRQ from the per-cpu irq-need-migrate lists of the old affinity
		 */
		del_from_irq_need_migrate_list(desc, d->affinity);
		return true;
	} else if (mt_cpumask_equal(d->affinity, cpu_possible_mask)) {
		/*
		 * case 2: the old affinity targets all cpus
		 * add this IRQ to the per-cpu irq-need-migrate lists of the new affinity
		 */
		return add_to_irq_need_migrate_list(desc, new_affinity);
	}

	/* needs updating AND is in the new affinity -> list_add */
	cpumask_and(&tmp_affinity, &need_update_affinity, new_affinity);
	if (!add_to_irq_need_migrate_list(desc, &tmp_affinity))
		return false;

	/* needs updating AND is in the old affinity -> list_del */
	cpumask_and(&tmp_affinity, &need_update_affinity, d->affinity);
	del_from_irq_need_migrate_list(desc, &tmp_affinity);

	return true;
}

/* update the smp affinity and the per-cpu irq-need-migrate lists */
void update_affinity_settings(struct irq_desc *desc, const struct cpumask *new_affinity, bool update_smp_affinity)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	bool need_fix = false;

	need_fix = !update_irq_need_migrate_list(desc, new_affinity);

	if (update_smp_affinity)
		cpumask_copy(d->affinity, new_affinity);

	if (need_fix)
		fixup_update_irq_need_migrate_list(desc);

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN_DEBUG
	/* verify the consistency of the IRQ settings after updating */
	BUG_ON(!check_consistency_of_irq_settings(desc));
#endif
}

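/*
 * Illustrative sketch only: in this design, code that changes an IRQ's
 * affinity is expected to go through update_affinity_settings() so that
 * irq_data->affinity and the per-cpu irq-need-migrate lists stay in sync,
 * rather than doing a bare cpumask_copy(), e.g.
 *
 *	update_affinity_settings(desc, new_mask, true);
 *
 * Passing false as the last argument refreshes only the per-cpu lists and
 * leaves irq_data->affinity for the caller to update afterwards.
 */
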
#endif /* CONFIG_MTK_IRQ_NEW_DESIGN */

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
		cpumask_copy(d->affinity, affinity);
#else
		update_affinity_settings(desc, affinity, true);
#endif

	return ret;
}

/*
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
	unsigned int i;
#endif
	struct irq_desc *desc;
	unsigned long flags;
#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
	struct list_head *pos, *temp;
#endif

	local_irq_save(flags);

#ifdef CONFIG_MTK_IRQ_NEW_DESIGN
	rcu_read_lock();
	list_for_each_safe(pos, temp, &(irq_need_migrate_list[smp_processor_id()].list)) {
		struct per_cpu_irq_desc *ptr = list_entry_rcu(pos, struct per_cpu_irq_desc, list);
		bool affinity_broken;

		desc = ptr->desc;
		pr_debug("[IRQ] CPU%u is going down, IRQ%u needs to be migrated\n",
			 smp_processor_id(), (desc->irq_data).irq);

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    (desc->irq_data).irq, smp_processor_id());
	}
	rcu_read_unlock();
#else
	for_each_irq_desc(i, desc) {
		bool affinity_broken;

		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken && printk_ratelimit())
			pr_warn("IRQ%u no longer affine to CPU%u\n",
				i, smp_processor_id());
	}
#endif

	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */