/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irq-gic-common.h"
#include "irqchip.h"

#include <mach/mt_secure_api.h>

#define IOMEM(x)	((void __force __iomem *)(x))

struct gic_chip_data {
	void __iomem		*dist_base;
	void __iomem		**redist_base;
	void __percpu __iomem	**rdist;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			redist_regions;
	unsigned int		irq_nr;
};

static struct gic_chip_data gic_data __read_mostly;

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdist))
#define gic_data_rdist_rd_base()	(*gic_data_rdist())
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
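/*
 * Each CPU's redistributor exposes (at least) two 64K frames: RD_base
 * with the control registers, immediately followed by SGI_base, which
 * holds the SGI/PPI configuration registers. gic_populate_rdist()
 * records RD_base per CPU; the SGI frame is derived from it above.
 */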
/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	if (d->hwirq >= 8192)
		BUG();			/* LPI Detected!!! */

	return NULL;
}

static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
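/*
 * GICD_CTLR.RWP (Register Write Pending) stays set while a previous
 * write to registers such as ICENABLER is still propagating, so
 * configuration changes must be polled to completion (with a bounded
 * ~1s timeout above) before they can be relied upon.
 */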
/* Low level accessors */
static u64 gic_read_iar(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

static void gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static void gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
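/*
 * SRE (System Register Enable) switches the CPU interface from the
 * legacy memory-mapped GICC view to the ICC_* system registers that
 * the rest of this driver relies on. A hypervisor can disable or trap
 * it at EL2, which is why the write is read back and verified.
 */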
static void gic_enable_redist(void)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	/* Wake up this CPU redistributor */
	val = readl_relaxed(rbase + GICR_WAKER);
	val &= ~GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
		count--;
		if (!count) {
			pr_err_ratelimited("redist didn't wake up...\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
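/*
 * GICR_WAKER implements a two-step handshake: software clears
 * ProcessorSleep, then polls until the redistributor clears
 * ChildrenAsleep, signalling that the interface to this CPU is awake
 * and interrupts can be forwarded to it.
 */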
/*
 * Routines to disable, enable, EOI and route interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
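/*
 * ISENABLER/ICENABLER and friends are banked one bit per interrupt,
 * 32 interrupts per 32-bit register, hence the irq/32 word index and
 * irq%32 bit mask. Writing 0 to them has no effect, so gic_poke_irq()
 * can write just the target bit without a read-modify-write cycle.
 */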
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

void __iomem *INT_POL_CTL_REG;

__weak void _mt_set_pol_reg(u32 reg_index, u32 value)
{
	writel_relaxed(value, (INT_POL_CTL_REG + (reg_index * 4)));
}

void _mt_irq_set_polarity(unsigned int irq, unsigned int polarity)
{
	u32 offset, reg_index, value;

	if (irq < 32) {
		pr_err("Failed to set polarity of interrupt %d\n", irq);
		return;
	}

	offset = (irq - 32) & 0x1F;
	reg_index = (irq - 32) >> 5;

	/*
	 * Temporary hack for Everest: the POL register allocation is
	 * not contiguous, registers 8 and up live at offset 0x70.
	 */
	if (reg_index >= 8) {
		reg_index -= 8;
		reg_index += 0x70 / 4;
	}

	if (polarity == 0) {
		/* active low */
		value = readl_relaxed(IOMEM(INT_POL_CTL_REG + (reg_index * 4)));
		value |= (1 << offset);
		/*
		 * Some platforms have to write the POL register from the
		 * secure world, using its PHYSICAL address.
		 */
		_mt_set_pol_reg(reg_index, value);
	} else {
		/* active high */
		value = readl_relaxed(IOMEM(INT_POL_CTL_REG + (reg_index * 4)));
		value &= ~(0x1 << offset);
		/* some platforms have to write the POL register in the
		 * secure world */
		_mt_set_pol_reg(reg_index, value);
	}
}
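/*
 * The GIC itself only distinguishes level vs. edge; it has no notion
 * of polarity. On these MediaTek SoCs an external INT_POL_CTL block
 * can invert SPI lines before they reach the distributor, so
 * falling-edge/active-low triggers are implemented by setting the
 * corresponding invert bit here while the GIC is programmed for the
 * rising-edge/active-high equivalent.
 */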
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	if (type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		_mt_irq_set_polarity(irq,
				     (type & IRQF_TRIGGER_FALLING) ? 0 : 1);
	else if (type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW))
		_mt_irq_set_polarity(irq,
				     (type & IRQF_TRIGGER_LOW) ? 0 : 1);

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	gic_configure_irq(irq, type, base, rwp_wait);

	return 0;
}

static u64 gic_mpidr_to_affinity(u64 mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
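/*
 * GICD_IROUTER packs the four 8-bit affinity fields as
 * Aff3[39:32].Aff2[23:16].Aff1[15:8].Aff0[7:0], e.g. a core with
 * Aff1 = 1 and Aff0 = 2 (MPIDR affinity 0x0102) routes as the
 * IROUTER value 0x0000000000000102.
 */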
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020)) {
			int err;

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected SPI received!\n");
				gic_write_eoir(irqnr);
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
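/*
 * Reading ICC_IAR1_EL1 both returns the highest-priority pending
 * INTID and acknowledges it. INTIDs 1020-1023 are reserved as special
 * values; 1023 (ICC_IAR1_EL1_SPURIOUS) means "nothing pending" and
 * terminates the loop above.
 */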
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
		       GICD_CTLR_ENABLE_G1, base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_base[i];
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = readq_relaxed(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				gic_data_rdist_rd_base() = ptr;
				pr_info("CPU%d: found redistributor %llx @%p\n",
					smp_processor_id(),
					(unsigned long long)mpidr, ptr);
				return 0;
			}

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				/* Skip VLPI_base + reserved page */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2;
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	return -ENODEV;
}
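/*
 * Each redistributor region is a linear array of per-CPU frames. The
 * walk above steps through it, matching GICR_TYPER[63:32] (the
 * affinity of the CPU a frame belongs to) against this CPU's MPIDR,
 * and gives up at the frame with GICR_TYPER.Last set. GICv4 frames
 * are twice the size because of the extra VLPI/reserved pages.
 */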
void mt_gic_cpu_init_for_low_power(void)
{
	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist();

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Enable system registers */
	gic_enable_sre();

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	/* EOI deactivates interrupt too (mode 0) */
	gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu == nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);
		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
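/*
 * ICC_SGI1R_EL1 can target at most 16 CPUs at once: its TargetList
 * field is a 16-bit mask of Aff0 values within one cluster. The
 * helper above therefore gathers the mask-member CPUs that share a
 * cluster (same Aff3.Aff2.Aff1) into one such bitmap, advancing
 * *base_cpu so the caller can issue one SGI write per cluster.
 */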
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
	       MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
	       irq << 24 |
	       MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
	       tlist);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
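/*
 * ICC_SGI1R_EL1 layout as used here: Aff3[55:48], Aff2[39:32],
 * INTID[27:24], Aff1[23:16], TargetList[15:0]. For instance, SGI 5
 * sent to CPUs 0 and 2 of cluster Aff1=1 encodes as
 * (5 << 24) | (1 << 16) | 0b0101 = 0x05010005.
 */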
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu_mask(cpu, *mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while (0)
#endif

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	switch (intspec[0]) {
	case 0:			/* SPI */
		*out_hwirq = intspec[1] + 32;
		break;
	case 1:			/* PPI */
		*out_hwirq = intspec[1] + 16;
		break;
	default:
		return -EINVAL;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
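/*
 * This matches the standard three-cell GIC binding. For example, a
 * device node using SPI 73, level-high, would carry
 *
 *	interrupts = <0 73 4>;	// <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>
 *
 * which xlates to hwirq 105 (73 + 32) with IRQ_TYPE_LEVEL_HIGH.
 */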
static int mt_gic_irqs;

#ifndef CONFIG_MTK_GIC
int mt_get_supported_irq_num(void)
{
	return mt_gic_irqs;
}
#endif

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	void __iomem *dist_base;
	void __iomem **redist_base;
	void __iomem *pol_base;
	struct resource res;
	u64 redist_stride;
	u32 redist_regions;
	u32 reg;
	int gic_irqs;
	int err;
	int i;
	int irq_base;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions",
				 &redist_regions))
		redist_regions = 1;

	redist_base = kcalloc(redist_regions, sizeof(*redist_base), GFP_KERNEL);
	if (!redist_base) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < redist_regions; i++) {
		redist_base[i] = of_iomap(node, 1 + i);
		if (!redist_base[i]) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
	}

	pol_base = of_iomap(node, redist_regions + 1);
	WARN(!pol_base, "unable to map pol registers\n");
	INT_POL_CTL_REG = pol_base;

	if (of_address_to_resource(node, redist_regions + 1, &res))
		WARN(true, "unable to get pol register resource\n");

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_data.dist_base = dist_base;
	gic_data.redist_base = redist_base;
	gic_data.redist_regions = redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_irqs -= 16;	/* calculate # of irqs to allocate, PPI+SPI */

	/* start from 16, which means the first PPI irq */
	irq_base = irq_alloc_descs(-1, 16, gic_irqs, numa_node_id());
	if (irq_base < 0) {
		pr_err("[GIC] alloc desc failed\n");
		err = irq_base;
		goto out_unmap_rdist;
	}

	/*
	 * Linear mapping from the first PPI to the last SPI;
	 * virq starts from 16.
	 */
	gic_data.domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
						16, &gic_irq_domain_ops,
						&gic_data);
	gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);
	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();

	mt_gic_irqs = gic_data.irq_nr;

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdist);
out_unmap_rdist:
	for (i = 0; i < redist_regions; i++)
		if (redist_base[i])
			iounmap(redist_base[i]);
	kfree(redist_base);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
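/*
 * Illustrative (not taken from a real dts) node layout this init code
 * expects: the distributor first, then #redistributor-regions
 * redistributor ranges, then the MediaTek polarity block as the final
 * reg entry; the addresses below are placeholders:
 *
 *	gic: interrupt-controller@c000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		#redistributor-regions = <1>;
 *		reg = <0 0x0c000000 0 0x10000>,		// GICD
 *		      <0 0x0c100000 0 0x200000>,	// GICR
 *		      <0 0x10200000 0 0x1000>;		// INT_POL_CTL
 *	};
 */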
#ifndef CONFIG_MTK_GIC
char *mt_irq_dump_status_buf(int irq, char *buf)
{
	int rc;
	unsigned int result;
	char *ptr = buf;

	if (!ptr)
		return NULL;

	ptr += sprintf(ptr, "[mt gic dump] irq = %d\n", irq);
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
	rc = mt_secure_call(MTK_SIP_KERNEL_GIC_DUMP, irq, 0, 0);
#else
	rc = -1;
#endif
	if (rc < 0) {
		ptr += sprintf(ptr, "[mt gic dump] not allowed to dump!\n");
		return ptr;
	}

	/* get mask */
	result = rc & 0x1;
	ptr += sprintf(ptr, "[mt gic dump] enable = %d\n", result);

	/* get group */
	result = (rc >> 1) & 0x1;
	ptr += sprintf(ptr, "[mt gic dump] group = %x (0x1:irq,0x0:fiq)\n",
		       result);

	/* get priority */
	result = (rc >> 2) & 0xff;
	ptr += sprintf(ptr, "[mt gic dump] priority = %x\n", result);

	/* get sensitivity */
	result = (rc >> 10) & 0x1;
	ptr += sprintf(ptr, "[mt gic dump] sensitivity = %x ", result);
	ptr += sprintf(ptr, "(edge:0x1, level:0x0)\n");

	/* get pending status */
	result = (rc >> 11) & 0x1;
	ptr += sprintf(ptr, "[mt gic dump] pending = %x\n", result);

	/* get active status */
	result = (rc >> 12) & 0x1;
	ptr += sprintf(ptr, "[mt gic dump] active status = %x\n", result);

	/* get polarity */
	result = (rc >> 13) & 0x1;
	ptr += sprintf(ptr,
		       "[mt gic dump] polarity = %x (0x0: high, 0x1:low)\n",
		       result);

	/* get target cpu mask */
	result = (rc >> 14) & 0xff;
	ptr += sprintf(ptr, "[mt gic dump] target cpu mask = 0x%x\n", result);

	return ptr;
}

void mt_irq_dump_status(int irq)
{
	char *buf = kmalloc(2048, GFP_KERNEL);

	if (!buf)
		return;

	if (mt_irq_dump_status_buf(irq, buf))
		pr_debug("%s", buf);

	kfree(buf);
}
EXPORT_SYMBOL(mt_irq_dump_status);
#endif