irq-mt-gic.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489
  1. /*
  2. * Copy from ARM GIC and add mediatek interrupt specific control codes
  3. */
  4. #include <linux/init.h>
  5. #include <linux/kernel.h>
  6. #include <linux/err.h>
  7. #include <linux/module.h>
  8. #include <linux/list.h>
  9. #include <linux/smp.h>
  10. #include <linux/cpu.h>
  11. #include <linux/cpu_pm.h>
  12. #include <linux/cpumask.h>
  13. #include <linux/io.h>
  14. #include <linux/of.h>
  15. #include <linux/of_address.h>
  16. #include <linux/of_irq.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/percpu.h>
  20. #include <linux/slab.h>
  21. #include <linux/irqchip/chained_irq.h>
  22. #include <linux/irqchip/arm-gic.h>
  23. #include <linux/irqchip/mt-gic.h>
  24. #include <asm/irq.h>
  25. #include <asm/exception.h>
  26. #include <asm/smp_plat.h>
  27. #include "irqchip.h"
  28. #include <mach/mt_secure_api.h>
  29. #include <mt-plat/mt_io.h>
/*
 * Per-GIC register base: either one base shared by all CPUs
 * (common_base), or — for CONFIG_GIC_NON_BANKED "Frankein-GICs"
 * without banked registers — one base per CPU (percpu_base).
 */
union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};
/* Per-GIC driver state (one entry per GIC in gic_data[]). */
struct gic_chip_data {
	union gic_base dist_base;	/* distributor register base */
	union gic_base cpu_base;	/* CPU interface register base */
#ifdef CONFIG_CPU_PM
	/* SPI state saved across cluster power-down (GIC max is 1020 irqs). */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	/* Banked per-CPU SGI/PPI state saved across CPU power-down. */
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;	/* linux irq <-> hwirq mapping */
	unsigned int gic_irqs;		/* number of hw interrupts supported */
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);	/* base accessor */
#endif
};
/*
 * Optional platform hook for writing an interrupt-polarity register.
 * NOTE(review): not referenced in this file — presumably assigned and
 * used by platform code; confirm before removing.
 */
void (*irq_pol_workaround)(phys_addr_t addr, u32 value);
/* Serializes distributor read-modify-write sequences. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
/* Number of GIC instances this driver manages (default: one). */
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
/* Hardware irq layout: SGIs occupy hwirq 0-15, PPIs 16-31. */
#ifndef NR_GIC_SGI
#define NR_GIC_SGI 16
#endif
#ifndef NR_GIC_PPI
#define NR_GIC_PPI 16
#endif
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
/* Resolve this CPU's register base when bases are per-CPU. */
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}
/* Resolve the single shared base for banked GICs. */
static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}
/* Distributor base of @data via the configured accessor. */
static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}
/* CPU interface base of @data via the configured accessor. */
static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}
/* Install the base accessor (percpu or common) for @data. */
static inline void gic_set_base_accessor(struct gic_chip_data *data,
void __iomem * (*f)(union gic_base *))
{
	data->get_base = f;
}
#else
/* Banked GIC: single base; accessor selection compiles away entirely. */
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif
  96. static inline void __iomem *gic_dist_base(struct irq_data *d)
  97. {
  98. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  99. return gic_data_dist_base(gic_data);
  100. }
  101. static inline void __iomem *gic_cpu_base(struct irq_data *d)
  102. {
  103. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  104. return gic_data_cpu_base(gic_data);
  105. }
  106. static inline unsigned int gic_irq(struct irq_data *d)
  107. {
  108. return d->hwirq;
  109. }
  110. /*
  111. * Routines to acknowledge, disable and enable interrupts
  112. */
  113. static void gic_mask_irq(struct irq_data *d)
  114. {
  115. u32 mask = 1 << (gic_irq(d) % 32);
  116. raw_spin_lock(&irq_controller_lock);
  117. writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
  118. raw_spin_unlock(&irq_controller_lock);
  119. }
  120. static void gic_unmask_irq(struct irq_data *d)
  121. {
  122. u32 mask = 1 << (gic_irq(d) % 32);
  123. raw_spin_lock(&irq_controller_lock);
  124. writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
  125. raw_spin_unlock(&irq_controller_lock);
  126. }
  127. static void gic_eoi_irq(struct irq_data *d)
  128. {
  129. writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
  130. }
/* Mapped GIC distributor / CPU interface bases (set by platform code). */
void __iomem *GIC_DIST_BASE;
void __iomem *GIC_CPU_BASE;
/* Virtual and physical base of the MTK interrupt-polarity control regs. */
void __iomem *INT_POL_CTL0;
phys_addr_t INT_POL_CTL0_phys;
/*
 * Write @value to polarity control register @reg_index.
 *
 * Weak default: write through the virtual mapping directly.  On
 * platforms where mcusys_smc_write_phy is defined the POL registers
 * are written in the secure world via SMC using the physical address.
 */
__weak void mt_set_pol_reg(u32 reg_index, u32 value)
{
#ifndef mcusys_smc_write_phy
	writel_relaxed(value, (INT_POL_CTL0 + (reg_index * 4)));
#else
	mcusys_smc_write_phy((INT_POL_CTL0_phys + (reg_index * 4)), value);
#endif
}
  143. void mt_irq_set_polarity(unsigned int irq, unsigned int polarity)
  144. {
  145. u32 offset, reg_index, value;
  146. if (irq < (NR_GIC_SGI + NR_GIC_PPI)) {
  147. pr_crit("Fail to set polarity of interrupt %d\n", irq);
  148. return;
  149. }
  150. offset = (irq - (NR_GIC_SGI + NR_GIC_PPI)) & 0x1F;
  151. reg_index = (irq - (NR_GIC_SGI + NR_GIC_PPI)) >> 5;
  152. /*
  153. raw_spin_lock(&irq_controller_lock);
  154. */
  155. if (polarity == 0) {
  156. /* active low */
  157. value = readl_relaxed(IOMEM(INT_POL_CTL0 + (reg_index * 4)));
  158. value |= (1 << offset);
  159. /* some platforms has to write POL register in secure world. USE PHYSICALL ADDRESS */
  160. mt_set_pol_reg(reg_index, value);
  161. } else {
  162. /* active high */
  163. value = readl_relaxed(IOMEM(INT_POL_CTL0 + (reg_index * 4)));
  164. value &= ~(0x1 << offset);
  165. /* some platforms has to write POL register in secure world */
  166. mt_set_pol_reg(reg_index, value);
  167. }
  168. /*
  169. raw_spin_unlock(&irq_controller_lock);
  170. */
  171. }
  172. /* EXPORT_SYMBOL(mt_irq_set_polarity); */
/*
 * gic_set_type - configure trigger mode and MTK polarity for @d
 *
 * Programs the distributor CONFIG register (level vs edge) and, via the
 * MTK polarity extension, the active level.  As the GIC spec
 * recommends, the interrupt is disabled around the CONFIG update and
 * re-enabled afterwards if it was enabled.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);	/* edge/level bit for this irq */
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;
	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;
	/*
	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;
	*/
	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if ((type == IRQ_TYPE_LEVEL_HIGH) || (type == IRQ_TYPE_LEVEL_LOW)) {
		val &= ~confmask;	/* level-sensitive */
	} else if ((type == IRQ_TYPE_EDGE_RISING) || (type == IRQ_TYPE_EDGE_FALLING)) {
		val |= confmask;	/* edge-triggered */
	} else {
		/* NOTE(review): unknown types are only logged; the config is
		 * written back unchanged and 0 is returned rather than
		 * -EINVAL - confirm callers do not rely on an error here. */
		pr_err("[GIC] not correct trigger type (0x%x)\n", type);
		dump_stack();
	}
	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}
	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
	/*mtk polarity setting */
	if (type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		mt_irq_set_polarity(gicirq, (type & IRQF_TRIGGER_FALLING) ? 0 : 1);
	else if (type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW))
		mt_irq_set_polarity(gicirq, (type & IRQF_TRIGGER_LOW) ? 0 : 1);
	raw_spin_unlock(&irq_controller_lock);
	return 0;
}
  219. static int gic_retrigger(struct irq_data *d)
  220. {
  221. /* the genirq layer expects 0 if we can't retrigger in hardware */
  222. return 0;
  223. }
  224. /* set the priority mask to 0x00 for masking all irqs to this cpu */
  225. void gic_set_primask(void)
  226. {
  227. struct gic_chip_data *gic = &gic_data[0];
  228. void __iomem *base = gic_data_cpu_base(gic);
  229. writel_relaxed(0x00, base + GIC_CPU_PRIMASK);
  230. }
  231. /* restore the priority mask value */
  232. void gic_clear_primask(void)
  233. {
  234. struct gic_chip_data *gic = &gic_data[0];
  235. void __iomem *base = gic_data_cpu_base(gic);
  236. writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
  237. }
  238. #ifdef CONFIG_SMP
/*
 * gic_set_affinity - route interrupt @d to the CPUs in @mask_val
 *
 * Legacy path (!CONFIG_MTK_IRQ_NEW_DESIGN): routes to a single CPU
 * picked from the mask.  New path: programs every CPU in the mask as a
 * target in the byte-wide GIC_DIST_TARGET slot for this irq.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;	/* byte slot within TARGET word */
	u32 val, bit = 0;
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
	u32 mask;
#endif
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);
	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);
#else
	/*
	 * no need to update when:
	 * input mask is equal to the current setting
	 */
	if (cpumask_equal(d->affinity, mask_val))
		return IRQ_SET_MASK_OK_NOCOPY;
	/*
	 * cpumask_first_and() returns >= nr_cpu_ids when the intersection
	 * of inputs is an empty set -> return error when this is not a "forced" update
	 */
	if (!force && (cpumask_first_and(mask_val, cpu_online_mask) >= nr_cpu_ids))
		return -EINVAL;
	/* set target cpus */
	for_each_cpu(cpu, mask_val)
		bit |= gic_cpu_map[cpu] << shift;
	/* update gic register */
	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~(0xff << shift);
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);
#endif
	return IRQ_SET_MASK_OK;
}
  284. #endif
  285. #ifdef CONFIG_PM
  286. static int gic_set_wake(struct irq_data *d, unsigned int on)
  287. {
  288. int ret = -ENXIO;
  289. return ret;
  290. }
  291. #else
  292. #define gic_set_wake NULL
  293. #endif
/*
 * gic_handle_irq - top-level interrupt entry for GIC 0
 *
 * Reads INTACK until a spurious ID (1021-1023) is returned.  PPIs/SPIs
 * (16-1020) are dispatched through the irq domain; SGIs (< 16) are
 * EOIed immediately (the EOI must echo the source-CPU bits from
 * INTACK) and handled as IPIs.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & ~0x1c00;	/* strip the source CPU ID bits */
		if (likely(irqnr > 15 && irqnr < 1021)) {
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		/* spurious interrupt: nothing more pending */
		break;
	} while (1);
}
/*
 * gic_handle_cascade_irq - chained handler for a cascaded (secondary) GIC
 *
 * Acknowledges the secondary GIC's CPU interface, translates the
 * returned hwirq through its domain and invokes the mapped handler.
 * 1023 means spurious (nothing pending); ids outside 32-1020 are
 * reported as bad interrupts.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;
	chained_irq_enter(chip, desc);
	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);
	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;
	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);
out:
	chained_irq_exit(chip, desc);
}
/* irq_chip callbacks for all GIC-owned interrupts (fasteoi flow). */
static struct irq_chip gic_chip = {
	.name = "GIC",
	.irq_mask = gic_mask_irq,
	.irq_unmask = gic_unmask_irq,
	.irq_eoi = gic_eoi_irq,
	.irq_set_type = gic_set_type,
	.irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity = gic_set_affinity,
#endif
	.irq_set_wake = gic_set_wake,
};
  349. void __init mt_gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
  350. {
  351. if (gic_nr >= MAX_GIC_NR)
  352. BUG();
  353. if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
  354. BUG();
  355. irq_set_chained_handler(irq, gic_handle_cascade_irq);
  356. }
  357. /*
  358. static u8 gic_get_cpumask(struct gic_chip_data *gic)
  359. {
  360. void __iomem *base = gic_data_dist_base(gic);
  361. u32 mask, i;
  362. for (i = mask = 0; i < 32; i += 4) {
  363. mask = readl_relaxed(base + GIC_DIST_TARGET + i);
  364. mask |= mask >> 16;
  365. mask |= mask >> 8;
  366. if (mask)
  367. break;
  368. }
  369. if (!mask)
  370. pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
  371. return mask;
  372. }
  373. */
/*
 * gic_dist_init - one-time distributor setup, run on the boot CPU
 *
 * With the distributor disabled: configure all SPIs as level-triggered,
 * program their CPU targets, set a default priority (0xa0), mask them,
 * then re-enable the distributor.  SGIs/PPIs (0-31) are banked per CPU
 * and handled in gic_cpu_init() instead.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);
	writel_relaxed(0, base + GIC_DIST_CTRL);
	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
	/*
	 * Set all global interrupts to this CPU only.
	 */
	/*
	cpumask = gic_get_cpumask(gic);
	*/
	/* NOTE(review): assumes logical cpu id == GIC CPU interface id;
	 * the commented-out gic_get_cpumask() would read it from HW. */
	/*FIXME*/ cpumask = 1 << smp_processor_id();
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
#ifndef CONFIG_MTK_IRQ_NEW_DESIGN
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
#else
		/* new design: every SPI targets all CPU interfaces */
		writel_relaxed(0xffffffff, base + GIC_DIST_TARGET + i * 4 / 4);
#endif
	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
	/*
	 * Disable all interrupts. Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
	writel_relaxed(1, base + GIC_DIST_CTRL);
}
/*
 * gic_cpu_init - per-CPU GIC setup (boot CPU and each secondary CPU)
 *
 * Records this CPU's interface mask in gic_cpu_map[], masks all PPIs,
 * enables all SGIs, programs default priorities and opens the CPU
 * interface (priority mask 0xf0, interface enabled).
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;
	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	/*
	cpu_mask = gic_get_cpumask(gic);
	FIXME
	*/
	/* NOTE(review): assumes logical cpu id == GIC CPU interface id -
	 * confirm this holds on all supported SoCs. */
	cpu_mask = 1 << smp_processor_id();
	gic_cpu_map[cpu] = cpu_mask;
	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;
	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < (NR_GIC_SGI + NR_GIC_PPI); i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
  451. #ifdef CONFIG_CPU_PM
  452. /*
  453. * Saves the GIC distributor registers during suspend or idle. Must be called
  454. * with interrupts disabled but before powering down the GIC. After calling
  455. * this function, no interrupts will be delivered by the GIC, and another
  456. * platform-specific wakeup source must be enabled.
  457. */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	/* One u32 holds 16 config entries, 4 target bytes or 32 enable bits. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
  479. /*
  480. * Restores the GIC distributor registers during resume or when coming out of
  481. * idle. Must be called before enabling interrupts. If a level interrupt
  482. * that occurred while the GIC was suspended is still present, it will be
  483. * handled normally, but any edge interrupts that occurred will not be seen by
  484. * the GIC and need to be handled by the platform-specific wakeup source.
  485. */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	/* Restore with the distributor disabled, then re-enable it. */
	writel_relaxed(0, dist_base + GIC_DIST_CTRL);
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);
	/* Priorities were never saved: rewrite the fixed default 0xa0. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
/*
 * gic_cpu_save - save this CPU's banked SGI/PPI enable bits and PPI
 * trigger configuration before the CPU is powered down.
 */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	if (!dist_base || !cpu_base)
		return;
	/* The first 32 hwirqs (SGI+PPI) are banked per CPU: one enable
	 * word and two config words cover them. */
	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
/*
 * gic_cpu_restore - restore this CPU's banked SGI/PPI state and
 * re-open the CPU interface after power-up.
 */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	if (!dist_base || !cpu_base)
		return;
	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
	/* Priorities are not saved; rewrite the fixed default 0xa0. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
/*
 * gic_notifier - CPU PM notifier callback
 *
 * Saves/restores banked per-CPU GIC state around CPU power transitions
 * and full distributor state around cluster power transitions, for
 * every active GIC.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;
	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}
	return NOTIFY_OK;
}
/* Registered with the CPU PM framework by gic_pm_init(). */
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
/*
 * gic_pm_init - allocate per-CPU save areas and register the PM notifier
 *
 * Only the primary GIC registers the CPU PM notifier (it iterates over
 * all GICs itself).  Allocation failure is fatal at init time.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	/* 32 banked irqs: 1 enable word, 2 config words (4 bytes each). */
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);
	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);
	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* CPU_PM disabled: no state to save or restore. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
  598. #ifdef CONFIG_SMP
  599. void mt_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
  600. {
  601. int cpu;
  602. unsigned long map = 0;
  603. /* Convert our logical CPU mask into a physical one. */
  604. for_each_cpu(cpu, mask)
  605. map |= gic_cpu_map[cpu];
  606. /*
  607. * Ensure that stores to Normal memory are visible to the
  608. * other CPUs before issuing the IPI.
  609. */
  610. mb();
  611. /* this always happens on GIC0 */
  612. writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
  613. }
  614. #endif
  615. static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
  616. {
  617. if (hw < 32) {
  618. irq_set_percpu_devid(irq);
  619. irq_set_chip_and_handler(irq, &gic_chip, handle_percpu_devid_irq);
  620. set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
  621. } else {
  622. irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq);
  623. set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
  624. }
  625. irq_set_chip_data(irq, d->host_data);
  626. return 0;
  627. }
  628. static int gic_irq_domain_xlate(struct irq_domain *d,
  629. struct device_node *controller,
  630. const u32 *intspec, unsigned int intsize,
  631. unsigned long *out_hwirq, unsigned int *out_type)
  632. {
  633. if (d->of_node != controller)
  634. return -EINVAL;
  635. if (intsize < 3)
  636. return -EINVAL;
  637. /* Get the interrupt number and add 16 to skip over SGIs */
  638. *out_hwirq = intspec[1] + NR_GIC_SGI;
  639. /* For SPIs, we need to add 16 more to get the GIC irq ID number */
  640. if (!intspec[0])
  641. *out_hwirq += NR_GIC_SGI;
  642. *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
  643. return 0;
  644. }
/*
 * mt_gic_register_sgi - expose SGI @irq as a regular fasteoi interrupt
 *
 * NOTE(review): assumes a 1:1 linux-irq/hwirq mapping for SGIs (hwirq
 * is set to @irq directly) - confirm against the domain setup.  If
 * @irq has no descriptor the hwirq fixup is skipped, but the chip,
 * handler and chip data are installed regardless.
 */
void mt_gic_register_sgi(unsigned int gic_nr, int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	if (desc)
		desc->irq_data.hwirq = irq;
	irq_set_chip_and_handler(irq, &gic_chip, handle_fasteoi_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	irq_set_chip_data(irq, &gic_data[gic_nr]);
}
  654. #ifdef CONFIG_SMP
/* Bring up the GIC CPU interface on a CPU that is coming online. */
static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}
/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier __cpuinitdata = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
  670. #endif
/* irq_domain callbacks for the legacy domain created in mt_gic_init_bases(). */
const struct irq_domain_ops mt_gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
/*
 * mt_gic_init_bases - probe and initialize GIC @gic_nr
 * @gic_nr: index into gic_data[]
 * @irq_start: first linux irq to use, or -1 to let the core allocate
 * @dist_base: mapped distributor base
 * @cpu_base: mapped CPU interface base
 * @percpu_offset: per-CPU register stride for non-banked GICs (0 if banked)
 * @node: device tree node for the irq domain
 *
 * Sets up register base accessors, reads the supported irq count from
 * GIC_DIST_CTR, creates a legacy irq domain and runs distributor,
 * CPU-interface and PM initialization.
 */
void __init mt_gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;
	BUG_ON(gic_nr >= MAX_GIC_NR);
	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankein-GIC without banked registers... */
		unsigned int cpu;
		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base || !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}
		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}
		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{
		/* Normal, sane GIC... */
		WARN(percpu_offset,
			"GIC_NON_BANKED not enabled, ignoring %08x offset!", percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}
	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;
	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}
	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;
	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
	/* NOTE(review): 16 is the requested starting linux irq passed to
	 * irq_alloc_descs() - confirm it is intentional vs hwirq_base. */
	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", irq_start);
		irq_base = irq_start;
	}
	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
		hwirq_base, &mt_gic_irq_domain_ops, gic);
	if (WARN_ON(!gic->domain))
		return;
#ifdef CONFIG_SMP
	set_smp_cross_call(mt_gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
#endif
	set_handle_irq(gic_handle_irq);
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}
/* Special APIs for specific modules */
/*
 * Serializes the raw distributor register read-modify-write sequences
 * performed by the mt_irq_* helpers below (mask-all/restore, sensitivity).
 * Initialized in mt_gic_of_init().
 */
static spinlock_t irq_lock;
  757. /*
  758. * mt_irq_mask_all: disable all interrupts
  759. * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
  760. * Return 0 for success; return negative values for failure.
  761. * (This is ONLY used for the idle current measurement by the factory mode.)
  762. */
  763. int mt_irq_mask_all(struct mtk_irq_mask *mask)
  764. {
  765. unsigned long flags;
  766. void __iomem *dist_base;
  767. dist_base = gic_data_dist_base(&gic_data[0]);
  768. if (mask) {
  769. /*
  770. #if defined(CONFIG_FIQ_GLUE)
  771. local_fiq_disable();
  772. #endif
  773. */
  774. spin_lock_irqsave(&irq_lock, flags);
  775. mask->mask0 = readl((dist_base + GIC_DIST_ENABLE_SET));
  776. mask->mask1 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x4));
  777. mask->mask2 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x8));
  778. mask->mask3 = readl((dist_base + GIC_DIST_ENABLE_SET + 0xC));
  779. mask->mask4 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x10));
  780. mask->mask5 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x14));
  781. mask->mask6 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x18));
  782. mask->mask7 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x1C));
  783. mask->mask8 = readl((dist_base + GIC_DIST_ENABLE_SET + 0x20));
  784. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR));
  785. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x4));
  786. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x8));
  787. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0xC));
  788. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x10));
  789. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x14));
  790. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x18));
  791. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x1C));
  792. writel(0xFFFFFFFF, (dist_base + GIC_DIST_ENABLE_CLEAR + 0x20));
  793. mb();
  794. spin_unlock_irqrestore(&irq_lock, flags);
  795. /*
  796. #if defined(CONFIG_FIQ_GLUE)
  797. local_fiq_enable();
  798. #endif
  799. */
  800. mask->header = IRQ_MASK_HEADER;
  801. mask->footer = IRQ_MASK_FOOTER;
  802. return 0;
  803. } else {
  804. return -1;
  805. }
  806. }
  807. /*
  808. * mt_irq_mask_restore: restore all interrupts
  809. * @mask: pointer to struct mtk_irq_mask for storing the original mask value.
  810. * Return 0 for success; return negative values for failure.
  811. * (This is ONLY used for the idle current measurement by the factory mode.)
  812. */
  813. int mt_irq_mask_restore(struct mtk_irq_mask *mask)
  814. {
  815. unsigned long flags;
  816. void __iomem *dist_base;
  817. dist_base = gic_data_dist_base(&gic_data[0]);
  818. if (!mask)
  819. return -1;
  820. if (mask->header != IRQ_MASK_HEADER)
  821. return -1;
  822. if (mask->footer != IRQ_MASK_FOOTER)
  823. return -1;
  824. /*
  825. #if defined(CONFIG_FIQ_GLUE)
  826. local_fiq_disable();
  827. #endif
  828. */
  829. spin_lock_irqsave(&irq_lock, flags);
  830. writel(mask->mask0, (dist_base + GIC_DIST_ENABLE_SET));
  831. writel(mask->mask1, (dist_base + GIC_DIST_ENABLE_SET + 0x4));
  832. writel(mask->mask2, (dist_base + GIC_DIST_ENABLE_SET + 0x8));
  833. writel(mask->mask3, (dist_base + GIC_DIST_ENABLE_SET + 0xC));
  834. writel(mask->mask4, (dist_base + GIC_DIST_ENABLE_SET + 0x10));
  835. writel(mask->mask5, (dist_base + GIC_DIST_ENABLE_SET + 0x14));
  836. writel(mask->mask6, (dist_base + GIC_DIST_ENABLE_SET + 0x18));
  837. writel(mask->mask7, (dist_base + GIC_DIST_ENABLE_SET + 0x1C));
  838. writel(mask->mask8, (dist_base + GIC_DIST_ENABLE_SET + 0x20));
  839. mb();
  840. spin_unlock_irqrestore(&irq_lock, flags);
  841. /*
  842. #if defined(CONFIG_FIQ_GLUE)
  843. local_fiq_enable();
  844. #endif
  845. */
  846. return 0;
  847. }
  848. /*
  849. * mt_irq_set_pending_for_sleep: pending an interrupt for the sleep manager's use
  850. * @irq: interrupt id
  851. * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
  852. */
  853. void mt_irq_set_pending_for_sleep(unsigned int irq)
  854. {
  855. void __iomem *dist_base;
  856. u32 mask = 1 << (irq % 32);
  857. dist_base = gic_data_dist_base(&gic_data[0]);
  858. if (irq < 16) {
  859. pr_err("Fail to set a pending on interrupt %d\n", irq);
  860. return;
  861. }
  862. *(volatile u32 *)(dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4) = mask;
  863. pr_debug("irq:%d, 0x%p=0x%x\n", irq,
  864. dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4, mask);
  865. mb();
  866. }
  867. u32 mt_irq_get_pending(unsigned int irq)
  868. {
  869. void __iomem *dist_base;
  870. u32 bit = 1 << (irq % 32);
  871. dist_base = gic_data_dist_base(&gic_data[0]);
  872. return (readl_relaxed(dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4) & bit) ? 1 : 0;
  873. }
  874. void mt_irq_set_pending(unsigned int irq)
  875. {
  876. void __iomem *dist_base;
  877. u32 bit = 1 << (irq % 32);
  878. dist_base = gic_data_dist_base(&gic_data[0]);
  879. writel(bit, dist_base + GIC_DIST_PENDING_SET + irq / 32 * 4);
  880. }
  881. /*
  882. * mt_irq_unmask_for_sleep: enable an interrupt for the sleep manager's use
  883. * @irq: interrupt id
  884. * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
  885. */
  886. void mt_irq_unmask_for_sleep(unsigned int irq)
  887. {
  888. void __iomem *dist_base;
  889. u32 mask = 1 << (irq % 32);
  890. dist_base = gic_data_dist_base(&gic_data[0]);
  891. if (irq < 16) {
  892. pr_err("Fail to enable interrupt %d\n", irq);
  893. return;
  894. }
  895. *(volatile u32 *)(dist_base + GIC_DIST_ENABLE_SET + irq / 32 * 4) = mask;
  896. mb();
  897. }
  898. /*
  899. * mt_irq_mask_for_sleep: disable an interrupt for the sleep manager's use
  900. * @irq: interrupt id
  901. * (THIS IS ONLY FOR SLEEP FUNCTION USE. DO NOT USE IT YOURSELF!)
  902. */
  903. void mt_irq_mask_for_sleep(unsigned int irq)
  904. {
  905. void __iomem *dist_base;
  906. u32 mask = 1 << (irq % 32);
  907. dist_base = gic_data_dist_base(&gic_data[0]);
  908. if (irq < 16) {
  909. pr_err("Fail to enable interrupt %d\n", irq);
  910. return;
  911. }
  912. *(volatile u32 *)(dist_base + GIC_DIST_ENABLE_CLEAR + irq / 32 * 4) = mask;
  913. mb();
  914. }
  915. /*
  916. * mt_irq_set_sens: set the interrupt sensitivity
  917. * @irq: interrupt id
  918. * @sens: sensitivity
  919. */
  920. void mt_irq_set_sens(unsigned int irq, unsigned int sens)
  921. {
  922. unsigned long flags;
  923. u32 config;
  924. if (irq < (NR_GIC_SGI + NR_GIC_PPI)) {
  925. pr_err("Fail to set sensitivity of interrupt %d\n", irq);
  926. return;
  927. }
  928. spin_lock_irqsave(&irq_lock, flags);
  929. if (sens == MT_EDGE_SENSITIVE) {
  930. config = readl(GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
  931. config |= (0x2 << (irq % 16) * 2);
  932. writel(config, GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
  933. } else {
  934. config = readl(GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
  935. config &= ~(0x2 << (irq % 16) * 2);
  936. writel(config, GIC_DIST_BASE + GIC_DIST_CONFIG + (irq / 16) * 4);
  937. }
  938. spin_unlock_irqrestore(&irq_lock, flags);
  939. mb();
  940. }
  941. /* EXPORT_SYMBOL(mt_irq_set_sens); */
  942. char *mt_irq_dump_status_buf(int irq, char *buf)
  943. {
  944. int rc;
  945. unsigned int result;
  946. char *ptr = buf;
  947. if (!ptr)
  948. return NULL;
  949. ptr += sprintf(ptr, "[mt gic dump] irq = %d\n", irq);
  950. #if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
  951. rc = mt_secure_call(MTK_SIP_KERNEL_GIC_DUMP, irq, 0, 0);
  952. #else
  953. rc = -1;
  954. #endif
  955. if (rc < 0) {
  956. ptr += sprintf(ptr, "[mt gic dump] not allowed to dump!\n");
  957. return ptr;
  958. }
  959. /* get mask */
  960. result = rc & 0x1;
  961. ptr += sprintf(ptr, "[mt gic dump] enable = %d\n", result);
  962. /* get group */
  963. result = (rc >> 1) & 0x1;
  964. ptr += sprintf(ptr, "[mt gic dump] group = %x (0x1:irq,0x0:fiq)\n", result);
  965. /* get priority */
  966. result = (rc >> 2) & 0xff;
  967. ptr += sprintf(ptr, "[mt gic dump] priority = %x\n", result);
  968. /* get sensitivity */
  969. result = (rc >> 10) & 0x1;
  970. ptr += sprintf(ptr, "[mt gic dump] sensitivity = %x (edge:0x1, level:0x0)\n", result);
  971. /* get pending status */
  972. result = (rc >> 11) & 0x1;
  973. ptr += sprintf(ptr, "[mt gic dump] pending = %x\n", result);
  974. /* get active status */
  975. result = (rc >> 12) & 0x1;
  976. ptr += sprintf(ptr, "[mt gic dump] active status = %x\n", result);
  977. /* get polarity */
  978. result = (rc >> 13) & 0x1;
  979. ptr += sprintf(ptr, "[mt gic dump] polarity = %x (0x0: high, 0x1:low)\n", result);
  980. /* get target cpu mask */
  981. result = (rc >> 14) & 0xff;
  982. ptr += sprintf(ptr, "[mt gic dump] tartget cpu mask = 0x%x\n", result);
  983. return ptr;
  984. }
  985. void mt_irq_dump_status(int irq)
  986. {
  987. char *buf = kmalloc(2048, GFP_KERNEL);
  988. if (!buf)
  989. return;
  990. if (mt_irq_dump_status_buf(irq, buf))
  991. pr_debug("%s", buf);
  992. kfree(buf);
  993. }
  994. EXPORT_SYMBOL(mt_irq_dump_status);
  995. #ifdef CONFIG_MTK_IRQ_NEW_DESIGN
  996. unsigned int wdt_irq;
  997. bool mt_is_secure_irq(struct irq_data *d)
  998. {
  999. return (gic_irq(d) == wdt_irq);
  1000. }
  1001. EXPORT_SYMBOL(mt_is_secure_irq);
  1002. bool mt_get_irq_gic_targets(struct irq_data *d, cpumask_t *mask)
  1003. {
  1004. unsigned int irq = gic_irq(d);
  1005. unsigned int cpu, shift, irq_targets = 0;
  1006. void __iomem *reg;
  1007. int rc;
  1008. /* check whether this IRQ is configured as FIQ */
  1009. if (mt_is_secure_irq(d)) {
  1010. /* secure call for get the irq targets */
  1011. #ifndef CONFIG_MTK_PSCI
  1012. rc = -1;
  1013. #else
  1014. rc = mt_secure_call(MTK_SIP_KERNEL_GIC_DUMP, irq, 0, 0);
  1015. #endif
  1016. if (rc < 0) {
  1017. pr_err("[mt_get_gicd_itargetsr] not allowed to dump!\n");
  1018. return false;
  1019. }
  1020. irq_targets = (rc >> 14) & 0xff;
  1021. } else {
  1022. shift = (irq % 4) * 8;
  1023. reg = gic_dist_base(d) + GIC_DIST_TARGET + (irq & ~3);
  1024. irq_targets = (readl_relaxed(reg) & (0xff << shift)) >> shift;
  1025. }
  1026. cpumask_clear(mask);
  1027. for_each_cpu(cpu, cpu_possible_mask)
  1028. if (irq_targets & (1<<cpu))
  1029. cpumask_set_cpu(cpu, mask);
  1030. return true;
  1031. }
  1032. EXPORT_SYMBOL(mt_get_irq_gic_targets);
  1033. #endif
#include <linux/platform_device.h>
/* IRQ number last written through the dump_irq sysfs attribute. */
static unsigned long dump_irq;
/* Dummy driver providing /sys/bus/platform/drivers/gic_debug/dump_irq. */
static struct platform_driver gic_debug_drv = {
	.driver = {
		.name = "gic_debug",
		.bus = &platform_bus_type,
		.owner = THIS_MODULE,
	}
};
  1043. static ssize_t dump_irq_show(struct device_driver *driver, char *buf)
  1044. {
  1045. mt_irq_dump_status_buf(dump_irq, buf);
  1046. return strlen(buf);
  1047. }
  1048. static ssize_t dump_irq_store(struct device_driver *driver, const char *buf, size_t count)
  1049. {
  1050. int ret;
  1051. ret = kstrtoul(buf, 10, (unsigned long *)&dump_irq);
  1052. if (ret != 0) {
  1053. pr_err("usage: echo $irq_num > /sys/bus/platform/drivers/gic_debug/dump_irq\n");
  1054. return -EINVAL;
  1055. }
  1056. mt_irq_dump_status(dump_irq);
  1057. return count;
  1058. }
  1059. DRIVER_ATTR(dump_irq, 0644, dump_irq_show, dump_irq_store);
  1060. int __init gic_debug_drv_init(void)
  1061. {
  1062. int ret;
  1063. ret = driver_register(&gic_debug_drv.driver);
  1064. if (ret)
  1065. pr_err("fail to create gic debug driver\n");
  1066. else
  1067. pr_err("success to create gic debug driver\n");
  1068. ret = driver_create_file(&gic_debug_drv.driver, &driver_attr_dump_irq);
  1069. if (ret)
  1070. pr_err("fail to create dump_irq sysfs files\n");
  1071. else
  1072. pr_err("success to create dump_irq sysfs files\n");
  1073. return 0;
  1074. }
  1075. arch_initcall(gic_debug_drv_init);
  1076. static unsigned int get_pol(int irq)
  1077. {
  1078. unsigned int bit;
  1079. bit = 1 << (irq % 32);
  1080. /* 0x0: high, 0x1:low */
  1081. return (readl(INT_POL_CTL0 + ((irq - 32) / 32 * 4)) & bit) ? 1 : 0;
  1082. }
  1083. static unsigned int get_sens(int irq)
  1084. {
  1085. unsigned int bit;
  1086. bit = 0x3 << ((irq % 16) * 2);
  1087. /* edge:0x2, level:0x1 */
  1088. return (readl(GIC_DIST_BASE + GIC_DIST_CONFIG + irq / 16 * 4) & bit) >> ((irq % 16) * 2);
  1089. }
  1090. static irqreturn_t gic_test_isr(void)
  1091. {
  1092. return IRQ_HANDLED;
  1093. }
/* Trigger-type encodings used by mt_gic_test() and mt_irq_set_sens().
 * NOTE(review): MT_EDGE_SENSITIVE is already referenced earlier in this
 * file (mt_irq_set_sens), so equivalent definitions must exist in a header
 * — confirm these do not conflict/redefine.
 */
#define MT_EDGE_SENSITIVE 0
#define MT_LEVEL_SENSITIVE 1
#define MT_POLARITY_LOW 0
#define MT_POLARITY_HIGH 1
/*
 * mt_gic_test - smoke-test trigger-type programming for one interrupt.
 * @irq: linux IRQ number to exercise
 * @type: IRQF_TRIGGER_* flag to request
 *
 * Requests @irq with the given trigger flag, then reads the polarity and
 * sensitivity registers back to verify the configuration took effect.
 * Results are reported via pr_debug/pr_err only. Returns the request_irq()
 * result, or -1 for an unknown trigger type.
 *
 * NOTE(review): execution continues even when request_irq() fails, and the
 * IRQ is never released with free_irq() — confirm this is acceptable for a
 * one-shot test helper. The handler cast relies on gic_test_isr's
 * signature matching irq_handler_t.
 */
int mt_gic_test(int irq, int type)
{
	int ret;
	ret = request_irq(irq, (irq_handler_t) gic_test_isr, type, "mtk_watchdog", NULL);
	if (ret < 0)
		pr_err("mtk gic test failed! fail num = %d\n", ret);
	switch (type) {
	case IRQF_TRIGGER_RISING:
		/* get_pol(): 0 = active high, 1 = active low. A rising/high
		 * trigger should therefore read back 0, which is what
		 * !MT_POLARITY_HIGH (== !1 == 0) expresses.
		 */
		if (get_pol(irq) == !MT_POLARITY_HIGH)
			pr_debug
			    ("[IRQF_TRIGGER_RISING]mt_irq_set_polarity GIC_POL_HIGH test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_RISING]mt_irq_set_polarity GIC_POL_HIGH test failed!!!\n");
		/* get_sens() returns the raw 2-bit ICFGR field; bit1 set
		 * (0x2) means edge-triggered.
		 */
		if (get_sens(irq) >> 1)
			pr_debug
			    ("[IRQF_TRIGGER_RISING]mt_irq_set_sens MT_EDGE_SENSITIVE test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_RISING]mt_irq_set_sens MT_EDGE_SENSITIVE test failed!!!\n");
		break;
	case IRQF_TRIGGER_FALLING:
		/* Falling edge: expect active-low (1 == !MT_POLARITY_LOW). */
		if (get_pol(irq) == !MT_POLARITY_LOW)
			pr_debug
			    ("[IRQF_TRIGGER_FALLING]mt_irq_set_polarity GIC_POL_LOW test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_FALLING]mt_irq_set_polarity GIC_POL_LOW test failed!!!\n");
		if (get_sens(irq) >> 1)
			pr_debug
			    ("[IRQF_TRIGGER_FALLING]mt_irq_set_sens MT_EDGE_SENSITIVE test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_FALLING]mt_irq_set_sens MT_EDGE_SENSITIVE test failed!!!\n");
		break;
	case IRQF_TRIGGER_HIGH:
		if (get_pol(irq) == !MT_POLARITY_HIGH)
			pr_debug
			    ("[IRQF_TRIGGER_HIGH]mt_irq_set_polarity GIC_POL_HIGH test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_HIGH]mt_irq_set_polarity GIC_POL_HIGH test failed!!!\n");
		/* Level trigger: the edge bit (bit1) must be clear. */
		if (!(get_sens(irq) >> 1))
			pr_debug
			    ("[IRQF_TRIGGER_HIGH]mt_irq_set_sens MT_LEVEL_SENSITIVE test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_HIGH]mt_irq_set_sens MT_LEVEL_SENSITIVE test failed!!!\n");
		break;
	case IRQF_TRIGGER_LOW:
		if (get_pol(irq) == !MT_POLARITY_LOW)
			pr_debug
			    ("[IRQF_TRIGGER_LOW]mt_irq_set_polarity GIC_POL_LOW test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_LOW]mt_irq_set_polarity GIC_POL_LOW test failed!!!\n");
		if (!(get_sens(irq) >> 1))
			pr_debug
			    ("[IRQF_TRIGGER_LOW]mt_irq_set_sens MT_LEVEL_SENSITIVE test passed!!!\n");
		else
			pr_debug
			    ("[IRQF_TRIGGER_LOW]mt_irq_set_sens MT_LEVEL_SENSITIVE test failed!!!\n");
		break;
	default:
		pr_err("[GIC] not correct trigger type\n");
		return -1;
	}
	return ret;
}
  1167. #ifdef CONFIG_OF
  1168. static int gic_cnt __initdata;
  1169. int mt_get_supported_irq_num(void)
  1170. {
  1171. void __iomem *dist_base;
  1172. int ret = 0;
  1173. dist_base = gic_data_dist_base(&gic_data[0]);
  1174. if (dist_base) {
  1175. ret = ((readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f) + 1) * 32;
  1176. pr_debug("gic supported max = %d\n", ret);
  1177. } else
  1178. pr_warn("gic dist_base is unknown\n");
  1179. return ret;
  1180. }
  1181. EXPORT_SYMBOL(mt_get_supported_irq_num);
  1182. int __init mt_gic_of_init(struct device_node *node, struct device_node *parent)
  1183. {
  1184. void __iomem *cpu_base;
  1185. void __iomem *dist_base;
  1186. void __iomem *pol_base;
  1187. u32 percpu_offset;
  1188. int irq;
  1189. struct resource res;
  1190. #ifdef CONFIG_MTK_IRQ_NEW_DESIGN
  1191. int i;
  1192. #endif
  1193. if (WARN_ON(!node))
  1194. return -ENODEV;
  1195. spin_lock_init(&irq_lock);
  1196. dist_base = of_iomap(node, 0);
  1197. WARN(!dist_base, "unable to map gic dist registers\n");
  1198. GIC_DIST_BASE = dist_base;
  1199. cpu_base = of_iomap(node, 1);
  1200. WARN(!cpu_base, "unable to map gic cpu registers\n");
  1201. GIC_CPU_BASE = cpu_base;
  1202. pol_base = of_iomap(node, 2);
  1203. WARN(!pol_base, "unable to map pol registers\n");
  1204. INT_POL_CTL0 = pol_base;
  1205. if (of_address_to_resource(node, 2, &res))
  1206. WARN(!pol_base, "unable to map pol registers\n");
  1207. INT_POL_CTL0_phys = res.start;
  1208. if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
  1209. percpu_offset = 0;
  1210. mt_gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
  1211. if (parent) {
  1212. irq = irq_of_parse_and_map(node, 0);
  1213. mt_gic_cascade_irq(gic_cnt, irq);
  1214. }
  1215. gic_cnt++;
  1216. #ifdef CONFIG_MTK_IRQ_NEW_DESIGN
  1217. for (i = 0; i <= CONFIG_NR_CPUS-1; ++i) {
  1218. INIT_LIST_HEAD(&(irq_need_migrate_list[i].list));
  1219. spin_lock_init(&(irq_need_migrate_list[i].lock));
  1220. }
  1221. if (of_property_read_u32(node, "mediatek,wdt_irq", &wdt_irq))
  1222. wdt_irq = 0;
  1223. #endif
  1224. return 0;
  1225. }
  1226. IRQCHIP_DECLARE(mt_gic, "mediatek,mt6735-gic", mt_gic_of_init);
  1227. #endif