mt_cpuxgpt.c

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/arch_timer.h>
/* #include <linux/smp.h> */
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <mach/mt_cpuxgpt.h>
/* If ATF is enabled, the MCUSYS registers are write-protected. */
#include <mach/mt_secure_api.h>

#define CPUXGPT_BASE		cpuxgpt_regs
#define INDEX_BASE		(CPUXGPT_BASE + 0x0674)
#define CTL_BASE		(CPUXGPT_BASE + 0x0670)
#define CPUXGPT_BASE_PHY	mt_cpuxgpt_base_phys
#define INDEX_BASE_PHY		(CPUXGPT_BASE_PHY + 0x0674)
#define CTL_BASE_PHY		(CPUXGPT_BASE_PHY + 0x0670)
static struct resource cpuxgpt_r;
static phys_addr_t mt_cpuxgpt_base_phys;
static DEFINE_SPINLOCK(cpuxgpt_reg_lock);
static irqreturn_t (*user_handlers[CPUXGPTNUMBERS])(int irq, void *dev_id) = { 0 };
static unsigned int g_ctl;
static void __iomem *cpuxgpt_regs;
static int cpuxgpt_irq[CPUXGPTNUMBERS];

static irqreturn_t __cpuxgpt0_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt1_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt2_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt3_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt4_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt5_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt6_irq_handler(int irq, void *dev_id);
static irqreturn_t __cpuxgpt7_irq_handler(int irq, void *dev_id);

static const struct of_device_id cpuxgpt_addr_ids[] __initconst = {
        { .compatible = "mediatek,mt6735-cpuxgpt" },
        {},
};
/* IRQ dispatch table: one entry per CPUXGPT timer, supports 8 timer callbacks. */
static irqreturn_t (*cpuxgpt_irq_handler[])(int irq, void *dev_id) = {
        __cpuxgpt0_irq_handler,
        __cpuxgpt1_irq_handler,
        __cpuxgpt2_irq_handler,
        __cpuxgpt3_irq_handler,
        __cpuxgpt4_irq_handler,
        __cpuxgpt5_irq_handler,
        __cpuxgpt6_irq_handler,
        __cpuxgpt7_irq_handler,
};

#define gpt_update_lock(flags) spin_lock_irqsave(&cpuxgpt_reg_lock, flags)
#define gpt_update_unlock(flags) spin_unlock_irqrestore(&cpuxgpt_reg_lock, flags)
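/*
 * Note on register access (summary of the two helpers below): the CPUXGPT
 * registers are reached indirectly through an index/data pair in MCUCFG.
 * Each access first writes the register index to INDEX_BASE (+0x0674) and
 * then reads or writes the value through CTL_BASE (+0x0670). When ARM/MTK
 * PSCI is enabled the MCUSYS range is write-protected by ATF, so the writes
 * are presumably routed through an SMC on the physical addresses
 * (mcusys_smc_write_phy) rather than plain MMIO writes.
 */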
static unsigned int __read_cpuxgpt(unsigned int reg_index)
{
        unsigned int value;

#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
        /* DRV_WriteReg32(INDEX_BASE, reg_index); */
        mcusys_smc_write_phy(INDEX_BASE_PHY, reg_index);
#else
        mcusys_smc_write(INDEX_BASE, reg_index);
#endif
        value = __raw_readl(CTL_BASE);

        return value;
}

static void __write_cpuxgpt(unsigned int reg_index, unsigned int value)
{
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
        /* DRV_WriteReg32(INDEX_BASE, reg_index); */
        /* DRV_WriteReg32(CTL_BASE, value); */
        mcusys_smc_write_phy(INDEX_BASE_PHY, reg_index);
        mcusys_smc_write_phy(CTL_BASE_PHY, value);
#else
        mcusys_smc_write(INDEX_BASE, reg_index);
        mcusys_smc_write(CTL_BASE, value);
#endif
}
static int __get_irq_id(int id)
{
        if (id < CPUXGPTNUMBERS)
                return cpuxgpt_irq[id];

        pr_err("%s: invalid cpuxgpt id %d\n", __func__, id);
        return -1;
}
static void __cpuxgpt_enable(void)
{
        unsigned int tmp;

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_CTL_REG);
        tmp |= EN_CPUXGPT;
        __write_cpuxgpt(INDEX_CTL_REG, tmp);
        spin_unlock(&cpuxgpt_reg_lock);
}

static void __cpuxgpt_disable(void)
{
        unsigned int tmp;

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_CTL_REG);
        tmp &= ~EN_CPUXGPT;
        __write_cpuxgpt(INDEX_CTL_REG, tmp);
        spin_unlock(&cpuxgpt_reg_lock);
}

static void __cpuxgpt_halt_on_debug_en(int en)
{
        unsigned int tmp;

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_CTL_REG);
        if (en == 1)
                tmp |= EN_AHLT_DEBUG;
        else if (en == 0)
                tmp &= ~EN_AHLT_DEBUG;
        __write_cpuxgpt(INDEX_CTL_REG, tmp);
        spin_unlock(&cpuxgpt_reg_lock);
}
static void __cpuxgpt_set_clk(unsigned int div)
{
        unsigned int tmp;

        /* printk("%s fwq div is 0x%x\n", __func__, div); */
        if (div != CLK_DIV1 && div != CLK_DIV2 && div != CLK_DIV4) {
                pr_err("%s: invalid clock divider 0x%x\n", __func__, div);
                return;
        }

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_CTL_REG);
        tmp &= CLK_DIV_MASK;
        tmp |= div;
        __write_cpuxgpt(INDEX_CTL_REG, tmp);
        spin_unlock(&cpuxgpt_reg_lock);
}
static void __cpuxgpt_set_init_cnt(unsigned int countH, unsigned int countL)
{
        spin_lock(&cpuxgpt_reg_lock);
        __write_cpuxgpt(INDEX_CNT_H_INIT, countH);
        __write_cpuxgpt(INDEX_CNT_L_INIT, countL);
        spin_unlock(&cpuxgpt_reg_lock);
}

static unsigned int __cpuxgpt_irq_en(int cpuxgpt_num)
{
        unsigned int tmp;

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_IRQ_MASK);
        tmp |= (1 << cpuxgpt_num);
        __write_cpuxgpt(INDEX_IRQ_MASK, tmp);
        spin_unlock(&cpuxgpt_reg_lock);

        return 0;
}

static unsigned int __cpuxgpt_irq_dis(int cpuxgpt_num)
{
        unsigned int tmp;

        spin_lock(&cpuxgpt_reg_lock);
        tmp = __read_cpuxgpt(INDEX_IRQ_MASK);
        tmp &= ~(1 << cpuxgpt_num);
        __write_cpuxgpt(INDEX_IRQ_MASK, tmp);
        spin_unlock(&cpuxgpt_reg_lock);

        return 0;
}

static unsigned int __cpuxgpt_set_cmp(CPUXGPT_NUM cpuxgpt_num, int countH, int countL)
{
        /* Each comparator occupies 8 bytes: low word at the base, high word at +0x4. */
        spin_lock(&cpuxgpt_reg_lock);
        __write_cpuxgpt(INDEX_CMP_BASE + (cpuxgpt_num * 0x8) + 0x4, countH);
        __write_cpuxgpt(INDEX_CMP_BASE + (cpuxgpt_num * 0x8), countL);
        spin_unlock(&cpuxgpt_reg_lock);

        return 0;
}
static irqreturn_t __cpuxgpt0_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt0 irq occurred\n"); */
        __cpuxgpt_irq_dis(0);
        if (user_handlers[0])
                user_handlers[0](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt1_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt1 irq occurred\n"); */
        __cpuxgpt_irq_dis(1);
        if (user_handlers[1])
                user_handlers[1](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt2_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt2 irq occurred\n"); */
        __cpuxgpt_irq_dis(2);
        if (user_handlers[2])
                user_handlers[2](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt3_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt3 irq occurred\n"); */
        __cpuxgpt_irq_dis(3);
        if (user_handlers[3])
                user_handlers[3](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt4_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt4 irq occurred\n"); */
        __cpuxgpt_irq_dis(4);
        if (user_handlers[4])
                user_handlers[4](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt5_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt5 irq occurred\n"); */
        __cpuxgpt_irq_dis(5);
        if (user_handlers[5])
                user_handlers[5](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt6_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt6 irq occurred\n"); */
        __cpuxgpt_irq_dis(6);
        if (user_handlers[6])
                user_handlers[6](irq, dev_id);
        return IRQ_HANDLED;
}

static irqreturn_t __cpuxgpt7_irq_handler(int irq, void *dev_id)
{
        /* printk("cpuxgpt7 irq occurred\n"); */
        __cpuxgpt_irq_dis(7);
        if (user_handlers[7])
                user_handlers[7](irq, dev_id);
        return IRQ_HANDLED;
}
int cpu_xgpt_set_cmp_HL(CPUXGPT_NUM cpuxgpt_num, int countH, int countL)
{
        __cpuxgpt_set_cmp(cpuxgpt_num, countH, countL);
        __cpuxgpt_irq_en(cpuxgpt_num);

        return 0;
}
void mt_cpuxgpt_map_base(void)
{
        int ret;
        struct device_node *config_node;

        /* Set up IO addresses based on MCUCFG. */
        config_node = of_find_matching_node(NULL, cpuxgpt_addr_ids);
        if (!config_node) {
                pr_err("%s: no CPUXGPT timer node found\n", __func__);
                return;
        }

        cpuxgpt_regs = of_iomap(config_node, 0);
        ret = of_address_to_resource(config_node, 0, &cpuxgpt_r);
        if (ret) {
                pr_err("%s: failed to map the CPUXGPT physical address\n", __func__);
                return;
        }

        mt_cpuxgpt_base_phys = cpuxgpt_r.start;
        pr_info("cpuxgpt_r.start = %pa\n", &mt_cpuxgpt_base_phys);
}
static void __init mt_cpuxgpt_init(struct device_node *node)
{
        int i;
        int ret;
        struct device_node *config_node;
        /* u32 freq; */

        /* gpt_update_lock(save_flags); */
        /* freq = SYS_CLK_RATE */
        /* if (of_property_read_u32(node, "clock-frequency", &freq)) */
        /*         pr_err("clock-frequency not set in the .dts file"); */

        /* Set up IRQ numbers. */
        for (i = CPUXGPT0; i < CPUXGPTNUMBERS; i++)
                cpuxgpt_irq[i] = irq_of_parse_and_map(node, i);

        /* Set up IO addresses based on MCUCFG. */
        if (cpuxgpt_regs == NULL) {
                config_node = of_find_matching_node(NULL, cpuxgpt_addr_ids);
                if (!config_node) {
                        pr_err("%s: no CPUXGPT timer node found\n", __func__);
                        return;
                }
                cpuxgpt_regs = of_iomap(config_node, 0);
                ret = of_address_to_resource(config_node, 0, &cpuxgpt_r);
                if (ret) {
                        pr_err("%s: failed to map the CPUXGPT physical address\n", __func__);
                        return;
                }
                mt_cpuxgpt_base_phys = cpuxgpt_r.start;
                pr_info("cpuxgpt_r.start = %pa\n", &mt_cpuxgpt_base_phys);
        } else {
                pr_info("cpuxgpt base address already mapped\n");
        }

        /*
         * pr_alert("mt_cpuxgpt_init: cpuxgpt_regs=0x%x, irq0=%d, irq1=%d, irq2=%d, irq3=%d, irq4=%d, irq5=%d, irq6=%d, irq7=%d\n",
         *          cpuxgpt_regs, cpuxgpt_irq[0], cpuxgpt_irq[1], cpuxgpt_irq[2], cpuxgpt_irq[3],
         *          cpuxgpt_irq[4], cpuxgpt_irq[5], cpuxgpt_irq[6], cpuxgpt_irq[7]);
         */
        /* gpt_update_unlock(save_flags); */
}
/********************** export area *********************/
u64 localtimer_get_phy_count(void)
{
        return arch_counter_get_cntvct();
}
EXPORT_SYMBOL(localtimer_get_phy_count);
int cpu_xgpt_register_timer(unsigned int id, irqreturn_t (*func)(int irq, void *dev_id))
{
        int ret;
        int irq_id;
        const char *name;

        /* id is unsigned, so only the upper bound needs checking. */
        if (id >= CPUXGPTNUMBERS) {
                pr_err("%s: invalid id=%u, should be 0~7\n", __func__, id);
                return -1;
        }

        switch (id) {
        case 0:
                name = "mtk_cpuxgpt0";
                break;
        case 1:
                name = "mtk_cpuxgpt1";
                break;
        case 2:
                name = "mtk_cpuxgpt2";
                break;
        case 3:
                name = "mtk_cpuxgpt3";
                break;
        case 4:
                name = "mtk_cpuxgpt4";
                break;
        case 5:
                name = "mtk_cpuxgpt5";
                break;
        case 6:
                name = "mtk_cpuxgpt6";
                break;
        case 7:
        default:
                name = "mtk_cpuxgpt7";
                break;
        }

        if (func)
                user_handlers[id] = func;
        /* sprintf(name, "mtk_cpuxgpt%d", id); */

        irq_id = __get_irq_id(id);
        /* cpuxgpt is assigned per core */
        /* don't trigger the IRQ to CPU0: mt_gic_cfg_irq2cpu(irq_id, 0, 0); */
        /* trigger the IRQ to CPUx: mt_gic_cfg_irq2cpu(irq_id, (irq_id - CPUXGPT_IRQID_BASE) % num_possible_cpus(), 1); */
        ret = request_irq(irq_id, cpuxgpt_irq_handler[id], IRQF_TRIGGER_HIGH, name, NULL);
        if (ret != 0) {
                pr_err("%s: %s failed to register irq\n", __func__, name);
                return ret;
        }

        pr_debug("%s: %s registered irq (%d) ok\n", __func__, name, irq_id);
        return 0;
}
EXPORT_SYMBOL(cpu_xgpt_register_timer);
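/*
 * Usage sketch (illustrative only; the callback name and the 5 ms value are
 * made up for this example). A client registers a handler for one CPUXGPT
 * comparator and then arms it relative to the current counter value:
 *
 *	static irqreturn_t my_cpuxgpt_cb(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	cpu_xgpt_register_timer(0, my_cpuxgpt_cb);
 *	cpu_xgpt_set_timer(0, 5 * NSEC_PER_MSEC);
 *
 * Note that __cpuxgptN_irq_handler() masks the comparator IRQ before calling
 * the user handler, so a periodic user must re-arm via cpu_xgpt_set_timer().
 */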
int cpu_xgpt_set_timer(int id, u64 ns)
{
        u64 count;
        u64 now;
        u64 set_count;
        unsigned int set_count_lo;
        unsigned int set_count_hi;

        count = ns;
        now = localtimer_get_phy_count();
        /* ns -> counter ticks; 1000/13 evaluates to 76, approximating a 13 MHz tick period (~76.9 ns). */
        do_div(count, 1000 / 13);
        set_count = count + now;
        set_count_lo = lower_32_bits(set_count);
        set_count_hi = upper_32_bits(set_count);
        pr_debug("%s: set cpuxgpt(%d) count(%u,%u)\n", __func__, id, set_count_hi, set_count_lo);
        __cpuxgpt_set_cmp(id, set_count_hi, set_count_lo);
        __cpuxgpt_irq_en(id);

        return 0;
}
EXPORT_SYMBOL(cpu_xgpt_set_timer);
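/*
 * Worked example for the conversion above (assuming the counter ticks at
 * roughly 13 MHz, which is what the 1000/13 divisor implies):
 * cpu_xgpt_set_timer(0, 1000000ULL) requests a 1 ms deadline,
 * do_div(count, 76) turns 1,000,000 ns into 13,157 ticks, and comparator 0
 * is programmed to the current count plus 13,157.
 */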
void enable_cpuxgpt(void)
{
        __cpuxgpt_enable();
        pr_debug("%s: reg(%x)\n", __func__, __read_cpuxgpt(INDEX_CTL_REG));
}
EXPORT_SYMBOL(enable_cpuxgpt);

void disable_cpuxgpt(void)
{
        __cpuxgpt_disable();
        pr_debug("%s: reg(%x)\n", __func__, __read_cpuxgpt(INDEX_CTL_REG));
}
EXPORT_SYMBOL(disable_cpuxgpt);

void set_cpuxgpt_clk(unsigned int div)
{
        __cpuxgpt_set_clk(div);
        pr_debug("%s: reg(%x)\n", __func__, __read_cpuxgpt(INDEX_CTL_REG));
}
EXPORT_SYMBOL(set_cpuxgpt_clk);

void restore_cpuxgpt(void)
{
        __write_cpuxgpt(INDEX_CTL_REG, g_ctl);
        pr_debug("g_ctl:0x%x, %s\n", __read_cpuxgpt(INDEX_CTL_REG), __func__);
}
EXPORT_SYMBOL(restore_cpuxgpt);

void save_cpuxgpt(void)
{
        g_ctl = __read_cpuxgpt(INDEX_CTL_REG);
        pr_debug("g_ctl:0x%x, %s\n", g_ctl, __func__);
}
EXPORT_SYMBOL(save_cpuxgpt);

unsigned int cpu_xgpt_irq_dis(int cpuxgpt_num)
{
        __cpuxgpt_irq_dis(cpuxgpt_num);
        return 0;
}
EXPORT_SYMBOL(cpu_xgpt_irq_dis);
int cpu_xgpt_set_cmp(CPUXGPT_NUM cpuxgpt_num, u64 count)
{
        unsigned int set_count_lo = lower_32_bits(count);
        unsigned int set_count_hi = upper_32_bits(count);

        cpu_xgpt_set_cmp_HL(cpuxgpt_num, set_count_hi, set_count_lo);
        return 0;
}
EXPORT_SYMBOL(cpu_xgpt_set_cmp);

void cpu_xgpt_set_init_count(unsigned int countH, unsigned int countL)
{
        __cpuxgpt_set_init_cnt(countH, countL);
}
EXPORT_SYMBOL(cpu_xgpt_set_init_count);

void cpu_xgpt_halt_on_debug_en(int en)
{
        __cpuxgpt_halt_on_debug_en(en);
}
EXPORT_SYMBOL(cpu_xgpt_halt_on_debug_en);

/* Run mt_cpuxgpt_init() during early boot when the matching DT node is found. */
CLOCKSOURCE_OF_DECLARE(mtk_cpuxgpt, "mediatek,mt6735-cpuxgpt", mt_cpuxgpt_init);