/* mt_gpt.c */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>		/* simple_read_from_buffer() */
#include <linux/syscore_ops.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/uaccess.h>	/* drivers must not include asm-generic/ headers directly */
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <clocksource/arm_arch_timer.h>
#include <mt-plat/mt_gpt.h>
/*#include <mach/mt_cpuxgpt.h>*/
#include <mt-plat/sync_write.h>
#define GPT_CLKEVT_ID		(GPT1)
#define GPT_CLKSRC_ID		(GPT2)
#define GPT_SYSCNT_ID		(GPT6)

#define AP_XGPT_BASE		xgpt_timers.tmr_regs

/* global registers */
#define GPT_IRQEN		(AP_XGPT_BASE + 0x00)
#define GPT_IRQSTA		(AP_XGPT_BASE + 0x04)
#define GPT_IRQACK		(AP_XGPT_BASE + 0x08)
#define GPT1_BASE		(AP_XGPT_BASE + 0x10)

/* per-timer registers, relative to each timer's base (0x10 stride) */
#define GPT_CON			(0x00)
#define GPT_CLK			(0x04)
#define GPT_CNT			(0x08)
#define GPT_CMP			(0x0C)
#define GPT_CNTH		(0x18)	/* 64-bit timers only */
#define GPT_CMPH		(0x1C)	/* 64-bit timers only */

#define GPT_CON_ENABLE		(0x1 << 0)
#define GPT_CON_CLRCNT		(0x1 << 1)
#define GPT_CON_OPMODE		(0x3 << 4)

#define GPT_OPMODE_MASK		(0x3)
#define GPT_CLKDIV_MASK		(0xf)
#define GPT_CLKSRC_MASK		(0x1)

#define GPT_OPMODE_OFFSET	(4)
#define GPT_CLKSRC_OFFSET	(4)

#define GPT_FEAT_64_BIT		(0x0001)
#define GPT_ISR			(0x0010)
#define GPT_IN_USE		(0x0100)

/* masks for 32-/64-bit compatibility */
#define GPT_BIT_MASK_L		0x00000000FFFFFFFFULL
#define GPT_BIT_MASK_H		0xFFFFFFFF00000000ULL
struct mt_xgpt_timers {
	int tmr_irq;
	void __iomem *tmr_regs;
};

struct gpt_device {
	unsigned int id;
	unsigned int mode;
	unsigned int clksrc;
	unsigned int clkdiv;
	unsigned int cmp[2];
	void (*func)(unsigned long);
	int flags;
	int features;
	void __iomem *base_addr;
};

static struct mt_xgpt_timers xgpt_timers;
static struct gpt_device gpt_devs[NR_GPTS];
/*
 * Return the GPT4 count (read before the init-time clear) so the kernel
 * start time between LK and the kernel can be recorded.
 */
#define GPT4_1MS_TICK	((u32)13000)	/* 1000000 ns / 76.92 ns = 13000.520 (13 MHz) */
#define GPT4_BASE	(AP_XGPT_BASE + 0x0040)

static unsigned int boot_time_value;

#define mt_gpt_set_reg(val, addr)	mt_reg_sync_writel(__raw_readl(addr) | (val), addr)
#define mt_gpt_clr_reg(val, addr)	mt_reg_sync_writel(__raw_readl(addr) & ~(val), addr)
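/*
 * Note: these helpers are non-atomic read-modify-write sequences; callers
 * are expected to serialize access to a given register (gpt_lock and/or
 * local irq disable). mt_reg_sync_writel() is the platform's
 * write-with-barrier helper from mt-plat/sync_write.h.
 */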
static unsigned int xgpt_boot_up_time(void)
{
	unsigned int tick;

	tick = __raw_readl(GPT4_BASE + GPT_CNT);
	/* round up to whole milliseconds */
	return (tick + (GPT4_1MS_TICK - 1)) / GPT4_1MS_TICK;
}

static struct gpt_device *id_to_dev(unsigned int id)
{
	if (id == GPT3)	/* chip with MD32 will use GPT3 */
		return NULL;
	return id < NR_GPTS ? gpt_devs + id : NULL;
}

static DEFINE_SPINLOCK(gpt_lock);

#define gpt_update_lock(flags)		spin_lock_irqsave(&gpt_lock, flags)
#define gpt_update_unlock(flags)	spin_unlock_irqrestore(&gpt_lock, flags)

static inline void noop(unsigned long data)
{
}

static void (*handlers[])(unsigned long) = {
	noop, noop, noop, noop, noop, noop, noop,
};
static irqreturn_t gpt_handler(int irq, void *dev_id);
static cycle_t mt_gpt_read(struct clocksource *cs);
static int mt_gpt_set_next_event(unsigned long cycles,
				 struct clock_event_device *evt);
static void mt_gpt_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt);

static struct clocksource gpt_clocksource = {
	.name	= "mtk-gpt",
	.rating	= 300,
	.read	= mt_gpt_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 25,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct clock_event_device gpt_clockevent = {
	.name		= "mtk-gpt",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.rating		= 300,
	.set_next_event	= mt_gpt_set_next_event,
	.set_mode	= mt_gpt_set_mode,
};

static struct irqaction gpt_irq = {
	.name		= "mtk-gpt",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL | IRQF_TRIGGER_LOW,
	.handler	= gpt_handler,
	.dev_id		= &gpt_clockevent,
};
static struct tasklet_struct task[NR_GPTS];

static void task_sched(unsigned long data)
{
	unsigned int id = (unsigned int)data;

	tasklet_schedule(&task[id]);
}

static void __gpt_set_handler(struct gpt_device *dev, void (*func)(unsigned long))
{
	if (func) {
		if (dev->flags & GPT_ISR)
			handlers[dev->id] = func;
		else {
			tasklet_init(&task[dev->id], func, 0);
			handlers[dev->id] = task_sched;
		}
	}
	dev->func = func;
}
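/*
 * Dispatch model: with GPT_ISR the user callback is stored directly in
 * handlers[] and runs from gpt_handler() in hardirq context; without it,
 * handlers[] points at task_sched(), which only schedules the per-timer
 * tasklet, so the callback runs later in softirq context.
 */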
static inline unsigned int gpt_get_and_ack_irq(void)
{
	unsigned int id;
	unsigned int mask;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	for (id = GPT1; id < NR_GPTS; id++) {
		mask = 0x1 << id;
		if (status & mask) {
			mt_reg_sync_writel(mask, GPT_IRQACK);
			break;
		}
	}
	return id;
}
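/*
 * GPT_IRQSTA/GPT_IRQACK carry one bit per timer (bit 0 = GPT1). Only the
 * first pending timer is acked per interrupt; if none is pending, NR_GPTS
 * is returned and gpt_handler() rejects it via id_to_dev().
 */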
static irqreturn_t gpt_handler(int irq, void *dev_id)
{
	unsigned int id = gpt_get_and_ack_irq();
	struct gpt_device *dev = id_to_dev(id);

	if (likely(dev)) {
		if (!(dev->flags & GPT_ISR))
			handlers[id](id);
		else
			handlers[id]((unsigned long)dev_id);
	} else
		pr_err("GPT id is %d\n", id);

	return IRQ_HANDLED;
}
static void __gpt_enable_irq(struct gpt_device *dev)
{
	mt_gpt_set_reg(0x1 << (dev->id), GPT_IRQEN);
}

static void __gpt_disable_irq(struct gpt_device *dev)
{
	mt_gpt_clr_reg(0x1 << (dev->id), GPT_IRQEN);
}

static void __gpt_ack_irq(struct gpt_device *dev)
{
	mt_reg_sync_writel(0x1 << (dev->id), GPT_IRQACK);
}

static void __gpt_reset(struct gpt_device *dev)
{
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CON);
	__gpt_disable_irq(dev);
	__gpt_ack_irq(dev);
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CLK);
	mt_reg_sync_writel(GPT_CON_CLRCNT, dev->base_addr + GPT_CON);
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CMP);
	if (dev->features & GPT_FEAT_64_BIT)
		mt_reg_sync_writel(0, dev->base_addr + GPT_CMPH);
}

static void __gpt_clrcnt(struct gpt_device *dev)
{
	mt_gpt_set_reg(GPT_CON_CLRCNT, dev->base_addr + GPT_CON);
	/* wait until the counter actually reads back as zero */
	while (__raw_readl(dev->base_addr + GPT_CNT))
		cpu_relax();
}
static void __gpt_start(struct gpt_device *dev)
{
	mt_gpt_set_reg(GPT_CON_ENABLE, dev->base_addr + GPT_CON);
}

static void __gpt_start_from_zero(struct gpt_device *dev)
{
	/* DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE | GPT_CON_CLRCNT); */
	__gpt_clrcnt(dev);
	__gpt_start(dev);
}

static void __gpt_stop(struct gpt_device *dev)
{
	mt_gpt_clr_reg(GPT_CON_ENABLE, dev->base_addr + GPT_CON);
}

static void __gpt_set_mode(struct gpt_device *dev, unsigned int mode)
{
	unsigned int ctl = __raw_readl(dev->base_addr + GPT_CON);

	ctl &= ~GPT_CON_OPMODE;
	ctl |= mode << GPT_OPMODE_OFFSET;
	mt_reg_sync_writel(ctl, dev->base_addr + GPT_CON);
	/*
	 * Keep the unshifted mode so comparisons against GPT_FREE_RUN,
	 * GPT_ONE_SHOT, etc. work as expected.
	 */
	dev->mode = mode;
}
static void __gpt_set_clk(struct gpt_device *dev, unsigned int clksrc, unsigned int clkdiv)
{
	unsigned int clk = (clksrc << GPT_CLKSRC_OFFSET) | clkdiv;

	mt_reg_sync_writel(clk, dev->base_addr + GPT_CLK);
	dev->clksrc = clksrc;
	dev->clkdiv = clkdiv;
}

static void __gpt_set_cmp(struct gpt_device *dev, unsigned int cmpl, unsigned int cmph)
{
	mt_reg_sync_writel(cmpl, dev->base_addr + GPT_CMP);
	dev->cmp[0] = cmpl;
	if (dev->features & GPT_FEAT_64_BIT) {
		mt_reg_sync_writel(cmph, dev->base_addr + GPT_CMPH);
		dev->cmp[1] = cmph;
	}
}

static void __gpt_get_cmp(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = __raw_readl(dev->base_addr + GPT_CMP);
	if (dev->features & GPT_FEAT_64_BIT)
		*(++ptr) = __raw_readl(dev->base_addr + GPT_CMPH);
}

static void __gpt_get_cnt(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = __raw_readl(dev->base_addr + GPT_CNT);
	if (dev->features & GPT_FEAT_64_BIT)
		*(++ptr) = __raw_readl(dev->base_addr + GPT_CNTH);
}

static void __gpt_set_flags(struct gpt_device *dev, unsigned int flags)
{
	dev->flags |= flags;
}
static void gpt_devs_init(void)
{
	int i;

	for (i = 0; i < NR_GPTS; i++) {
		gpt_devs[i].id = i;
		gpt_devs[i].base_addr = GPT1_BASE + 0x10 * i;
		pr_alert("gpt_devs_init: base_addr=0x%lx\n",
			 (unsigned long)gpt_devs[i].base_addr);
	}
	gpt_devs[GPT6].features |= GPT_FEAT_64_BIT;
}

static void setup_gpt_dev_locked(struct gpt_device *dev, unsigned int mode,
				 unsigned int clksrc, unsigned int clkdiv, unsigned int cmp,
				 void (*func)(unsigned long), unsigned int flags)
{
	__gpt_set_flags(dev, flags | GPT_IN_USE);
	__gpt_set_mode(dev, mode & GPT_OPMODE_MASK);
	__gpt_set_clk(dev, clksrc & GPT_CLKSRC_MASK, clkdiv & GPT_CLKDIV_MASK);
	if (func)
		__gpt_set_handler(dev, func);
	if (dev->mode != GPT_FREE_RUN) {
		__gpt_set_cmp(dev, cmp, 0);
		if (!(dev->flags & GPT_NOIRQEN))
			__gpt_enable_irq(dev);
	}
	if (!(dev->flags & GPT_NOAUTOEN))
		__gpt_start(dev);
}
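/*
 * Flag semantics (the flags come from mt-plat/mt_gpt.h): GPT_NOIRQEN leaves
 * the timer's interrupt masked even in compare modes, and GPT_NOAUTOEN sets
 * the timer up without starting it, so the caller can start it later with
 * start_gpt()/restart_gpt(). Free-running timers get no compare or irq.
 */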
static int mt_gpt_set_next_event(unsigned long cycles,
				 struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	__gpt_stop(dev);
	__gpt_set_cmp(dev, cycles, 0);
	__gpt_start_from_zero(dev);

	return 0;
}

static void mt_gpt_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_REPEAT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_ONE_SHOT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		__gpt_stop(dev);
		__gpt_disable_irq(dev);
		__gpt_ack_irq(dev);
		break;
	case CLOCK_EVT_MODE_RESUME:
	default:
		break;
	}
}
static cycle_t mt_gpt_read(struct clocksource *cs)
{
	cycle_t cycles;
	unsigned int cnt[2] = { 0, 0 };
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	__gpt_get_cnt(dev, cnt);

	if (GPT_CLKSRC_ID != GPT6) {
		/* explicitly mask to the low 32 bits to avoid unpredictable widening */
		cycles = GPT_BIT_MASK_L & (cycle_t)cnt[0];
	} else {
		cycles = (GPT_BIT_MASK_H & (((cycle_t)cnt[1]) << 32)) |
			 (GPT_BIT_MASK_L & (cycle_t)cnt[0]);
	}
	return cycles;
}

static u64 notrace mt_read_sched_clock(void)
{
	return mt_gpt_read(NULL);
}

static void clkevt_handler(unsigned long data)
{
	struct clock_event_device *evt = (struct clock_event_device *)data;

	evt->event_handler(evt);
}
static inline void setup_clkevt(u32 freq)
{
	unsigned int cmp;
	struct clock_event_device *evt = &gpt_clockevent;
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	evt->mult = div_sc(freq, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(3, evt);
	evt->cpumask = cpumask_of(0);

	setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     freq / HZ, clkevt_handler, GPT_ISR);
	__gpt_get_cmp(dev, &cmp);
	pr_alert("GPT1_CMP = %d, HZ = %d\n", cmp, HZ);

	clockevents_register_device(evt);
}
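/*
 * Worked example, assuming the usual 13 MHz GPT clock and HZ=100:
 * mult = div_sc(13000000, 1e9, 32) = 13e6 * 2^32 / 1e9 ~= 55834575, so the
 * clockevent core converts as cycles = (ns * mult) >> 32 (~0.013 tick/ns).
 * freq / HZ = 130000 ticks then programs a 10 ms periodic tick, and
 * min_delta = 3 ticks ~= 231 ns, max_delta = 2^32 - 1 ticks ~= 330 s.
 */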
static cycle_t mt_read_sched_clock_cc(const struct cyclecounter *cc)
{
	return mt_gpt_read(NULL);
}

static struct cyclecounter mt_cyclecounter = {
	.read	= mt_read_sched_clock_cc,
	.mask	= CLOCKSOURCE_MASK(32),
};

static inline void setup_clksrc(u32 freq)
{
	struct clocksource *cs = &gpt_clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);
	struct timecounter *mt_timecounter;
	u64 start_count;

	pr_alert("setup_clksrc1: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		 (unsigned long)dev->base_addr, __raw_readl(dev->base_addr));

	cs->mult = clocksource_hz2mult(freq, cs->shift);
	sched_clock_register(mt_read_sched_clock, 32, freq);

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			     0, NULL, 0);
	clocksource_register(cs);

	start_count = mt_read_sched_clock();
	mt_cyclecounter.mult = cs->mult;
	mt_cyclecounter.shift = cs->shift;
	mt_timecounter = arch_timer_get_timecounter();
	timecounter_init(mt_timecounter, &mt_cyclecounter, start_count);

	pr_alert("setup_clksrc1: mt_cyclecounter.mult=0x%x mt_cyclecounter.shift=0x%x\n",
		 mt_cyclecounter.mult, mt_cyclecounter.shift);
	pr_alert("setup_clksrc2: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		 (unsigned long)dev->base_addr, __raw_readl(dev->base_addr));
}
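/*
 * GPT2 thus serves three consumers at once: the Linux clocksource, the raw
 * sched_clock() (printk timestamps, scheduler), and a timecounter hung off
 * the arch timer layer so other code can timestamp against the same
 * free-running counter. clocksource_hz2mult() picks mult so that
 * ns = (cycles * mult) >> shift, with shift fixed at 25 above.
 */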
static void setup_syscnt(void)
{
	struct gpt_device *dev = id_to_dev(GPT_SYSCNT_ID);

	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1, 0, NULL, 0);
	/*
	 * The cpuxgpt free-runs at 13 MHz; it is always free-running and
	 * one-shot, so no extra setup is needed here.
	 */
	/*set_cpuxgpt_clk(CLK_DIV2);*/
	/*enable_cpuxgpt();*/
	pr_alert("fwq sysc count\n");
}
static ssize_t gpt_stat_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	char *p;
	char *page;
	int len;
	int i;
	int in_use;
	int is_counting;
	ssize_t ret;

	page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	p = page;
	p += sprintf(p, "\n(HW Timer) GPT Status :\n");
	p += sprintf(p, "=========================================\n");
	for (i = 0; i < NR_GPTS; i++) {
		in_use = gpt_devs[i].flags & GPT_IN_USE;
		/* gpt_is_counting() returns -EBUSY for timers not in use */
		is_counting = in_use ? gpt_is_counting(i) : 0;
		p += sprintf(p, "[GPT%d]in_use:%s, is_counting:%s\n", i + 1,
			     in_use ? "Y" : "N", is_counting ? "Y" : "N");
	}
	len = p - page;

	/*
	 * simple_read_from_buffer() clamps to the user buffer size, honours
	 * *ppos across repeated reads, and returns -EFAULT on a failed copy.
	 */
	ret = simple_read_from_buffer(buf, size, ppos, page, len);
	kfree(page);
	return ret;
}

static const struct file_operations xgpt_cmd_proc_fops = {
	.read = gpt_stat_read,
};
static int __init gpt_mod_init(void)
{
	struct proc_dir_entry *xgpt_dir;

	xgpt_dir = proc_mkdir("mt_xgpt", NULL);
	proc_create("gpt_stat", S_IRUGO, xgpt_dir, &xgpt_cmd_proc_fops);
	pr_alert("GPT: init\n");
	return 0;
}

static void __init mt_gpt_init(struct device_node *node)
{
	int i;
	u32 freq;
	unsigned long save_flags;

	gpt_update_lock(save_flags);

	/* freq = SYS_CLK_RATE */
	if (of_property_read_u32(node, "clock-frequency", &freq))
		pr_err("clock-frequency not set in the .dts file\n");
#ifdef CONFIG_MTK_FPGA
	freq = (freq / 13 * 6);	/* the 13 MHz clock runs at 6 MHz on FPGA */
#endif

	/* set up IO addresses */
	xgpt_timers.tmr_regs = of_iomap(node, 0);
	/* set up IRQ numbers */
	xgpt_timers.tmr_irq = irq_of_parse_and_map(node, 0);

	/* record the GPT4 count at the time GPT is initialized */
	boot_time_value = xgpt_boot_up_time();

	pr_alert("mt_gpt_init: tmr_regs=0x%p, tmr_irq=%d, freq=%d\n",
		 xgpt_timers.tmr_regs, xgpt_timers.tmr_irq, freq);

	gpt_devs_init();
	for (i = 0; i < NR_GPTS; i++)
		__gpt_reset(&gpt_devs[i]);

	setup_clksrc(freq);
	setup_irq(xgpt_timers.tmr_irq, &gpt_irq);
	setup_clkevt(freq);

	/* use cpuxgpt as syscnt */
	setup_syscnt();

	gpt_update_unlock(save_flags);
}
static void release_gpt_dev_locked(struct gpt_device *dev)
{
	__gpt_reset(dev);
	handlers[dev->id] = noop;
	dev->func = NULL;
	dev->flags = 0;
}

/* whether the gpt is currently counting */
static int __gpt_get_status(struct gpt_device *dev)
{
	return !!(__raw_readl(dev->base_addr + GPT_CON) & GPT_CON_ENABLE);
}
/********************** export area *********************/
int request_gpt(unsigned int id, unsigned int mode, unsigned int clksrc,
		unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (dev->flags & GPT_IN_USE) {
		pr_err("%s: GPT%d is in use!\n", __func__, (id + 1));
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	setup_gpt_dev_locked(dev, mode, clksrc, clkdiv, cmp, func, flags);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(request_gpt);
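/*
 * Usage sketch (hypothetical client): request a one-shot timer that fires
 * after 10 ms on the 13 MHz source clock, with the callback deferred to a
 * tasklet (no GPT_ISR flag). Which timer is actually free depends on the
 * platform; GPT5 here is only an illustration.
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		pr_info("gpt timeout\n");
 *	}
 *
 *	request_gpt(GPT5, GPT_ONE_SHOT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
 *		    10 * GPT4_1MS_TICK, my_timeout, 0);
 *	...
 *	free_gpt(GPT5);
 */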
int free_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (!(dev->flags & GPT_IN_USE))
		return 0;

	gpt_update_lock(save_flags);
	release_gpt_dev_locked(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(free_gpt);

int start_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id + 1);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_clrcnt(dev);
	__gpt_start(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(start_gpt);
int stop_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id + 1);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_stop(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(stop_gpt);

int restart_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id + 1);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_start(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(restart_gpt);
int gpt_is_counting(unsigned int id)
{
	unsigned long save_flags;
	int is_counting;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id + 1);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	is_counting = __gpt_get_status(dev);
	gpt_update_unlock(save_flags);

	return is_counting;
}
EXPORT_SYMBOL(gpt_is_counting);
int gpt_set_cmp(unsigned int id, unsigned int val)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;
	if (dev->mode == GPT_FREE_RUN)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_set_cmp(dev, val, 0);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_set_cmp);

int gpt_get_cmp(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_get_cmp(dev, ptr);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_get_cmp);
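/*
 * gpt_get_cnt() below only takes gpt_lock for the 64-bit GPT6, where CNT
 * and CNTH must be read back to back so the two halves stay consistent; a
 * single 32-bit CNT read needs no locking. For a 64-bit timer the caller
 * must pass an array of two unsigned ints.
 */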
int gpt_get_cnt(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	if (!(dev->features & GPT_FEAT_64_BIT)) {
		__gpt_get_cnt(dev, ptr);
	} else {
		gpt_update_lock(save_flags);
		__gpt_get_cnt(dev, ptr);
		gpt_update_unlock(save_flags);
	}

	return 0;
}
EXPORT_SYMBOL(gpt_get_cnt);

int gpt_check_irq(unsigned int id)
{
	unsigned int mask = 0x1 << id;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	return (status & mask) ? 1 : 0;
}
EXPORT_SYMBOL(gpt_check_irq);
int gpt_check_and_ack_irq(unsigned int id)
{
	unsigned int mask = 0x1 << id;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	if (status & mask) {
		mt_reg_sync_writel(mask, GPT_IRQACK);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(gpt_check_and_ack_irq);

unsigned int gpt_boot_time(void)
{
	return boot_time_value;
}
EXPORT_SYMBOL(gpt_boot_time);

module_init(gpt_mod_init);
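/*
 * Boot flow, in two stages: the CLOCKSOURCE_OF_DECLARE() entries below run
 * mt_gpt_init() at early boot (from the clocksource-of-init table, before
 * normal initcalls) for any DT node matching one of the compatibles, while
 * gpt_mod_init() only adds the /proc/mt_xgpt/gpt_stat debug file later as
 * an ordinary initcall.
 */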
CLOCKSOURCE_OF_DECLARE(mt2701_apxgpt, "mediatek,mt2701-apxgpt", mt_gpt_init);
CLOCKSOURCE_OF_DECLARE(mt8163_apxgpt, "mediatek,mt8163-apxgpt", mt_gpt_init);
CLOCKSOURCE_OF_DECLARE(mt8173_apxgpt, "mediatek,mt8173-apxgpt", mt_gpt_init);
CLOCKSOURCE_OF_DECLARE(mt8127_apxgpt, "mediatek,mt8127-apxgpt", mt_gpt_init);

MODULE_LICENSE("GPL");