mt_gpt.c

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <clocksource/arm_arch_timer.h>
#include <mach/mt_gpt.h>
#include <mach/mt_cpuxgpt.h>
#include <mt-plat/sync_write.h>

#define GPT_CLKEVT_ID		(GPT1)
#define GPT_CLKSRC_ID		(GPT2)

#define AP_XGPT_BASE		xgpt_timers.tmr_regs
#define GPT_IRQEN		(AP_XGPT_BASE + 0x0000)
#define GPT_IRQSTA		(AP_XGPT_BASE + 0x0004)
#define GPT_IRQACK		(AP_XGPT_BASE + 0x0008)
#define GPT1_BASE		(AP_XGPT_BASE + 0x0010)

/* per-timer register offsets */
#define GPT_CON			(0x00)
#define GPT_CLK			(0x04)
#define GPT_CNT			(0x08)
#define GPT_CMP			(0x0C)
#define GPT_CNTH		(0x18)
#define GPT_CMPH		(0x1C)

#define GPT_CON_ENABLE		(0x1 << 0)
#define GPT_CON_CLRCNT		(0x1 << 1)
#define GPT_CON_OPMODE		(0x3 << 4)

#define GPT_OPMODE_MASK		(0x3)
#define GPT_CLKDIV_MASK		(0xf)
#define GPT_CLKSRC_MASK		(0x1)

#define GPT_OPMODE_OFFSET	(4)
#define GPT_CLKSRC_OFFSET	(4)

#define GPT_FEAT_64_BIT		(0x0001)
#define GPT_ISR			(0x0010)
#define GPT_IN_USE		(0x0100)

/* masks used to keep 32-bit and 64-bit counter handling compatible */
#define GPT_BIT_MASK_L		0x00000000FFFFFFFF
#define GPT_BIT_MASK_H		0xFFFFFFFF00000000
struct mt_xgpt_timers {
	int tmr_irq;
	void __iomem *tmr_regs;
};

struct gpt_device {
	unsigned int id;
	unsigned int mode;
	unsigned int clksrc;
	unsigned int clkdiv;
	unsigned int cmp[2];
	void (*func)(unsigned long);
	int flags;
	int features;
	void __iomem *base_addr;
};

static struct mt_xgpt_timers xgpt_timers;
static struct gpt_device gpt_devs[NR_GPTS];
static DEFINE_SPINLOCK(gpt_lock);
/*
 * Return the GPT4 count (read before the init-time clear) so the kernel
 * start time between LK and the kernel can be recorded.
 */
#define GPT4_1MS_TICK ((u32)(13000))	/* 1000000ns / 76.92ns = 13000.520 */
#define GPT4_BASE (AP_XGPT_BASE + 0x0040)

static unsigned int boot_time_value;

#define mt_gpt_set_reg(val, addr)	mt_reg_sync_writel(__raw_readl(addr)|(val), addr)
#define mt_gpt_clr_reg(val, addr)	mt_reg_sync_writel(__raw_readl(addr)&~(val), addr)
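/*
 * These helpers are plain read-modify-write accessors; for example,
 * mt_gpt_set_reg(GPT_CON_ENABLE, con) expands to
 * mt_reg_sync_writel(__raw_readl(con) | GPT_CON_ENABLE, con),
 * i.e. set (or, for mt_gpt_clr_reg, clear) the given bits and issue a
 * synchronized write so the update reaches the hardware.
 */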
static unsigned int xgpt_boot_up_time(void)
{
	unsigned int tick;

	tick = __raw_readl(GPT4_BASE + GPT_CNT);

	return ((tick + (GPT4_1MS_TICK - 1)) / GPT4_1MS_TICK);
}
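/*
 * Example of the round-up above: a raw GPT4 count of 26000 ticks is
 * exactly 2 ms at 13 MHz and returns 2; a count of 26001 already
 * rounds up and returns 3.
 */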
static struct gpt_device *id_to_dev(unsigned int id)
{
	/* GPT3 and GPT6 are not handed out to clients */
	if ((id == GPT3) || (id == GPT6))
		return NULL;

	return id < NR_GPTS ? gpt_devs + id : NULL;
}

#define gpt_update_lock(flags)		spin_lock_irqsave(&gpt_lock, flags)
#define gpt_update_unlock(flags)	spin_unlock_irqrestore(&gpt_lock, flags)

static inline void noop(unsigned long data) { }

static void (*handlers[])(unsigned long) = {
	noop,
	noop,
	noop,
	noop,
	noop,
	noop,
	noop,
};

static struct tasklet_struct task[NR_GPTS];

static void task_sched(unsigned long data)
{
	unsigned int id = (unsigned int)data;

	tasklet_schedule(&task[id]);
}
static irqreturn_t gpt_handler(int irq, void *dev_id);
static cycle_t mt_gpt_read(struct clocksource *cs);
static int mt_gpt_set_next_event(unsigned long cycles, struct clock_event_device *evt);
static void mt_gpt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt);

static struct clocksource gpt_clocksource = {
	.name = "mt6735-gpt",
	.rating = 450,
	.read = mt_gpt_read,
	.mask = CLOCKSOURCE_MASK(32),
	.shift = 25,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct clock_event_device gpt_clockevent = {
	.name = "mt6735-gpt",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.shift = 32,
	.rating = 300,
	.set_next_event = mt_gpt_set_next_event,
	.set_mode = mt_gpt_set_mode,
};

static struct irqaction gpt_irq = {
	.name = "mt-gpt",
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL | IRQF_TRIGGER_LOW,
	.handler = gpt_handler,
	.dev_id = &gpt_clockevent,
};
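/*
 * All GPT instances share this one interrupt line; the handler below
 * reads GPT_IRQSTA to find which timer fired, acks it, and dispatches
 * to the per-timer callback in handlers[].
 */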
static inline unsigned int gpt_get_and_ack_irq(void)
{
	unsigned int id;
	unsigned int mask;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	for (id = GPT1; id < NR_GPTS; id++) {
		mask = 0x1 << id;
		if (status & mask) {
			mt_reg_sync_writel(mask, GPT_IRQACK);
			break;
		}
	}

	return id;
}

static irqreturn_t gpt_handler(int irq, void *dev_id)
{
	unsigned int id = gpt_get_and_ack_irq();
	struct gpt_device *dev = id_to_dev(id);

	if (likely(dev)) {
		if (!(dev->flags & GPT_ISR))
			handlers[id](id);
		else
			handlers[id]((unsigned long)dev_id);
	} else {
		pr_err("GPT id is %d\n", id);
	}

	return IRQ_HANDLED;
}
static void __gpt_enable_irq(struct gpt_device *dev)
{
	mt_gpt_set_reg(0x1 << (dev->id), GPT_IRQEN);
}

static void __gpt_disable_irq(struct gpt_device *dev)
{
	mt_gpt_clr_reg(0x1 << (dev->id), GPT_IRQEN);
}

static void __gpt_ack_irq(struct gpt_device *dev)
{
	mt_reg_sync_writel(0x1 << (dev->id), GPT_IRQACK);
}

static void __gpt_reset(struct gpt_device *dev)
{
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CON);
	__gpt_disable_irq(dev);
	__gpt_ack_irq(dev);
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CLK);
	mt_reg_sync_writel(GPT_CON_CLRCNT, dev->base_addr + GPT_CON);
	mt_reg_sync_writel(0x0, dev->base_addr + GPT_CMP);
	if (dev->features & GPT_FEAT_64_BIT)
		mt_reg_sync_writel(0, dev->base_addr + GPT_CMPH);
}
static void __gpt_get_cnt(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = __raw_readl(dev->base_addr + GPT_CNT);
	if (dev->features & GPT_FEAT_64_BIT)
		*(++ptr) = __raw_readl(dev->base_addr + GPT_CNTH);
}

static void __gpt_get_cmp(struct gpt_device *dev, unsigned int *ptr)
{
	*ptr = __raw_readl(dev->base_addr + GPT_CMP);
	if (dev->features & GPT_FEAT_64_BIT)
		*(++ptr) = __raw_readl(dev->base_addr + GPT_CMPH);
}

static void __gpt_set_mode(struct gpt_device *dev, unsigned int mode)
{
	unsigned int ctl = __raw_readl(dev->base_addr + GPT_CON);

	ctl &= ~GPT_CON_OPMODE;
	ctl |= mode << GPT_OPMODE_OFFSET;
	mt_reg_sync_writel(ctl, dev->base_addr + GPT_CON);
	/* cache the unshifted mode so checks against GPT_FREE_RUN etc. match */
	dev->mode = mode;
}
static void __gpt_set_clk(struct gpt_device *dev, unsigned int clksrc, unsigned int clkdiv)
{
	unsigned int clk = (clksrc << GPT_CLKSRC_OFFSET) | clkdiv;

	mt_reg_sync_writel(clk, dev->base_addr + GPT_CLK);
	dev->clksrc = clksrc;
	dev->clkdiv = clkdiv;
}

static void __gpt_set_cmp(struct gpt_device *dev, unsigned int cmpl,
		unsigned int cmph)
{
	mt_reg_sync_writel(cmpl, dev->base_addr + GPT_CMP);
	dev->cmp[0] = cmpl;
	if (dev->features & GPT_FEAT_64_BIT) {
		mt_reg_sync_writel(cmph, dev->base_addr + GPT_CMPH);
		dev->cmp[1] = cmph;
	}
}
static void __gpt_clrcnt(struct gpt_device *dev)
{
	mt_gpt_set_reg(GPT_CON_CLRCNT, dev->base_addr + GPT_CON);
	/* wait until the hardware has actually cleared the counter */
	while (__raw_readl(dev->base_addr + GPT_CNT))
		cpu_relax();
}

static void __gpt_start(struct gpt_device *dev)
{
	mt_gpt_set_reg(GPT_CON_ENABLE, dev->base_addr + GPT_CON);
}

static void __gpt_stop(struct gpt_device *dev)
{
	mt_gpt_clr_reg(GPT_CON_ENABLE, dev->base_addr + GPT_CON);
}

static void __gpt_start_from_zero(struct gpt_device *dev)
{
	/* DRV_SetReg32(dev->base_addr + GPT_CON, GPT_CON_ENABLE | GPT_CON_CLRCNT); */
	__gpt_clrcnt(dev);
	__gpt_start(dev);
}

static void __gpt_set_flags(struct gpt_device *dev, unsigned int flags)
{
	dev->flags |= flags;
}

static void __gpt_set_handler(struct gpt_device *dev, void (*func)(unsigned long))
{
	if (func) {
		if (dev->flags & GPT_ISR) {
			handlers[dev->id] = func;
		} else {
			/* defer the callback to a tasklet instead of the ISR */
			tasklet_init(&task[dev->id], func, 0);
			handlers[dev->id] = task_sched;
		}
	}
	dev->func = func;
}
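/*
 * Note on the two dispatch paths set up above: with GPT_ISR the
 * callback is invoked directly from gpt_handler() in hard-IRQ context;
 * otherwise task_sched() is installed, which only schedules a tasklet,
 * so the callback runs later in softirq context.
 */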
static void gpt_devs_init(void)
{
	int i;

	for (i = 0; i < NR_GPTS; i++) {
		gpt_devs[i].id = i;
		gpt_devs[i].base_addr = GPT1_BASE + 0x10 * i;
		pr_alert("gpt_devs_init: base_addr=0x%lx\n",
			(unsigned long)gpt_devs[i].base_addr);
	}
	/* GPT6 is the only timer with a 64-bit counter */
	gpt_devs[GPT6].features |= GPT_FEAT_64_BIT;
}

static void setup_gpt_dev_locked(struct gpt_device *dev, unsigned int mode,
		unsigned int clksrc, unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	__gpt_set_flags(dev, flags | GPT_IN_USE);
	__gpt_set_mode(dev, mode & GPT_OPMODE_MASK);
	__gpt_set_clk(dev, clksrc & GPT_CLKSRC_MASK, clkdiv & GPT_CLKDIV_MASK);

	if (func)
		__gpt_set_handler(dev, func);

	if (dev->mode != GPT_FREE_RUN) {
		__gpt_set_cmp(dev, cmp, 0);
		if (!(dev->flags & GPT_NOIRQEN))
			__gpt_enable_irq(dev);
	}

	if (!(dev->flags & GPT_NOAUTOEN))
		__gpt_start(dev);
}
static int mt_gpt_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	/* printk("[%s]entry, evt=%lu\n", __func__, cycles); */
	__gpt_stop(dev);
	__gpt_set_cmp(dev, cycles, 0);
	__gpt_start_from_zero(dev);

	return 0;
}

static void mt_gpt_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	/* printk("[%s]entry, mode=%d\n", __func__, mode); */
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_REPEAT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		__gpt_stop(dev);
		__gpt_set_mode(dev, GPT_ONE_SHOT);
		__gpt_enable_irq(dev);
		__gpt_start_from_zero(dev);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		__gpt_stop(dev);
		__gpt_disable_irq(dev);
		__gpt_ack_irq(dev);
		break;
	case CLOCK_EVT_MODE_RESUME:
	default:
		break;
	}
}
static cycle_t mt_gpt_read(struct clocksource *cs)
{
	cycle_t cycles;
	unsigned int cnt[2] = {0, 0};
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);

	__gpt_get_cnt(dev, cnt);

	if (GPT_CLKSRC_ID != GPT6) {
		/* force-mask the high 32 bits so a 32-bit counter never leaks stray high bits */
		cycles = (GPT_BIT_MASK_L & (cycle_t)(cnt[0]));
	} else {
		cycles = (GPT_BIT_MASK_H & (((cycle_t)(cnt[1])) << 32)) |
			 (GPT_BIT_MASK_L & ((cycle_t)(cnt[0])));
	}

	return cycles;
}

static u64 notrace mt_read_sched_clock(void)
{
	return mt_gpt_read(NULL);
}

static cycle_t mt_read_sched_clock_cc(const struct cyclecounter *cc)
{
	return mt_gpt_read(NULL);
}

static void clkevt_handler(unsigned long data)
{
	struct clock_event_device *evt = (struct clock_event_device *)data;

	evt->event_handler(evt);
}

static struct cyclecounter mt_cyclecounter = {
	.read = mt_read_sched_clock_cc,
	.mask = CLOCKSOURCE_MASK(32),
};
static inline void setup_clksrc(u32 freq)
{
	struct clocksource *cs = &gpt_clocksource;
	struct gpt_device *dev = id_to_dev(GPT_CLKSRC_ID);
	struct timecounter *mt_timecounter;
	u64 start_count;

	pr_alert("setup_clksrc1: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		(unsigned long)dev->base_addr, __raw_readl(dev->base_addr));

	cs->mult = clocksource_hz2mult(freq, cs->shift);
	sched_clock_register(mt_read_sched_clock, 32, freq);
	setup_gpt_dev_locked(dev, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			0, NULL, 0);
	clocksource_register(cs);

	start_count = mt_read_sched_clock();
	mt_cyclecounter.mult = cs->mult;
	mt_cyclecounter.shift = cs->shift;
	mt_timecounter = arch_timer_get_timecounter();
	timecounter_init(mt_timecounter, &mt_cyclecounter, start_count);

	pr_alert("setup_clksrc1: mt_cyclecounter.mult=0x%x mt_cyclecounter.shift=0x%x\n",
		mt_cyclecounter.mult, mt_cyclecounter.shift);
	pr_alert("setup_clksrc2: dev->base_addr=0x%lx GPT2_CON=0x%x\n",
		(unsigned long)dev->base_addr, __raw_readl(dev->base_addr));
}
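/*
 * For reference (illustrative numbers, assuming the 13 MHz system
 * clock implied by GPT4_1MS_TICK): with shift = 25,
 * clocksource_hz2mult() yields mult ~= (1e9 << 25) / 13e6, and a raw
 * delta converts to nanoseconds as ns = (cycles * mult) >> shift,
 * i.e. one tick ~= 76.92 ns.
 */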
static inline void setup_clkevt(u32 freq)
{
	unsigned int cmp;
	struct clock_event_device *evt = &gpt_clockevent;
	struct gpt_device *dev = id_to_dev(GPT_CLKEVT_ID);

	evt->mult = div_sc(freq, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(3, evt);
	evt->cpumask = cpumask_of(0);

	setup_gpt_dev_locked(dev, GPT_REPEAT, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			freq / HZ, clkevt_handler, GPT_ISR);

	__gpt_get_cmp(dev, &cmp);
	pr_alert("GPT1_CMP = %d, HZ = %d\n", cmp, HZ);

	clockevents_register_device(evt);
}
static void setup_syscnt(void)
{
	/* map the cpuxgpt registers */
	mt_cpuxgpt_map_base();
	/* cpuxgpt always free-runs, so no mode setup is needed;
	 * just select the 13 MHz count clock */
	set_cpuxgpt_clk(CLK_DIV2);
	/* enable cpuxgpt */
	enable_cpuxgpt();
}

static void __init mt_gpt_init(struct device_node *node)
{
	int i;
	u32 freq;
	unsigned long save_flags;

	gpt_update_lock(save_flags);

	/* freq = SYS_CLK_RATE */
	if (of_property_read_u32(node, "clock-frequency", &freq))
		pr_err("clock-frequency not set in the .dts file\n");

	/* set up the IRQ number and register base from the device tree */
	xgpt_timers.tmr_irq = irq_of_parse_and_map(node, 0);
	xgpt_timers.tmr_regs = of_iomap(node, 0);

	/* record the time at which the GPT is initialized */
	boot_time_value = xgpt_boot_up_time();

	pr_alert("mt_gpt_init: tmr_regs=0x%lx, tmr_irq=%d, freq=%d\n",
		(unsigned long)xgpt_timers.tmr_regs, xgpt_timers.tmr_irq, freq);

	gpt_devs_init();
	for (i = 0; i < NR_GPTS; i++)
		__gpt_reset(&gpt_devs[i]);

	setup_clksrc(freq);
	setup_irq(xgpt_timers.tmr_irq, &gpt_irq);
	setup_clkevt(freq);

	/* use cpuxgpt as the system counter */
	setup_syscnt();

	pr_alert("mt_gpt_init: get_cnt_GPT2=%lld\n", mt_gpt_read(NULL)); /* TODO: remove */

	gpt_update_unlock(save_flags);
}
static void release_gpt_dev_locked(struct gpt_device *dev)
{
	__gpt_reset(dev);

	handlers[dev->id] = noop;
	dev->func = NULL;
	dev->flags = 0;
}

/* gpt is counting or not */
static int __gpt_get_status(struct gpt_device *dev)
{
	return !!(__raw_readl(dev->base_addr + GPT_CON) & GPT_CON_ENABLE);
}
/********************** export area *********************/
int request_gpt(unsigned int id, unsigned int mode, unsigned int clksrc,
		unsigned int clkdiv, unsigned int cmp,
		void (*func)(unsigned long), unsigned int flags)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (dev->flags & GPT_IN_USE) {
		pr_err("%s: GPT%d is in use!\n", __func__, (id + 1));
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	setup_gpt_dev_locked(dev, mode, clksrc, clkdiv, cmp, func, flags);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(request_gpt);
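/*
 * Usage sketch (hypothetical caller; GPT5 and my_timeout() are only
 * illustrative): claim a timer as a one-shot clocked from the 13 MHz
 * system source, firing after 13000 ticks (~1 ms), with the callback
 * run in ISR context:
 *
 *	static void my_timeout(unsigned long data) { ... }
 *
 *	if (request_gpt(GPT5, GPT_ONE_SHOT, GPT_CLK_SRC_SYS,
 *			GPT_CLK_DIV_1, 13000, my_timeout, GPT_ISR) == 0)
 *		... armed; call free_gpt(GPT5) when done ...
 */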
int free_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE))
		return 0;

	gpt_update_lock(save_flags);
	release_gpt_dev_locked(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(free_gpt);
int start_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_clrcnt(dev);
	__gpt_start(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(start_gpt);

int stop_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_stop(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(stop_gpt);

int restart_gpt(unsigned int id)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	__gpt_start(dev);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(restart_gpt);
int gpt_is_counting(unsigned int id)
{
	unsigned long save_flags;
	int is_counting;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (!(dev->flags & GPT_IN_USE)) {
		pr_err("%s: GPT%d is not in use!\n", __func__, id);
		return -EBUSY;
	}

	gpt_update_lock(save_flags);
	is_counting = __gpt_get_status(dev);
	gpt_update_unlock(save_flags);

	return is_counting;
}
EXPORT_SYMBOL(gpt_is_counting);
int gpt_set_cmp(unsigned int id, unsigned int val)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev)
		return -EINVAL;

	if (dev->mode == GPT_FREE_RUN)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_set_cmp(dev, val, 0);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_set_cmp);

int gpt_get_cmp(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	gpt_update_lock(save_flags);
	__gpt_get_cmp(dev, ptr);
	gpt_update_unlock(save_flags);

	return 0;
}
EXPORT_SYMBOL(gpt_get_cmp);
int gpt_get_cnt(unsigned int id, unsigned int *ptr)
{
	unsigned long save_flags;
	struct gpt_device *dev = id_to_dev(id);

	if (!dev || !ptr)
		return -EINVAL;

	if (!(dev->features & GPT_FEAT_64_BIT)) {
		/* a single 32-bit read is atomic, so no lock is needed */
		__gpt_get_cnt(dev, ptr);
	} else {
		gpt_update_lock(save_flags);
		__gpt_get_cnt(dev, ptr);
		gpt_update_unlock(save_flags);
	}

	return 0;
}
EXPORT_SYMBOL(gpt_get_cnt);
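/*
 * Usage sketch (hypothetical): read back the current count of a timer
 * obtained via request_gpt(). Since id_to_dev() rejects GPT3 and GPT6,
 * only 32-bit timers are reachable here, so cnt[1] stays untouched:
 *
 *	unsigned int cnt[2] = {0, 0};
 *
 *	if (gpt_get_cnt(GPT5, cnt) == 0)
 *		pr_info("GPT5 count = %u\n", cnt[0]);
 */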
int gpt_check_irq(unsigned int id)
{
	unsigned int mask = 0x1 << id;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	return (status & mask) ? 1 : 0;
}
EXPORT_SYMBOL(gpt_check_irq);

int gpt_check_and_ack_irq(unsigned int id)
{
	unsigned int mask = 0x1 << id;
	unsigned int status = __raw_readl(GPT_IRQSTA);

	if (status & mask) {
		mt_reg_sync_writel(mask, GPT_IRQACK);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(gpt_check_and_ack_irq);
unsigned int gpt_boot_time(void)
{
	return boot_time_value;
}
EXPORT_SYMBOL(gpt_boot_time);

/************************************************************************************************/
CLOCKSOURCE_OF_DECLARE(mtk_apxgpt, "mediatek,mt6735-apxgpt", mt_gpt_init);