/* cqdma.c */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/miscdevice.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#ifdef CONFIG_MTK_GIC
#include <linux/irqchip/mt-gic.h>
#endif
#include <mt-plat/mt_io.h>
#include <mt-plat/dma.h>
#include <mt-plat/sync_write.h>
/*#include <mach/mt_clkmgr.h>*/
/*#include <mach/emi_mpu.h>*/
struct cqdma_env_info {
	void __iomem *base;
	u32 irq;
};

#define MAX_CQDMA_CHANNELS 2
static struct cqdma_env_info env_info[MAX_CQDMA_CHANNELS];
static u32 nr_cqdma_channel;
/*
 * DMA information
 */
#define GDMA_START (0)

/*
 * General DMA channel register mapping
 */
#define DMA_INT_FLAG(ch)	IOMEM((env_info[ch].base + 0x0000))
#define DMA_INT_EN(ch)		IOMEM((env_info[ch].base + 0x0004))
#define DMA_START(ch)		IOMEM((env_info[ch].base + 0x0008))
#define DMA_RESET(ch)		IOMEM((env_info[ch].base + 0x000C))
#define DMA_STOP(ch)		IOMEM((env_info[ch].base + 0x0010))
#define DMA_FLUSH(ch)		IOMEM((env_info[ch].base + 0x0014))
#define DMA_CON(ch)		IOMEM((env_info[ch].base + 0x0018))
#define DMA_SRC(ch)		IOMEM((env_info[ch].base + 0x001C))
#define DMA_DST(ch)		IOMEM((env_info[ch].base + 0x0020))
#define DMA_LEN1(ch)		IOMEM((env_info[ch].base + 0x0024))
#define DMA_LEN2(ch)		IOMEM((env_info[ch].base + 0x0028))
#define DMA_JUMP_ADDR(ch)	IOMEM((env_info[ch].base + 0x002C))
#define DMA_IBUFF_SIZE(ch)	IOMEM((env_info[ch].base + 0x0030))
#define DMA_CONNECT(ch)		IOMEM((env_info[ch].base + 0x0034))
#define DMA_AXIATTR(ch)		IOMEM((env_info[ch].base + 0x0038))
#define DMA_DBG_STAT(ch)	IOMEM((env_info[ch].base + 0x0050))
#define DMA_VIO_DBG1(ch)	IOMEM((env_info[ch].base + 0x003c))
#if defined(CONFIG_ARCH_MT6755)
/*#define DMA_SRC_ADDR2(ch)	IOMEM((env_info[ch].base + 0x00E0))
#define DMA_DST_ADDR2(ch)	IOMEM((env_info[ch].base + 0x00E4))
#define DMA_JUMP_ADDR2(ch)	IOMEM((env_info[ch].base + 0x00E8))*/
#define DMA_SRC_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x00E0))
#define DMA_DST_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x00E4))
#define DMA_JUMP_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x00E8))
#else
#define DMA_SRC_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x0040))
#define DMA_DST_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x0044))
#define DMA_JUMP_4G_SUPPORT(ch)	IOMEM((env_info[ch].base + 0x0048))
#endif
#define DMA_GDMA_SEC_EN(ch)	IOMEM((env_info[ch].base + 0x0058))
#define DMA_VIO_DBG(ch)		IOMEM((env_info[ch].base + 0x0060))
/*
 * Register Setting
 */
#define DMA_GDMA_LEN_MAX_MASK	(0x000FFFFF)
#define DMA_CON_DIR		(0x00000001)
#define DMA_CON_FPEN		(0x00000002) /* Use fix pattern. */
#define DMA_CON_SLOW_EN		(0x00000004)
#define DMA_CON_DFIX		(0x00000008)
#define DMA_CON_SFIX		(0x00000010)
#define DMA_CON_WPEN		(0x00008000)
#define DMA_CON_WPSD		(0x00100000)
#define DMA_CON_WSIZE_1BYTE	(0x00000000)
#define DMA_CON_WSIZE_2BYTE	(0x01000000)
#define DMA_CON_WSIZE_4BYTE	(0x02000000)
#define DMA_CON_RSIZE_1BYTE	(0x00000000)
#define DMA_CON_RSIZE_2BYTE	(0x10000000)
#define DMA_CON_RSIZE_4BYTE	(0x20000000)
#define DMA_CON_BURST_MASK	(0x00070000)
#define DMA_CON_SLOW_OFFSET	(5)
#define DMA_CON_SLOW_MAX_MASK	(0x000003FF)
#define DMA_START_BIT		(0x00000001)
#define DMA_STOP_BIT		(0x00000000)
#define DMA_INT_FLAG_BIT	(0x00000001)
#define DMA_INT_FLAG_CLR_BIT	(0x00000000)
#define DMA_INT_EN_BIT		(0x00000001)
#define DMA_FLUSH_BIT		(0x00000001)
#define DMA_FLUSH_CLR_BIT	(0x00000000)
#define DMA_UART_RX_INT_EN_BIT	(0x00000003)
#define DMA_INT_EN_CLR_BIT	(0x00000000)
#define DMA_WARM_RST_BIT	(0x00000001)
#define DMA_HARD_RST_BIT	(0x00000002)
#define DMA_HARD_RST_CLR_BIT	(0x00000000)
#define DMA_READ_COHER_BIT	(0x00000010)
#define DMA_WRITE_COHER_BIT	(0x00100000)
#define DMA_GSEC_EN_BIT		(0x00000001)
#define DMA_SEC_EN_BIT		(0x00000001)
#define DMA_ADDR2_EN_BIT	(0x00000001)

/*
 * Register Limitation
 */
#define MAX_TRANSFER_LEN1	(0xFFFFF)
#define MAX_TRANSFER_LEN2	(0xFFFFF)
#define MAX_SLOW_DOWN_CNTER	(0x3FF)
/*
 * channel information structures
 */
struct dma_ctrl {
	int in_use;
	void (*isr_cb)(void *);
	void *data;
};

/*
 * global variables
 */
#define CQDMA_MAX_CHANNEL (8)
static struct dma_ctrl dma_ctrl[CQDMA_MAX_CHANNEL];
static DEFINE_SPINLOCK(dma_drv_lock);
#define PDN_APDMA_MODULE_NAME ("CQDMA")
#define GDMA_WARM_RST_TIMEOUT (100) /* ms */
volatile unsigned int DMA_INT_DONE;
/*
 * mt_req_gdma: request a general DMA channel.
 * @chan: the channel to request, or GDMA_ANY for any free channel
 * Return the channel number on success; return a negative error code on failure.
 */
int mt_req_gdma(DMA_CHAN chan)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dma_drv_lock, flags);
	if (chan == GDMA_ANY) {
		for (i = GDMA_START; i < nr_cqdma_channel; i++) {
			if (!dma_ctrl[i].in_use) {
				dma_ctrl[i].in_use = 1;
				break;
			}
		}
	} else {
		if (dma_ctrl[chan].in_use) {
			i = nr_cqdma_channel;
		} else {
			i = chan;
			dma_ctrl[chan].in_use = 1;
		}
	}
	spin_unlock_irqrestore(&dma_drv_lock, flags);

	if (i < nr_cqdma_channel) {
		mt_reset_gdma_conf(i);
		return i;
	}
	return -DMA_ERR_NO_FREE_CH;
}
EXPORT_SYMBOL(mt_req_gdma);
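/*
 * Example usage (a minimal sketch, not part of this driver): a client
 * claims any free channel, checks the result, and releases it again.
 * The function name below is hypothetical.
 */
#if 0	/* illustrative only */
static int cqdma_claim_example(void)
{
	int ch = mt_req_gdma(GDMA_ANY);

	if (ch < 0)
		return ch;	/* -DMA_ERR_NO_FREE_CH when all channels are busy */
	/* ... configure and use the channel ... */
	return mt_free_gdma(ch);
}
#endif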
/*
 * mt_start_gdma: start the DMA transfer on the specified GDMA channel
 * @channel: GDMA channel to start
 * Return 0 for success; return a negative error code on failure.
 */
int mt_start_gdma(int channel)
{
	if ((channel < GDMA_START) || (channel >= (GDMA_START + nr_cqdma_channel)))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(channel));
	mt_reg_sync_writel(DMA_START_BIT, DMA_START(channel));
	return 0;
}
EXPORT_SYMBOL(mt_start_gdma);
/*
 * mt_polling_gdma: busy-wait until the specified GDMA channel finishes
 * @channel: GDMA channel to poll
 * @timeout: polling timeout in ms
 * Return 0 for success;
 * return 1 on timeout;
 * return a negative error code on failure.
 */
int mt_polling_gdma(int channel, unsigned long timeout)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	/* convert the ms timeout into an absolute jiffies deadline */
	timeout = jiffies + ((HZ * timeout) / 1000);
	do {
		if (time_after(jiffies, timeout)) {
			pr_err("GDMA_%d polling timeout !!\n", channel);
			mt_dump_gdma(channel);
			return 1;
		}
	} while (readl(DMA_START(channel)));
	return 0;
}
EXPORT_SYMBOL(mt_polling_gdma);
/*
 * mt_stop_gdma: stop the DMA transfer on the specified GDMA channel
 * @channel: GDMA channel to stop
 * Return 0 for success; return a negative error code on failure.
 */
int mt_stop_gdma(int channel)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	mt_reg_sync_writel(DMA_FLUSH_BIT, DMA_FLUSH(channel));
	while (readl(DMA_START(channel)))
		cpu_relax();
	mt_reg_sync_writel(DMA_FLUSH_CLR_BIT, DMA_FLUSH(channel));
	mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(channel));
	return 0;
}
EXPORT_SYMBOL(mt_stop_gdma);
/*
 * mt_config_gdma: configure the given GDMA channel.
 * @channel: GDMA channel to configure
 * @config: pointer to the mt_gdma_conf structure holding the GDMA configuration
 * @flag: ALL, SRC, DST, or SRC_AND_DST.
 * Return 0 for success; return a negative error code on failure.
 */
int mt_config_gdma(int channel, struct mt_gdma_conf *config, DMA_CONF_FLAG flag)
{
	unsigned int dma_con = 0x0, limiter = 0;

	if ((channel < GDMA_START) || (channel >= (GDMA_START + nr_cqdma_channel)))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;
	if (!config)
		return -DMA_ERR_INV_CONFIG;
	if (config->sfix) {
		pr_err("GDMA fixed source address mode is not supported\n");
		return -DMA_ERR_INV_CONFIG;
	}
	if (config->dfix) {
		pr_err("GDMA fixed destination address mode is not supported\n");
		return -DMA_ERR_INV_CONFIG;
	}
	if (config->count > MAX_TRANSFER_LEN1) {
		pr_err("GDMA transfer length cannot exceed 0x%x.\n", MAX_TRANSFER_LEN1);
		return -DMA_ERR_INV_CONFIG;
	}
	if (config->limiter > MAX_SLOW_DOWN_CNTER) {
		pr_err("GDMA slow down counter cannot exceed 0x%x.\n", MAX_SLOW_DOWN_CNTER);
		return -DMA_ERR_INV_CONFIG;
	}
	switch (flag) {
	case ALL:
		/* Control Register */
		mt_reg_sync_writel((u32) config->src, DMA_SRC(channel));
		mt_reg_sync_writel((u32) config->dst, DMA_DST(channel));
		mt_reg_sync_writel((config->wplen) & DMA_GDMA_LEN_MAX_MASK, DMA_LEN2(channel));
		mt_reg_sync_writel(config->wpto, DMA_JUMP_ADDR(channel));
		mt_reg_sync_writel((config->count) & DMA_GDMA_LEN_MAX_MASK, DMA_LEN1(channel));
		/* setup security channel */
		if (config->sec) {
			pr_debug("1:ChSEC:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
			mt_reg_sync_writel((DMA_SEC_EN_BIT | readl(DMA_GDMA_SEC_EN(channel))),
					   DMA_GDMA_SEC_EN(channel));
			pr_debug("2:ChSEC:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
		} else {
			pr_debug("1:ChSEC:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
			mt_reg_sync_writel(((~DMA_SEC_EN_BIT) & readl(DMA_GDMA_SEC_EN(channel))),
					   DMA_GDMA_SEC_EN(channel));
			pr_debug("2:ChSEC:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
		}
		/* setup domain_cfg */
		if (config->domain) {
			pr_debug("1:Domain_cfg:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
			mt_reg_sync_writel(((config->domain << 1) | readl(DMA_GDMA_SEC_EN(channel))),
					   DMA_GDMA_SEC_EN(channel));
			pr_debug("2:Domain_cfg:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
		} else {
			pr_debug("1:Domain_cfg:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
			mt_reg_sync_writel((0x1 & readl(DMA_GDMA_SEC_EN(channel))), DMA_GDMA_SEC_EN(channel));
			pr_debug("2:Domain_cfg:%x\n", readl(DMA_GDMA_SEC_EN(channel)));
		}
		/* LPAE for 4GB mode */
		if (config->LPAE_en) {
			pr_debug("1:ADDR2_cfg:%x %x %x\n",
				 readl(DMA_SRC_4G_SUPPORT(channel)),
				 readl(DMA_DST_4G_SUPPORT(channel)),
				 readl(DMA_JUMP_4G_SUPPORT(channel)));
			mt_reg_sync_writel((DMA_ADDR2_EN_BIT | readl(DMA_SRC_4G_SUPPORT(channel))),
					   DMA_SRC_4G_SUPPORT(channel));
			mt_reg_sync_writel((DMA_ADDR2_EN_BIT | readl(DMA_DST_4G_SUPPORT(channel))),
					   DMA_DST_4G_SUPPORT(channel));
			mt_reg_sync_writel((DMA_ADDR2_EN_BIT | readl(DMA_JUMP_4G_SUPPORT(channel))),
					   DMA_JUMP_4G_SUPPORT(channel));
			pr_debug("2:ADDR2_cfg:%x %x %x\n",
				 readl(DMA_SRC_4G_SUPPORT(channel)),
				 readl(DMA_DST_4G_SUPPORT(channel)),
				 readl(DMA_JUMP_4G_SUPPORT(channel)));
		} else {
			pr_debug("1:ADDR2_cfg:%x %x %x\n",
				 readl(DMA_SRC_4G_SUPPORT(channel)),
				 readl(DMA_DST_4G_SUPPORT(channel)),
				 readl(DMA_JUMP_4G_SUPPORT(channel)));
			mt_reg_sync_writel(((~DMA_ADDR2_EN_BIT) & readl(DMA_SRC_4G_SUPPORT(channel))),
					   DMA_SRC_4G_SUPPORT(channel));
			mt_reg_sync_writel(((~DMA_ADDR2_EN_BIT) & readl(DMA_DST_4G_SUPPORT(channel))),
					   DMA_DST_4G_SUPPORT(channel));
			mt_reg_sync_writel(((~DMA_ADDR2_EN_BIT) & readl(DMA_JUMP_4G_SUPPORT(channel))),
					   DMA_JUMP_4G_SUPPORT(channel));
			pr_debug("2:ADDR2_cfg:%x %x %x\n",
				 readl(DMA_SRC_4G_SUPPORT(channel)),
				 readl(DMA_DST_4G_SUPPORT(channel)),
				 readl(DMA_JUMP_4G_SUPPORT(channel)));
		}
		if (config->wpen)
			dma_con |= DMA_CON_WPEN;
		if (config->wpsd)
			dma_con |= DMA_CON_WPSD;
		if (config->iten) {
			dma_ctrl[channel].isr_cb = config->isr_cb;
			dma_ctrl[channel].data = config->data;
			mt_reg_sync_writel(DMA_INT_EN_BIT, DMA_INT_EN(channel));
		} else {
			dma_ctrl[channel].isr_cb = NULL;
			dma_ctrl[channel].data = NULL;
			mt_reg_sync_writel(DMA_INT_EN_CLR_BIT, DMA_INT_EN(channel));
		}
		if (!(config->dfix) && !(config->sfix)) {
			dma_con |= (config->burst & DMA_CON_BURST_MASK);
		} else {
			if (config->dfix) {
				dma_con |= DMA_CON_DFIX;
				dma_con |= DMA_CON_WSIZE_1BYTE;
			}
			if (config->sfix) {
				dma_con |= DMA_CON_SFIX;
				dma_con |= DMA_CON_RSIZE_1BYTE;
			}
			/* fixed src/dst mode only supports burst type SINGLE */
			dma_con |= DMA_CON_BURST_SINGLE;
		}
		if (config->limiter) {
			limiter = (config->limiter) & DMA_CON_SLOW_MAX_MASK;
			dma_con |= limiter << DMA_CON_SLOW_OFFSET;
			dma_con |= DMA_CON_SLOW_EN;
		}
		mt_reg_sync_writel(dma_con, DMA_CON(channel));
		break;
	case SRC:
		mt_reg_sync_writel((u32) config->src, DMA_SRC(channel));
		break;
	case DST:
		mt_reg_sync_writel((u32) config->dst, DMA_DST(channel));
		break;
	case SRC_AND_DST:
		mt_reg_sync_writel((u32) config->src, DMA_SRC(channel));
		mt_reg_sync_writel((u32) config->dst, DMA_DST(channel));
		break;
	default:
		break;
	}

	/* use a data synchronization barrier to ensure that all writes have completed */
	mb();
	return 0;
}
EXPORT_SYMBOL(mt_config_gdma);
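/*
 * Example usage (a minimal sketch, not part of this driver): a polled
 * memory-to-memory copy through one channel. The function name is
 * hypothetical, and src/dst are assumed to be DMA-able bus addresses
 * already obtained by the caller (e.g. via dma_map_single()).
 */
#if 0	/* illustrative only */
static int cqdma_memcpy_poll_example(u32 src, u32 dst, u32 len)
{
	struct mt_gdma_conf conf;
	int ch, ret;

	ch = mt_req_gdma(GDMA_ANY);
	if (ch < 0)
		return ch;

	memset(&conf, 0, sizeof(conf));
	conf.src = src;
	conf.dst = dst;
	conf.count = len;			/* bytes, up to MAX_TRANSFER_LEN1 */
	conf.burst = DMA_CON_BURST_SINGLE;	/* single-beat burst */
	conf.iten = 0;				/* polled, no interrupt */

	ret = mt_config_gdma(ch, &conf, ALL);
	if (!ret)
		ret = mt_start_gdma(ch);
	if (!ret)
		ret = mt_polling_gdma(ch, 100);	/* returns 1 on timeout */

	mt_free_gdma(ch);
	return ret;
}
#endif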
/*
 * mt_free_gdma: free a general DMA channel.
 * @channel: channel to free
 * Return 0 for success; return a negative error code on failure.
 */
int mt_free_gdma(int channel)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	mt_stop_gdma(channel);
	dma_ctrl[channel].isr_cb = NULL;
	dma_ctrl[channel].data = NULL;
	dma_ctrl[channel].in_use = 0;
	return 0;
}
EXPORT_SYMBOL(mt_free_gdma);
/*
 * mt_dump_gdma: dump the registers of the specified GDMA channel
 * @channel: GDMA channel whose registers to dump
 * Return 0 for success; return a negative error code on failure.
 */
int mt_dump_gdma(int channel)
{
	unsigned int i;

	pr_debug("Channel 0x%x\n", channel);
	for (i = 0; i < 96; i++)
		pr_debug("addr:%p, value:%x\n", env_info[channel].base + i * 4,
			 readl(env_info[channel].base + i * 4));
	return 0;
}
EXPORT_SYMBOL(mt_dump_gdma);
/*
 * mt_warm_reset_gdma: warm reset the specified GDMA channel
 * @channel: GDMA channel to warm reset
 * Return 0 for success; return 1 if the reset times out;
 * return a negative error code on failure.
 */
int mt_warm_reset_gdma(int channel)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	mt_reg_sync_writel(DMA_WARM_RST_BIT, DMA_RESET(channel));
	if (mt_polling_gdma(channel, GDMA_WARM_RST_TIMEOUT) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mt_warm_reset_gdma);
/*
 * mt_hard_reset_gdma: hard reset the specified GDMA channel
 * @channel: GDMA channel to hard reset
 * Return 0 for success; return a negative error code on failure.
 */
int mt_hard_reset_gdma(int channel)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	pr_debug("GDMA_%d Hard Reset !!\n", channel);
	mt_reg_sync_writel(DMA_HARD_RST_BIT, DMA_RESET(channel));
	mt_reg_sync_writel(DMA_HARD_RST_CLR_BIT, DMA_RESET(channel));
	return 0;
}
EXPORT_SYMBOL(mt_hard_reset_gdma);
/*
 * mt_reset_gdma: reset the specified GDMA channel (warm reset first,
 * falling back to a hard reset if the warm reset times out)
 * @channel: GDMA channel to reset
 * Return 0 for success; return a negative error code on failure.
 */
int mt_reset_gdma(int channel)
{
	if (channel < GDMA_START)
		return -DMA_ERR_INVALID_CH;
	if (channel >= (GDMA_START + nr_cqdma_channel))
		return -DMA_ERR_INVALID_CH;
	if (dma_ctrl[channel].in_use == 0)
		return -DMA_ERR_CH_FREE;

	if (mt_warm_reset_gdma(channel) != 0)
		mt_hard_reset_gdma(channel);
	return 0;
}
EXPORT_SYMBOL(mt_reset_gdma);
/*
 * gdma1_irq_handler: general DMA interrupt service routine.
 * @irq: DMA IRQ number
 * @dev_id: device identifier passed to request_irq() (unused)
 * Return IRQ handled status.
 */
static irqreturn_t gdma1_irq_handler(int irq, void *dev_id)
{
	unsigned int glbsta;
	unsigned int i;

	/* map the IRQ number back to its channel */
	for (i = 0; i < nr_cqdma_channel; i++)
		if (env_info[i].irq == irq)
			break;
	if (i == nr_cqdma_channel) {
		pr_debug("[CQDMA]irq:%d over nr_cqdma_channel!\n", irq);
		return IRQ_NONE;
	}

	glbsta = readl(DMA_INT_FLAG(i));
	if (glbsta & 0x1) {
		if (dma_ctrl[i].isr_cb)
			dma_ctrl[i].isr_cb(dma_ctrl[i].data);
		mt_reg_sync_writel(DMA_INT_FLAG_CLR_BIT, DMA_INT_FLAG(i));
	} else {
		return IRQ_NONE;
	}
	return IRQ_HANDLED;
}
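/*
 * Example usage (a minimal sketch, not part of this driver): an
 * interrupt-driven transfer. With conf.iten set, gdma1_irq_handler()
 * above invokes conf.isr_cb(conf.data) when the channel's interrupt
 * flag is raised; here that callback completes a struct completion the
 * caller waits on. Assumes <linux/completion.h>; function names and
 * the 100 ms timeout are hypothetical.
 */
#if 0	/* illustrative only */
static void cqdma_done_cb(void *data)
{
	complete((struct completion *)data);
}

static int cqdma_memcpy_irq_example(u32 src, u32 dst, u32 len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct mt_gdma_conf conf;
	int ch, ret;

	ch = mt_req_gdma(GDMA_ANY);
	if (ch < 0)
		return ch;

	memset(&conf, 0, sizeof(conf));
	conf.src = src;
	conf.dst = dst;
	conf.count = len;
	conf.burst = DMA_CON_BURST_SINGLE;
	conf.iten = 1;			/* enable the channel interrupt */
	conf.isr_cb = cqdma_done_cb;
	conf.data = &done;

	ret = mt_config_gdma(ch, &conf, ALL);
	if (!ret)
		ret = mt_start_gdma(ch);
	if (!ret && !wait_for_completion_timeout(&done, msecs_to_jiffies(100)))
		ret = -ETIMEDOUT;

	mt_free_gdma(ch);	/* also stops and flushes the channel */
	return ret;
}
#endif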
/*
 * mt_reset_gdma_conf: reset the configuration of the specified DMA channel
 * @channel: channel number of the DMA channel to reset
 */
void mt_reset_gdma_conf(const unsigned int channel)
{
	struct mt_gdma_conf conf;

	memset(&conf, 0, sizeof(struct mt_gdma_conf));
	if (mt_config_gdma(channel, &conf, ALL) != 0)
		return;
}
static const struct of_device_id cqdma_of_ids[] = {
	{ .compatible = "mediatek,mt-cqdma-v1", },
	{}
};

static void cqdma_reset(int nr_channel)
{
	int i = 0;

	for (i = 0; i < nr_channel; i++)
		mt_reset_gdma_conf(i);
}
static int cqdma_probe(struct platform_device *pdev)
{
	int ret = 0;
	unsigned int i;
	struct resource *res;

	pr_debug("[MTK CQDMA] module probe.\n");
	of_property_read_u32(pdev->dev.of_node, "nr_channel", &nr_cqdma_channel);
	if (!nr_cqdma_channel || nr_cqdma_channel > MAX_CQDMA_CHANNELS) {
		pr_err("[CQDMA] invalid channel count %d\n", nr_cqdma_channel);
		return -ENODEV;
	}
	pr_err("[CQDMA] DMA channel = %d\n", nr_cqdma_channel);

	for (i = 0; i < nr_cqdma_channel; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		env_info[i].base = devm_ioremap_resource(&pdev->dev, res);
		env_info[i].irq = platform_get_irq(pdev, i);
		if (IS_ERR(env_info[i].base) || (env_info[i].irq <= 0)) {
			pr_err("unable to map CQDMA%d base registers and irq=%d!!!\n",
			       i, env_info[i].irq);
			return -EINVAL;
		}
		/*pr_debug("[CQDMA%d] vbase = 0x%p, irq = %d\n", i, env_info[i].base, env_info[i].irq);*/
		pr_err("[CQDMA%d] vbase = 0x%p, irq = %d\n", i, env_info[i].base, env_info[i].irq);
	}

	cqdma_reset(nr_cqdma_channel);

	for (i = 0; i < nr_cqdma_channel; i++) {
		ret = request_irq(env_info[i].irq, gdma1_irq_handler,
				  IRQF_TRIGGER_NONE, "CQDMA", &dma_ctrl);
		if (ret)
			pr_err("GDMA%d IRQ LINE NOT AVAILABLE, ret 0x%x!!\n", i, ret);
	}

#ifdef CONFIG_ARM_LPAE
	for (i = 0; i < nr_cqdma_channel; i++) {
		mt_reg_sync_writel(DMA_ADDR2_EN_BIT, DMA_SRC_4G_SUPPORT(i));
		mt_reg_sync_writel(DMA_ADDR2_EN_BIT, DMA_DST_4G_SUPPORT(i));
		mt_reg_sync_writel(DMA_ADDR2_EN_BIT, DMA_JUMP_4G_SUPPORT(i));
	}
#endif
	return ret;
}
static int cqdma_remove(struct platform_device *dev)
{
	return 0;
}

static struct platform_driver mtk_cqdma_driver = {
	.probe = cqdma_probe,
	.remove = cqdma_remove,
	.driver = {
		.name = "cqdma",
		.owner = THIS_MODULE,
#ifdef CONFIG_OF
		.of_match_table = cqdma_of_ids,
#endif
	},
};

static int __init init_cqdma(void)
{
	int ret = 0;

	ret = platform_driver_register(&mtk_cqdma_driver);
	if (ret)
		pr_err("CQDMA init FAIL, ret 0x%x!!!\n", ret);
	return ret;
}
late_initcall(init_cqdma);