musb_qmu.c — MediaTek MUSB QMU (queue-mode DMA) support glue
  1. #ifdef MUSB_QMU_SUPPORT
  2. #include <linux/delay.h>
  3. #include <linux/sched.h>
  4. #include <linux/slab.h>
  5. #include <linux/errno.h>
  6. #include <linux/list.h>
  7. #include <linux/timer.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/stat.h>
  10. #include "musb_core.h"
  11. #include "musb_host.h"
  12. #include "musbhsdma.h"
  13. #include "mtk_musb.h"
  14. #include "musb_qmu.h"
/* Base of the memory-mapped QMU register window; set once in musb_qmu_init(). */
void __iomem *qmu_base;
/* debug variable to check qmu_base issue: mirror of qmu_base, compared in
 * musb_is_qmu_stop() to detect unexpected corruption of the base pointer.
 */
void __iomem *qmu_base_2;
  18. int musb_qmu_init(struct musb *musb)
  19. {
  20. /* set DMA channel 0 burst mode to boost QMU speed */
  21. musb_writel(musb->mregs, 0x204, musb_readl(musb->mregs, 0x204) | 0x600);
  22. #ifdef CONFIG_OF
  23. qmu_base = (void __iomem *)(mtk_musb->mregs + MUSB_QMUBASE);
  24. /* debug variable to check qmu_base issue */
  25. qmu_base_2 = (void __iomem *)(mtk_musb->mregs + MUSB_QMUBASE);
  26. #else
  27. qmu_base = (void __iomem *)(USB_BASE + MUSB_QMUBASE);
  28. /* debug variable to check qmu_base issue */
  29. qmu_base_2 = (void __iomem *)(mtk_musb->mregs + MUSB_QMUBASE);
  30. #endif
  31. mb();
  32. if (qmu_init_gpd_pool(musb->controller)) {
  33. QMU_ERR("[QMU]qmu_init_gpd_pool fail\n");
  34. return -1;
  35. }
  36. return 0;
  37. }
  38. void musb_qmu_exit(struct musb *musb)
  39. {
  40. qmu_destroy_gpd_pool(musb->controller);
  41. }
  42. void musb_disable_q_all(struct musb *musb)
  43. {
  44. u32 ep_num;
  45. QMU_WARN("disable_q_all\n");
  46. for (ep_num = 1; ep_num <= RXQ_NUM; ep_num++) {
  47. if (mtk_is_qmu_enabled(ep_num, RXQ))
  48. mtk_disable_q(musb, ep_num, 1);
  49. }
  50. for (ep_num = 1; ep_num <= TXQ_NUM; ep_num++) {
  51. if (mtk_is_qmu_enabled(ep_num, TXQ))
  52. mtk_disable_q(musb, ep_num, 0);
  53. }
  54. }
  55. void musb_kick_D_CmdQ(struct musb *musb, struct musb_request *request)
  56. {
  57. int isRx;
  58. isRx = request->tx ? 0 : 1;
  59. /* enable qmu at musb_gadget_eanble */
  60. #if 0
  61. if (!mtk_is_qmu_enabled(request->epnum, isRx)) {
  62. /* enable qmu */
  63. mtk_qmu_enable(musb, request->epnum, isRx);
  64. }
  65. #endif
  66. /* note tx needed additional zlp field */
  67. mtk_qmu_insert_task(request->epnum,
  68. isRx,
  69. (u8 *) request->request.dma,
  70. request->request.length, ((request->request.zero == 1) ? 1 : 0));
  71. mtk_qmu_resume(request->epnum, isRx);
  72. }
/*
 * musb_q_irq - handle a QMU "queue done" interrupt.
 * @musb: controller instance; musb->int_queue holds the latched status bits
 *
 * In QMU_TASKLET builds, completion work is deferred: the status word is
 * OR-accumulated into musb->qmu_done_intr and the qmu_done tasklet is
 * scheduled. Otherwise every endpoint queue is walked inline and finished
 * RX/TX GPDs are completed. Error bits in the status word are processed in
 * both configurations via mtk_qmu_irq_err().
 *
 * NOTE(review): retval is initialized to IRQ_NONE and never updated, so the
 * caller always sees IRQ_NONE even when work was done — confirm the caller
 * ignores (or re-derives) this return value.
 */
irqreturn_t musb_q_irq(struct musb *musb)
{
	irqreturn_t retval = IRQ_NONE;
	u32 wQmuVal = musb->int_queue;	/* latched by the low-level ISR */
#ifndef QMU_TASKLET
	int i;
#endif

	QMU_INFO("wQmuVal:%d\n", wQmuVal);
#ifdef QMU_TASKLET
	/* Merge new status bits if the tasklet has not consumed the old ones. */
	if (musb->qmu_done_intr != 0) {
		musb->qmu_done_intr = wQmuVal | musb->qmu_done_intr;
		QMU_WARN("Has not handle yet %x\n", musb->qmu_done_intr);
	} else
		musb->qmu_done_intr = wQmuVal;
	tasklet_schedule(&musb->qmu_done);
#else
	/* Complete finished GPDs inline for every endpoint queue. */
	for (i = 1; i <= MAX_QMU_EP; i++) {
		if (wQmuVal & DQMU_M_RX_DONE(i))
			qmu_done_rx(musb, i);
		if (wQmuVal & DQMU_M_TX_DONE(i))
			qmu_done_tx(musb, i);
	}
#endif
	/* Handle any error conditions flagged in the status word. */
	mtk_qmu_irq_err(musb, wQmuVal);

	return retval;
}
  99. void musb_flush_qmu(u32 ep_num, u8 isRx)
  100. {
  101. QMU_DBG("flush %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
  102. mtk_qmu_stop(ep_num, isRx);
  103. qmu_reset_gpd_pool(ep_num, isRx);
  104. }
  105. void musb_restart_qmu(struct musb *musb, u32 ep_num, u8 isRx)
  106. {
  107. QMU_DBG("restart %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
  108. flush_ep_csr(musb, ep_num, isRx);
  109. mtk_qmu_enable(musb, ep_num, isRx);
  110. }
  111. bool musb_is_qmu_stop(u32 ep_num, u8 isRx)
  112. {
  113. void __iomem *base = qmu_base;
  114. /* debug variable to check qmu_base issue */
  115. if (qmu_base != qmu_base_2) {
  116. QMU_WARN("qmu_base != qmu_base_2");
  117. QMU_WARN("qmu_base = %p, qmu_base_2=%p", qmu_base, qmu_base_2);
  118. }
  119. if (!isRx) {
  120. if (MGC_ReadQMU16(base, MGC_O_QMU_TQCSR(ep_num)) & DQMU_QUE_ACTIVE)
  121. return false;
  122. else
  123. return true;
  124. } else {
  125. if (MGC_ReadQMU16(base, MGC_O_QMU_RQCSR(ep_num)) & DQMU_QUE_ACTIVE)
  126. return false;
  127. else
  128. return true;
  129. }
  130. }
  131. void musb_tx_zlp_qmu(struct musb *musb, u32 ep_num)
  132. {
  133. /* sent ZLP through PIO */
  134. void __iomem *epio = musb->endpoints[ep_num].regs;
  135. void __iomem *mbase = musb->mregs;
  136. unsigned long timeout = jiffies + HZ;
  137. int is_timeout = 1;
  138. u16 csr;
  139. QMU_WARN("TX ZLP direct sent\n");
  140. musb_ep_select(mbase, ep_num);
  141. /* disable dma for pio */
  142. csr = musb_readw(epio, MUSB_TXCSR);
  143. csr &= ~MUSB_TXCSR_DMAENAB;
  144. musb_writew(epio, MUSB_TXCSR, csr);
  145. /* TXPKTRDY */
  146. csr = musb_readw(epio, MUSB_TXCSR);
  147. csr |= MUSB_TXCSR_TXPKTRDY;
  148. musb_writew(epio, MUSB_TXCSR, csr);
  149. /* wait ZLP sent */
  150. while (time_before_eq(jiffies, timeout)) {
  151. csr = musb_readw(epio, MUSB_TXCSR);
  152. if (!(csr & MUSB_TXCSR_TXPKTRDY)) {
  153. is_timeout = 0;
  154. break;
  155. }
  156. }
  157. /* re-enable dma for qmu */
  158. csr = musb_readw(epio, MUSB_TXCSR);
  159. csr |= MUSB_TXCSR_DMAENAB;
  160. musb_writew(epio, MUSB_TXCSR, csr);
  161. if (is_timeout)
  162. QMU_ERR("TX ZLP sent fail???\n");
  163. QMU_WARN("TX ZLP sent done\n");
  164. }
  165. #endif