mtk_qmu.c
#ifdef MUSB_QMU_SUPPORT
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include "musb_qmu.h"

static PGPD Rx_gpd_head[15];
static PGPD Tx_gpd_head[15];
static PGPD Rx_gpd_end[15];
static PGPD Tx_gpd_end[15];
static PGPD Rx_gpd_last[15];
static PGPD Tx_gpd_last[15];
static GPD_R Rx_gpd_List[15];
static GPD_R Tx_gpd_List[15];
static u64 Rx_gpd_Offset[15];
static u64 Tx_gpd_Offset[15];
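
/*
 * Per-queue GPD (General Purpose Descriptor) bookkeeping, indexed by endpoint
 * number (index 0 is unused; queues are numbered from 1).  *_gpd_head is the
 * start of each ring allocated from coherent DMA memory, *_gpd_end is the next
 * free slot to hand to the hardware, *_gpd_last is the next slot expected to
 * complete, *_gpd_List tracks the ring cursor, and *_gpd_Offset caches the
 * virtual-to-DMA address delta used for cheap address translation.  Each GPD
 * carries an 8-bit checksum computed by PDU_calcCksum() below.
 */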
u8 PDU_calcCksum(u8 *data, int len)
{
	u8 *uDataPtr, ckSum;
	int i;

	*(data + 1) = 0x0;
	uDataPtr = data;
	ckSum = 0;
	for (i = 0; i < len; i++)
		ckSum += *(uDataPtr + i);
	return 0xFF - ckSum;
}
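
/*
 * Advance the ring cursor for the given queue and return the slot it pointed
 * to, wrapping back to pStart once pEnd is reached.  GPD slots are spaced
 * GPD_LEN_ALIGNED bytes apart.
 */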
static PGPD get_gpd(u8 isRx, u32 num)
{
	PGPD ptr;

	if (isRx) {
		ptr = Rx_gpd_List[num].pNext;
		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) (Rx_gpd_List[num].pNext) + GPD_LEN_ALIGNED);
		if (Rx_gpd_List[num].pNext >= Rx_gpd_List[num].pEnd)
			Rx_gpd_List[num].pNext = Rx_gpd_List[num].pStart;
	} else {
		ptr = Tx_gpd_List[num].pNext;
		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) (Tx_gpd_List[num].pNext) + GPD_LEN_ALIGNED);
		if (Tx_gpd_List[num].pNext >= Tx_gpd_List[num].pEnd)
			Tx_gpd_List[num].pNext = Tx_gpd_List[num].pStart;
	}
	return ptr;
}
static void gpd_ptr_align(u8 isRx, u32 num, PGPD ptr)
{
	if (isRx)
		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) (ptr) + GPD_LEN_ALIGNED);
	else
		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) (ptr) + GPD_LEN_ALIGNED);
}
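
/*
 * The coherent GPD buffer is both virtually and physically contiguous, so a
 * single per-queue offset (recorded in init_gpd_list) is enough to convert
 * between the CPU virtual address and the DMA address programmed into the
 * hardware registers and GPD next-pointers.
 */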
static dma_addr_t gpd_virt_to_phys(void *vaddr, u8 isRx, u32 num)
{
	dma_addr_t paddr;

	if (isRx)
		paddr = (dma_addr_t) ((u64) (unsigned long)vaddr - Rx_gpd_Offset[num]);
	else
		paddr = (dma_addr_t) ((u64) (unsigned long)vaddr - Tx_gpd_Offset[num]);

	QMU_INFO("%s[%d]phys=%p<->virt=%p\n",
		 ((isRx == RXQ) ? "RQ" : "TQ"), num, (void *)paddr, vaddr);

	return paddr;
}
static void *gpd_phys_to_virt(dma_addr_t paddr, u8 isRx, u32 num)
{
	void *vaddr;

	if (isRx)
		vaddr = (void *)(unsigned long)((u64) paddr + Rx_gpd_Offset[num]);
	else
		vaddr = (void *)(unsigned long)((u64) paddr + Tx_gpd_Offset[num]);

	QMU_INFO("%s[%d]phys=%p<->virt=%p\n",
		 ((isRx == RXQ) ? "RQ" : "TQ"), num, (void *)paddr, vaddr);

	return vaddr;
}
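
/* Record the ring boundaries and the virt/DMA offset for one queue. */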
static void init_gpd_list(u8 isRx, int num, PGPD ptr, PGPD io_ptr, u32 size)
{
	if (isRx) {
		Rx_gpd_List[num].pStart = ptr;
		Rx_gpd_List[num].pEnd = (PGPD) ((u8 *) (ptr + size) + (GPD_EXT_LEN * size));
		Rx_gpd_Offset[num] = (u64) (unsigned long)ptr - (u64) (unsigned long)io_ptr;
		ptr++;
		Rx_gpd_List[num].pNext = (PGPD) ((u8 *) ptr + GPD_EXT_LEN);

		QMU_INFO("Rx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
			 num, Rx_gpd_List[num].pStart, Rx_gpd_List[num].pNext,
			 Rx_gpd_List[num].pEnd);
		QMU_INFO("Rx_gpd_Offset[%d]=%p\n", num, (void *)(unsigned long)Rx_gpd_Offset[num]);
	} else {
		Tx_gpd_List[num].pStart = ptr;
		Tx_gpd_List[num].pEnd = (PGPD) ((u8 *) (ptr + size) + (GPD_EXT_LEN * size));
		Tx_gpd_Offset[num] = (u64) (unsigned long)ptr - (u64) (unsigned long)io_ptr;
		ptr++;
		Tx_gpd_List[num].pNext = (PGPD) ((u8 *) ptr + GPD_EXT_LEN);

		QMU_INFO("Tx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
			 num, Tx_gpd_List[num].pStart, Tx_gpd_List[num].pNext,
			 Tx_gpd_List[num].pEnd);
		QMU_INFO("Tx_gpd_Offset[%d]=%p\n", num, (void *)(unsigned long)Tx_gpd_Offset[num]);
	}
}
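
/*
 * Allocate one coherent GPD ring per RX and TX queue (MAX_GPD_NUM entries of
 * GPD_LEN_ALIGNED bytes each), clear the first descriptor's HWO bit so the
 * hardware does not yet own anything, and log the start address that will
 * later be written to RQSAR/TQSAR.
 */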
int qmu_init_gpd_pool(struct device *dev)
{
	u32 i, size;
	TGPD *ptr, *io_ptr;
	dma_addr_t dma_handle;
	u32 gpd_sz;

	gpd_sz = (u32) (u64) sizeof(TGPD);
	QMU_INFO("sizeof(TGPD):%d\n", gpd_sz);
	if (gpd_sz != GPD_SZ)
		QMU_ERR("ERR!!!, GPD SIZE != %d\n", GPD_SZ);

	for (i = 1; i <= RXQ_NUM; i++) {
		/* Allocate Rx GPD */
		size = GPD_LEN_ALIGNED * MAX_GPD_NUM;
		ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
		if (!ptr)
			return -ENOMEM;
		memset(ptr, 0, size);
		io_ptr = (TGPD *) (dma_handle);

		init_gpd_list(RXQ, i, ptr, io_ptr, MAX_GPD_NUM);
		Rx_gpd_head[i] = ptr;
		QMU_INFO("ALLOC RX GPD Head [%d] Virtual Mem=%p, DMA addr=%p\n", i, Rx_gpd_head[i],
			 io_ptr);
		Rx_gpd_end[i] = Rx_gpd_last[i] = Rx_gpd_head[i];
		TGPD_CLR_FLAGS_HWO(Rx_gpd_end[i]);
		gpd_ptr_align(RXQ, i, Rx_gpd_end[i]);
		QMU_INFO("RQSAR[%d]=%p\n", i, (void *)gpd_virt_to_phys(Rx_gpd_end[i], RXQ, i));
	}

	for (i = 1; i <= TXQ_NUM; i++) {
		/* Allocate Tx GPD */
		size = GPD_LEN_ALIGNED * MAX_GPD_NUM;
		ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
		if (!ptr)
			return -ENOMEM;
		memset(ptr, 0, size);
		io_ptr = (TGPD *) (dma_handle);

		init_gpd_list(TXQ, i, ptr, io_ptr, MAX_GPD_NUM);
		Tx_gpd_head[i] = ptr;
		QMU_INFO("ALLOC TX GPD Head [%d] Virtual Mem=%p, DMA addr=%p\n", i, Tx_gpd_head[i],
			 io_ptr);
		Tx_gpd_end[i] = Tx_gpd_last[i] = Tx_gpd_head[i];
		TGPD_CLR_FLAGS_HWO(Tx_gpd_end[i]);
		gpd_ptr_align(TXQ, i, Tx_gpd_end[i]);
		QMU_INFO("TQSAR[%d]=%p\n", i, (void *)gpd_virt_to_phys(Tx_gpd_end[i], TXQ, i));
	}

	return 0;
}
void qmu_reset_gpd_pool(u32 ep_num, u8 isRx)
{
	u32 size = GPD_LEN_ALIGNED * MAX_GPD_NUM;

	/* SW reset */
	if (isRx) {
		memset(Rx_gpd_head[ep_num], 0, size);
		Rx_gpd_end[ep_num] = Rx_gpd_last[ep_num] = Rx_gpd_head[ep_num];
		TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);
		gpd_ptr_align(isRx, ep_num, Rx_gpd_end[ep_num]);
	} else {
		memset(Tx_gpd_head[ep_num], 0, size);
		Tx_gpd_end[ep_num] = Tx_gpd_last[ep_num] = Tx_gpd_head[ep_num];
		TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);
		gpd_ptr_align(isRx, ep_num, Tx_gpd_end[ep_num]);
	}
}
void qmu_destroy_gpd_pool(struct device *dev)
{
	int i;
	u32 size = GPD_LEN_ALIGNED * MAX_GPD_NUM;

	for (i = 1; i <= RXQ_NUM; i++) {
		dma_free_coherent(dev, size, Rx_gpd_head[i],
				  gpd_virt_to_phys(Rx_gpd_head[i], RXQ, i));
	}
	for (i = 1; i <= TXQ_NUM; i++) {
		dma_free_coherent(dev, size, Tx_gpd_head[i],
				  gpd_virt_to_phys(Tx_gpd_head[i], TXQ, i));
	}
}
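
/*
 * Queue one buffer on an RX/TX GPD ring.  The current tail GPD is filled in,
 * a fresh (cleared, HWO=0) GPD is linked in as the new tail, and only then is
 * the checksum written and the HWO bit set on the filled GPD, with memory
 * barriers in between, so the hardware never sees a partially built
 * descriptor.
 */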
static void prepare_rx_gpd(u8 *pBuf, u32 data_len, u8 ep_num)
{
	TGPD *gpd;

	/* get gpd from tail */
	gpd = Rx_gpd_end[ep_num];

	TGPD_SET_DATA(gpd, pBuf);
	TGPD_CLR_FORMAT_BDP(gpd);

	TGPD_SET_DataBUF_LEN(gpd, data_len);
	TGPD_SET_BUF_LEN(gpd, 0);
	/* TGPD_CLR_FORMAT_BPS(gpd); */

	TGPD_SET_IOC(gpd);

	/* update gpd tail */
	Rx_gpd_end[ep_num] = get_gpd(RXQ, ep_num);
	QMU_INFO("[RX]" "Rx_gpd_end[%d]=%p gpd=%p\n", ep_num, Rx_gpd_end[ep_num], gpd);
	memset(Rx_gpd_end[ep_num], 0, GPD_LEN_ALIGNED);
	TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);

	/* make sure struct ready before set to next */
	mb();
	TGPD_SET_NEXT(gpd, gpd_virt_to_phys(Rx_gpd_end[ep_num], RXQ, ep_num));

	TGPD_SET_CHKSUM_HWO(gpd, 16);

	/* make sure struct ready before HWO */
	mb();
	TGPD_SET_FLAGS_HWO(gpd);
}
static void prepare_tx_gpd(u8 *pBuf, u32 data_len, u8 ep_num, u8 zlp)
{
	TGPD *gpd;

	/* get gpd from tail */
	gpd = Tx_gpd_end[ep_num];

	TGPD_SET_DATA(gpd, pBuf);
	TGPD_CLR_FORMAT_BDP(gpd);

	TGPD_SET_BUF_LEN(gpd, data_len);
	TGPD_SET_EXT_LEN(gpd, 0);

	if (zlp)
		TGPD_SET_FORMAT_ZLP(gpd);
	else
		TGPD_CLR_FORMAT_ZLP(gpd);
	/* TGPD_CLR_FORMAT_BPS(gpd); */

	TGPD_SET_IOC(gpd);

	/* update gpd tail */
	Tx_gpd_end[ep_num] = get_gpd(TXQ, ep_num);
	QMU_INFO("[TX]" "Tx_gpd_end[%d]=%p gpd=%p\n", ep_num, Tx_gpd_end[ep_num], gpd);
	memset(Tx_gpd_end[ep_num], 0, GPD_LEN_ALIGNED);
	TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);

	/* make sure struct ready before set to next */
	mb();
	TGPD_SET_NEXT(gpd, gpd_virt_to_phys(Tx_gpd_end[ep_num], TXQ, ep_num));

	TGPD_SET_CHKSUM_HWO(gpd, 16);

	/* make sure struct ready before HWO */
	mb();
	TGPD_SET_FLAGS_HWO(gpd);
}
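
/*
 * Kick a stopped queue.  If the queue CSR reads back as zero right after the
 * resume write, the queue apparently did not restart, so the resume command
 * is logged and issued a second time.
 */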
void mtk_qmu_resume(u8 ep_num, u8 isRx)
{
	void __iomem *base = qmu_base;

	if (!isRx) {
		MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_RESUME);
		if (!MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num))) {
			QMU_ERR("TQCSR[%d]=%x\n", ep_num,
				MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num)));
			MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_RESUME);
			QMU_ERR("TQCSR[%d]=%x\n", ep_num,
				MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num)));
		}
	} else {
		MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_RESUME);
		if (!MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num))) {
			QMU_ERR("RQCSR[%d]=%x\n", ep_num,
				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)));
			MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_RESUME);
			QMU_ERR("RQCSR[%d]=%x\n", ep_num,
				MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)));
		}
	}
}
bool mtk_is_qmu_enabled(u8 ep_num, u8 isRx)
{
	void __iomem *base = qmu_base;

	if (isRx) {
		if (MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) & (USB_QMU_Rx_EN(ep_num)))
			return true;
	} else {
		if (MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) & (USB_QMU_Tx_EN(ep_num)))
			return true;
	}
	return false;
}
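
/*
 * Switch one endpoint into QMU mode: enable DMA (and ISO, if applicable) in
 * the endpoint CSR, mask the normal endpoint interrupt, program the queue
 * start address with the first GPD, set the queue-enable bit in USBGCSR,
 * optionally enable checksum/ZLP handling, unmask the relevant QMU
 * interrupts, and finally start the queue.
 */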
void mtk_qmu_enable(struct musb *musb, u8 ep_num, u8 isRx)
{
	struct musb_ep *musb_ep;
	u32 QCR;
	void __iomem *base = qmu_base;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio;
	u16 csr = 0;
	u16 intr_e = 0;

	epio = musb->endpoints[ep_num].regs;
	musb_ep_select(mbase, ep_num);

	if (isRx) {
		QMU_WARN("enable RQ(%d)\n", ep_num);

		/* enable dma */
		csr |= MUSB_RXCSR_DMAENAB;

		/* check ISOC */
		musb_ep = &musb->endpoints[ep_num].ep_out;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		musb_writew(epio, MUSB_RXCSR, csr);

		/* turn off intrRx */
		intr_e = musb_readw(mbase, MUSB_INTRRXE);
		intr_e = intr_e & (~(1 << (ep_num)));
		musb_writew(mbase, MUSB_INTRRXE, intr_e);

		/* set 1st gpd and enable */
		MGC_WriteQMU32(base, MGC_O_QMU_RQSAR(ep_num),
			       gpd_virt_to_phys(Rx_gpd_end[ep_num], RXQ, ep_num));
		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
				MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) | (USB_QMU_Rx_EN(ep_num)));

#ifdef CFG_CS_CHECK
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR | DQMU_RQCS_EN(ep_num));
#endif

#ifdef CFG_RX_ZLP_EN
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR | DQMU_RX_ZLP(ep_num));
#endif

#ifdef CFG_RX_COZ_EN
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR | DQMU_RX_COZ(ep_num));
#endif

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMCR,
				DQMU_M_RX_DONE(ep_num) | DQMU_M_RQ_EMPTY | DQMU_M_RXQ_ERR |
				DQMU_M_RXEP_ERR);

#ifdef CFG_EMPTY_CHECK
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPMCR, DQMU_M_RX_EMPTY(ep_num));
#else
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_RQ_EMPTY);
#endif

		QCR = DQMU_M_RX_LEN_ERR(ep_num);
#ifdef CFG_CS_CHECK
		QCR |= DQMU_M_RX_GPDCS_ERR(ep_num);
#endif
#ifdef CFG_RX_ZLP_EN
		QCR |= DQMU_M_RX_ZLP_ERR(ep_num);
#endif
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIMCR, QCR);

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIMCR, DQMU_M_RX_EP_ERR(ep_num));

		mb();
		/* qmu start */
		MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_START);
	} else {
		QMU_WARN("enable TQ(%d)\n", ep_num);

		/* enable dma */
		csr |= MUSB_TXCSR_DMAENAB;

		/* check ISOC */
		musb_ep = &musb->endpoints[ep_num].ep_in;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;
		musb_writew(epio, MUSB_TXCSR, csr);

		/* turn off intrTx */
		intr_e = musb_readw(mbase, MUSB_INTRTXE);
		intr_e = intr_e & (~(1 << ep_num));
		musb_writew(mbase, MUSB_INTRTXE, intr_e);

		/* set 1st gpd and enable */
		MGC_WriteQMU32(base, MGC_O_QMU_TQSAR(ep_num),
			       gpd_virt_to_phys(Tx_gpd_end[ep_num], TXQ, ep_num));
		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
				MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR) | (USB_QMU_Tx_EN(ep_num)));

#ifdef CFG_CS_CHECK
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR | DQMU_TQCS_EN(ep_num));
#endif

#if (TXZLP == HW_MODE)
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR | DQMU_TX_ZLP(ep_num));
#elif (TXZLP == GPD_MODE)
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR | DQMU_TX_MULTIPLE(ep_num));
#endif

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMCR,
				DQMU_M_TX_DONE(ep_num) | DQMU_M_TQ_EMPTY | DQMU_M_TXQ_ERR |
				DQMU_M_TXEP_ERR);

#ifdef CFG_EMPTY_CHECK
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPMCR, DQMU_M_TX_EMPTY(ep_num));
#else
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_TQ_EMPTY);
#endif

		QCR = DQMU_M_TX_LEN_ERR(ep_num);
#ifdef CFG_CS_CHECK
		QCR |= DQMU_M_TX_GPDCS_ERR(ep_num) | DQMU_M_TX_BDCS_ERR(ep_num);
#endif
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIMCR, QCR);

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIMCR, DQMU_M_TX_EP_ERR(ep_num));

		mb();
		/* qmu start */
		MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_START);
	}
}
void mtk_qmu_stop(u8 ep_num, u8 isRx)
{
	void __iomem *base = qmu_base;

	if (!isRx) {
		if (MGC_ReadQMU16(base, MGC_O_QMU_TQCSR(ep_num)) & DQMU_QUE_ACTIVE) {
			MGC_WriteQMU32(base, MGC_O_QMU_TQCSR(ep_num), DQMU_QUE_STOP);
			QMU_WARN("Stop TQ %d\n", ep_num);
		} else {
			QMU_WARN("TQ %d already inactive\n", ep_num);
		}
	} else {
		if (MGC_ReadQMU16(base, MGC_O_QMU_RQCSR(ep_num)) & DQMU_QUE_ACTIVE) {
			MGC_WriteQMU32(base, MGC_O_QMU_RQCSR(ep_num), DQMU_QUE_STOP);
			QMU_WARN("Stop RQ %d\n", ep_num);
		} else {
			QMU_WARN("RQ %d already inactive\n", ep_num);
		}
	}
}
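
/*
 * Reverse of mtk_qmu_enable: stop the queue, clear its start address, drop
 * the queue-enable bit in USBGCSR and mask the QMU interrupts again.
 */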
static void mtk_qmu_disable(u8 ep_num, u8 isRx)
{
	u32 QCR;
	void __iomem *base = qmu_base;

	QMU_WARN("disable %s(%d)\n", isRx ? "RQ" : "TQ", ep_num);
	mtk_qmu_stop(ep_num, isRx);

	if (isRx) {
		/* clear Queue start address */
		MGC_WriteQMU32(base, MGC_O_QMU_RQSAR(ep_num), 0);

		/* KOBE, in denali, different EP QMU EN is separated in MGC_O_QUCS_USBGCSR ?? */
		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
				MGC_ReadQUCS32(base,
					       MGC_O_QUCS_USBGCSR) & (~(USB_QMU_Rx_EN(ep_num))));

		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR & (~(DQMU_RQCS_EN(ep_num))));
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR3);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR3, QCR & (~(DQMU_RX_ZLP(ep_num))));

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_RX_DONE(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPMSR, DQMU_M_RX_EMPTY(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIMSR,
				DQMU_M_RX_LEN_ERR(ep_num) | DQMU_M_RX_GPDCS_ERR(ep_num) |
				DQMU_M_RX_ZLP_ERR(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIMSR, DQMU_M_RX_EP_ERR(ep_num));
	} else {
		/* clear Queue start address */
		MGC_WriteQMU32(base, MGC_O_QMU_TQSAR(ep_num), 0);

		/* KOBE, in denali, different EP QMU EN is separated in MGC_O_QUCS_USBGCSR ?? */
		MGC_WriteQUCS32(base, MGC_O_QUCS_USBGCSR,
				MGC_ReadQUCS32(base,
					       MGC_O_QUCS_USBGCSR) & (~(USB_QMU_Tx_EN(ep_num))));

		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR0);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR0, QCR & (~(DQMU_TQCS_EN(ep_num))));
		QCR = MGC_ReadQMU32(base, MGC_O_QMU_QCR2);
		MGC_WriteQMU32(base, MGC_O_QMU_QCR2, QCR & (~(DQMU_TX_ZLP(ep_num))));

		MGC_WriteQIRQ32(base, MGC_O_QIRQ_QIMSR, DQMU_M_TX_DONE(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPMSR, DQMU_M_TX_EMPTY(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIMSR,
				DQMU_M_TX_LEN_ERR(ep_num) | DQMU_M_TX_GPDCS_ERR(ep_num) |
				DQMU_M_TX_BDCS_ERR(ep_num));
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIMSR, DQMU_M_TX_EP_ERR(ep_num));
	}
}
void mtk_qmu_insert_task(u8 ep_num, u8 isRx, u8 *buf, u32 length, u8 zlp)
{
	QMU_INFO("ep_num: %d, isRx: %d, buf: %p, length: %d\n", ep_num, isRx, buf, length);

	if (isRx) {
		/* RX ignores the zlp argument */
		prepare_rx_gpd(buf, length, ep_num);
	} else {
		prepare_tx_gpd(buf, length, ep_num, zlp);
	}
}
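
/*
 * RX completion: walk the GPD ring from the last serviced descriptor
 * (Rx_gpd_last) up to the hardware's current pointer (RQCPR), accumulating
 * the received length into the pending request and giving one request back
 * to the gadget driver per completed GPD.  A descriptor with HWO still set
 * inside that range indicates a hardware/driver inconsistency and is only
 * logged.
 */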
void qmu_done_rx(struct musb *musb, u8 ep_num)
{
	void __iomem *base = qmu_base;
	TGPD *gpd = Rx_gpd_last[ep_num];
	TGPD *gpd_current = (TGPD *) (unsigned long)MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_out;
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* trying to give back the request to the gadget driver */
	req = next_request(musb_ep);
	if (!req) {
		QMU_ERR("[RXD]" "%s Cannot get next request of %d, but QMU has done.\n",
			__func__, ep_num);
		return;
	}
	request = &req->request;

	/* translate the PHY addr read from the QMU register into a VIR addr */
	gpd_current = (TGPD *) gpd_phys_to_virt((dma_addr_t) gpd_current, RXQ, ep_num);

	QMU_INFO("[RXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		 __func__, ep_num, gpd, gpd_current, Rx_gpd_end[ep_num]);

	/* gpd_current should at least point to the GPD after the previous last one */
	if (gpd == gpd_current) {
		QMU_ERR("[RXD][ERROR] gpd(%p) == gpd_current(%p)\n", gpd, gpd_current);
		QMU_ERR("[RXD][ERROR]EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			ep_num,
			MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQSAR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQLDPR(ep_num)));
		QMU_ERR("[RXD][ERROR]QCR0=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
			MGC_ReadQMU32(base, MGC_O_QMU_QCR0),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR2),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR3),
			MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR));
		QMU_ERR("[RXD][ERROR]HWO=%d, Next_GPD=%p ,DataBufLen=%d, DataBuf=%p, RecvLen=%d, Endpoint=%d\n",
			(u32) TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd),
			(u32) TGPD_GET_DataBUF_LEN(gpd), TGPD_GET_DATA(gpd),
			(u32) TGPD_GET_BUF_LEN(gpd), (u32) TGPD_GET_EPaddr(gpd));
		return;
	}

	if (!gpd || !gpd_current) {
		QMU_ERR
		    ("[RXD][ERROR] EP%d, gpd=%p, gpd_current=%p, ishwo=%d, rx_gpd_last=%p, RQCPR=0x%x\n",
		     ep_num, gpd, gpd_current, ((gpd == NULL) ? 999 : TGPD_IS_FLAGS_HWO(gpd)),
		     Rx_gpd_last[ep_num], MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)));
		return;
	}

	if (TGPD_IS_FLAGS_HWO(gpd)) {
		QMU_ERR("[RXD][ERROR]" "HWO=1!!\n");
		QMU_ERR("[RXD][ERROR]" "HWO=1!!\n");
		QMU_ERR("[RXD][ERROR]" "HWO=1!!\n");
		QMU_ERR("[RXD][ERROR]" "HWO=1!!\n");
		QMU_ERR("[RXD][ERROR]" "HWO=1!!\n");
		/* BUG_ON(1); */
		return;
	}

	/* NORMAL EXEC FLOW */
	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
		u32 rcv_len = (u32) TGPD_GET_BUF_LEN(gpd);
		u32 buf_len = (u32) TGPD_GET_DataBUF_LEN(gpd);

		if (rcv_len > buf_len)
			QMU_ERR("[RXD][ERROR] rcv(%d) > buf(%d) AUK!?\n", rcv_len, buf_len);

		QMU_INFO("[RXD]" "gpd=%p ->HWO=%d, Next_GPD=%p, RcvLen=%d, BufLen=%d, pBuf=%p\n",
			 gpd, TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd), rcv_len, buf_len,
			 TGPD_GET_DATA(gpd));

		request->actual += rcv_len;

		if (!TGPD_GET_NEXT(gpd) || !TGPD_GET_DATA(gpd)) {
			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] EP%d ,gpd=%p\n", ep_num, gpd);
			/* BUG_ON(1); */
			return;
		}

		gpd = TGPD_GET_NEXT(gpd);
		gpd = gpd_phys_to_virt((dma_addr_t) gpd, RXQ, ep_num);
		if (!gpd) {
			QMU_ERR("[RXD][ERROR] !gpd, EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] !gpd, EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] !gpd, EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] !gpd, EP%d ,gpd=%p\n", ep_num, gpd);
			QMU_ERR("[RXD][ERROR] !gpd, EP%d ,gpd=%p\n", ep_num, gpd);
			/* BUG_ON(1); */
			return;
		}

		Rx_gpd_last[ep_num] = gpd;
		musb_g_giveback(musb_ep, request, 0);
		req = next_request(musb_ep);
		if (!req) {
			/* request list drained before the walk reached gpd_current */
			QMU_ERR("[RXD]" "%s No more request for EP%d\n", __func__, ep_num);
			break;
		}
		request = &req->request;
	}

	/* QMU should keep any HWO gpd for itself, so reaching one here is an error */
	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
		QMU_ERR("[RXD][ERROR]" "gpd=%p\n", gpd);
		QMU_ERR("[RXD][ERROR]" "EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			ep_num,
			MGC_ReadQMU32(base, MGC_O_QMU_RQCSR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQSAR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQCPR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_RQLDPR(ep_num)));
		QMU_ERR("[RXD][ERROR]" "QCR0=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
			MGC_ReadQMU32(base, MGC_O_QMU_QCR0),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR2),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR3),
			MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR));
		QMU_ERR("[RXD][ERROR]HWO=%d, Next_GPD=%p ,DataBufLen=%d, DataBuf=%p, RecvLen=%d, Endpoint=%d\n",
			(u32) TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd),
			(u32) TGPD_GET_DataBUF_LEN(gpd), TGPD_GET_DATA(gpd),
			(u32) TGPD_GET_BUF_LEN(gpd), (u32) TGPD_GET_EPaddr(gpd));
	}

	QMU_INFO("[RXD]" "%s EP%d, Last=%p, End=%p, complete\n", __func__,
		 ep_num, Rx_gpd_last[ep_num], Rx_gpd_end[ep_num]);
}
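
/*
 * TX completion: same walk as qmu_done_rx, but over the IN endpoint ring.  A
 * trailing zero-length request is handled as a special case by sending the
 * ZLP through musb_tx_zlp_qmu() and giving the request back immediately.
 */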
void qmu_done_tx(struct musb *musb, u8 ep_num)
{
	void __iomem *base = qmu_base;
	TGPD *gpd = Tx_gpd_last[ep_num];
	TGPD *gpd_current = (TGPD *) (unsigned long)MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num));
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_in;
	struct usb_request *request = NULL;
	struct musb_request *req = NULL;

	/* translate the PHY addr read from the QMU register into a VIR addr */
	gpd_current = gpd_phys_to_virt((dma_addr_t) gpd_current, TXQ, ep_num);

	/*
	 * gpd or Last           gpd_current
	 *      |                     |
	 *      |-> GPD1 --> GPD2 --> GPD3 --> GPD4 --> GPD5 -|
	 *      |----------------------------------------------|
	 */
	QMU_INFO("[TXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		 __func__, ep_num, gpd, gpd_current, Tx_gpd_end[ep_num]);

	/* gpd_current should at least point to the GPD after the previous last one */
	if (gpd == gpd_current) {
		QMU_INFO("[TXD] gpd(%p) == gpd_current(%p)\n", gpd, gpd_current);
		return;
	}

	if (TGPD_IS_FLAGS_HWO(gpd)) {
		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		QMU_ERR("[TXD] HWO=1, CPR=%x\n", MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		/* BUG_ON(1); */
		return;
	}

	/* NORMAL EXEC FLOW */
	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
		QMU_INFO("[TXD]" "gpd=%p ->HWO=%d, BPD=%d, Next_GPD=%p, DataBuffer=%p, BufferLen=%d request=%p\n",
			 gpd, (u32) TGPD_GET_FLAG(gpd), (u32) TGPD_GET_FORMAT(gpd),
			 TGPD_GET_NEXT(gpd), TGPD_GET_DATA(gpd), (u32) TGPD_GET_BUF_LEN(gpd), req);

		if (!TGPD_GET_NEXT(gpd)) {
			QMU_ERR("[TXD][ERROR]" "Next GPD is null!!\n");
			QMU_ERR("[TXD][ERROR]" "Next GPD is null!!\n");
			QMU_ERR("[TXD][ERROR]" "Next GPD is null!!\n");
			QMU_ERR("[TXD][ERROR]" "Next GPD is null!!\n");
			QMU_ERR("[TXD][ERROR]" "Next GPD is null!!\n");
			/* BUG_ON(1); */
			return;
		}

		gpd = TGPD_GET_NEXT(gpd);
		gpd = gpd_phys_to_virt((dma_addr_t) gpd, TXQ, ep_num);

		/* trying to give back the request to the gadget driver */
		req = next_request(musb_ep);
		if (!req) {
			QMU_ERR("[TXD]" "%s Cannot get next request of %d, but QMU has done.\n",
				__func__, ep_num);
			return;
		}
		request = &req->request;

		Tx_gpd_last[ep_num] = gpd;
		musb_g_giveback(musb_ep, request, 0);
		req = next_request(musb_ep);
		if (req != NULL)
			request = &req->request;
	}

	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
		QMU_ERR("[TXD][ERROR]" "EP%d TQCSR=%x, TQSAR=%x, TQCPR=%x\n",
			ep_num,
			MGC_ReadQMU32(base, MGC_O_QMU_TQCSR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_TQSAR(ep_num)),
			MGC_ReadQMU32(base, MGC_O_QMU_TQCPR(ep_num)));
		QMU_ERR("[TXD][ERROR]" "QCR0=%x, QCR2=%x, QCR3=%x, QGCSR=%x\n",
			MGC_ReadQMU32(base, MGC_O_QMU_QCR0),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR2),
			MGC_ReadQMU32(base, MGC_O_QMU_QCR3),
			MGC_ReadQUCS32(base, MGC_O_QUCS_USBGCSR));
		QMU_ERR("[TXD][ERROR]" "HWO=%d, BPD=%d, Next_GPD=%p, DataBuffer=%p, BufferLen=%d, Endpoint=%d\n",
			(u32) TGPD_GET_FLAG(gpd), (u32) TGPD_GET_FORMAT(gpd),
			TGPD_GET_NEXT(gpd), TGPD_GET_DATA(gpd),
			(u32) TGPD_GET_BUF_LEN(gpd), (u32) TGPD_GET_EPaddr(gpd));
	}

	QMU_INFO("[TXD]" "%s EP%d, Last=%p, End=%p, complete\n", __func__,
		 ep_num, Tx_gpd_last[ep_num], Tx_gpd_end[ep_num]);

	/* special-case handling for zero-length requests; only solves the single-ZLP case */
	if (req != NULL) {
		if (request->length == 0) {
			QMU_WARN("[TXD]" "==Send ZLP== %p\n", req);
			musb_tx_zlp_qmu(musb, req->epnum);

			QMU_WARN("[TXD]" "Giveback ZLP of EP%d, actual:%d, length:%d %p\n",
				 req->epnum, request->actual, request->length, request);
			musb_g_giveback(musb_ep, request, 0);
		}
	}
}
void flush_ep_csr(struct musb *musb, u8 ep_num, u8 isRx)
{
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + ep_num;
	void __iomem *epio = hw_ep->regs;
	u16 csr, wCsr;

	if (epio == NULL)
		QMU_ERR("epio == NULL\n");

	if (isRx) {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
		if (musb->is_host)
			csr &= ~MUSB_RXCSR_H_REQPKT;

		/* write 2x to allow double buffering */
		/* CC: see if some check is necessary */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_CLRDATATOG);
	} else {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			wCsr = csr | MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, wCsr);
		}

		csr |= MUSB_TXCSR_FLUSHFIFO & ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_CLRDATATOG);

		/* CC: why is this special? */
		musb_writew(mbase, MUSB_INTRTX, 1 << ep_num);
	}
}
void mtk_disable_q(struct musb *musb, u8 ep_num, u8 isRx)
{
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + ep_num;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	mtk_qmu_disable(ep_num, isRx);
	qmu_reset_gpd_pool(ep_num, isRx);

	musb_ep_select(mbase, ep_num);
	if (isRx) {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~MUSB_RXCSR_DMAENAB;
		musb_writew(epio, MUSB_RXCSR, csr);
		flush_ep_csr(musb, ep_num, isRx);
	} else {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		flush_ep_csr(musb, ep_num, isRx);
	}
}
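
/*
 * Error recovery path: stop the queue and reset its GPD pool (the same
 * action as musb_flush_qmu), flush the endpoint FIFO and re-enable the queue
 * (the same action as musb_restart_qmu), then requeue every request still on
 * the endpoint's req_list and finally resume the queue.
 */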
void mtk_qmu_err_recover(struct musb *musb, u8 ep_num, u8 isRx, bool is_len_err)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;

	/* same action as musb_flush_qmu */
	mtk_qmu_stop(ep_num, isRx);
	qmu_reset_gpd_pool(ep_num, isRx);

	/* same action as musb_restart_qmu */
	flush_ep_csr(musb, ep_num, isRx);
	mtk_qmu_enable(musb, ep_num, isRx);

	if (isRx)
		musb_ep = &musb->endpoints[ep_num].ep_out;
	else
		musb_ep = &musb->endpoints[ep_num].ep_in;

	/* requeue all requests, basically the same as musb_kick_D_CmdQ */
	list_for_each_entry(request, &musb_ep->req_list, list) {
		QMU_ERR("request 0x%p length(0x%x) len_err(%d)\n", request,
			request->request.length, is_len_err);

		if (request->request.dma != DMA_ADDR_INVALID) {
			if (request->tx) {
				QMU_ERR("[TX] gpd=%p, epnum=%d, len=%d\n", Tx_gpd_end[ep_num],
					ep_num, request->request.length);
				request->request.actual = request->request.length;
				if (request->request.length > 0) {
					QMU_ERR("[TX]" "Send non-ZLP cases\n");
					mtk_qmu_insert_task(request->epnum,
							    isRx,
							    (u8 *) request->request.dma,
							    request->request.length,
							    ((request->request.zero == 1) ? 1 : 0));
				} else if (request->request.length == 0) {
					/* this case may be a problem */
					QMU_ERR("[TX]" "Send ZLP cases, may be a problem!!!\n");
					musb_tx_zlp_qmu(musb, request->epnum);
					musb_g_giveback(musb_ep, &(request->request), 0);
				} else {
					QMU_ERR("ERR, TX, request->request.length(%d)\n",
						request->request.length);
				}
			} else {
				QMU_ERR("[RX] gpd=%p, epnum=%d, len=%d\n",
					Rx_gpd_end[ep_num], ep_num, request->request.length);
				mtk_qmu_insert_task(request->epnum,
						    isRx,
						    (u8 *) request->request.dma,
						    request->request.length,
						    ((request->request.zero == 1) ? 1 : 0));
			}
		}
	}

	QMU_ERR("RESUME QMU\n");
	/* RESUME QMU */
	mtk_qmu_resume(ep_num, isRx);
}
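
/*
 * Decode the QMU error/empty interrupt status.  Each handled source is
 * logged and acknowledged by writing the status back to its interrupt
 * register; if any endpoint error was found, only the last endpoint recorded
 * in err_ep_num is recovered, as the comment at the bottom notes.
 */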
void mtk_qmu_irq_err(struct musb *musb, u32 qisar)
{
	u8 i;
	u32 wQmuVal;
	u32 wRetVal;
	void __iomem *base = qmu_base;
	u8 err_ep_num = 0;
	bool is_len_err = false;
	u8 isRx;

	wQmuVal = qisar;

	/* RXQ ERROR */
	if (wQmuVal & DQMU_M_RXQ_ERR) {
		wRetVal =
		    MGC_ReadQIRQ32(base,
				   MGC_O_QIRQ_RQEIR) & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_RQEIMR)));
		QMU_ERR("RQ error in QMU mode![0x%x]\n", wRetVal);

		isRx = RXQ;
		for (i = 1; i <= RXQ_NUM; i++) {
			if (wRetVal & DQMU_M_RX_GPDCS_ERR(i)) {
				QMU_ERR("RQ %d GPD checksum error!\n", i);
				err_ep_num = i;
			}
			if (wRetVal & DQMU_M_RX_LEN_ERR(i)) {
				QMU_ERR("RQ %d receive length error!\n", i);
				err_ep_num = i;
				is_len_err = true;
			}
			if (wRetVal & DQMU_M_RX_ZLP_ERR(i))
				QMU_ERR("RQ %d received a zlp packet!\n", i);
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_RQEIR, wRetVal);
	}

	/* TXQ ERROR */
	if (wQmuVal & DQMU_M_TXQ_ERR) {
		isRx = TXQ;
		wRetVal =
		    MGC_ReadQIRQ32(base,
				   MGC_O_QIRQ_TQEIR) & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TQEIMR)));
		QMU_ERR("TQ error in QMU mode![0x%x]\n", wRetVal);

		for (i = 1; i <= TXQ_NUM; i++) {
			if (wRetVal & DQMU_M_TX_BDCS_ERR(i)) {
				QMU_ERR("TQ %d BD checksum error!\n", i);
				err_ep_num = i;
			}
			if (wRetVal & DQMU_M_TX_GPDCS_ERR(i)) {
				QMU_ERR("TQ %d GPD checksum error!\n", i);
				err_ep_num = i;
			}
			if (wRetVal & DQMU_M_TX_LEN_ERR(i)) {
				QMU_ERR("TQ %d buffer length error!\n", i);
				err_ep_num = i;
				is_len_err = true;
			}
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TQEIR, wRetVal);
	}

	/* RX EP ERROR */
	if (wQmuVal & DQMU_M_RXEP_ERR) {
		isRx = RXQ;
		wRetVal =
		    MGC_ReadQIRQ32(base,
				   MGC_O_QIRQ_REPEIR) &
				   (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEIMR)));
		QMU_ERR("Rx endpoint error in QMU mode![0x%x]\n", wRetVal);

		for (i = 1; i <= RXQ_NUM; i++) {
			if (wRetVal & DQMU_M_RX_EP_ERR(i)) {
				QMU_ERR("RX EP %d ERR\n", i);
				err_ep_num = i;
			}
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEIR, wRetVal);
	}

	/* TX EP ERROR */
	if (wQmuVal & DQMU_M_TXEP_ERR) {
		isRx = TXQ;
		wRetVal =
		    MGC_ReadQIRQ32(base,
				   MGC_O_QIRQ_TEPEIR) &
				   (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEIMR)));
		QMU_ERR("Tx endpoint error in QMU mode![0x%x]\n", wRetVal);

		for (i = 1; i <= TXQ_NUM; i++) {
			if (wRetVal & DQMU_M_TX_EP_ERR(i)) {
				QMU_ERR("TX EP %d ERR\n", i);
				err_ep_num = i;
			}
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEIR, wRetVal);
	}

	/* RXQ EMPTY */
	if (wQmuVal & DQMU_M_RQ_EMPTY) {
		wRetVal = MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEMPR)
			  & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_REPEMPMR)));
		QMU_ERR("RQ Empty in QMU mode![0x%x]\n", wRetVal);

		for (i = 1; i <= RXQ_NUM; i++) {
			if (wRetVal & DQMU_M_RX_EMPTY(i))
				QMU_ERR("RQ %d Empty!\n", i);
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_REPEMPR, wRetVal);
	}

	/* TXQ EMPTY */
	if (wQmuVal & DQMU_M_TQ_EMPTY) {
		wRetVal = MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEMPR)
			  & (~(MGC_ReadQIRQ32(base, MGC_O_QIRQ_TEPEMPMR)));
		QMU_ERR("TQ Empty in QMU mode![0x%x]\n", wRetVal);

		for (i = 1; i <= TXQ_NUM; i++) {
			if (wRetVal & DQMU_M_TX_EMPTY(i))
				QMU_ERR("TQ %d Empty!\n", i);
		}
		MGC_WriteQIRQ32(base, MGC_O_QIRQ_TEPEMPR, wRetVal);
	}

	/* QMU ERR RECOVER, only services one ep error? */
	if (err_ep_num)
		mtk_qmu_err_recover(musb, err_ep_num, isRx, is_len_err);
}
#endif