/*
 * mu3d_hal_qmu_drv.c — MediaTek SSUSB (MU3D) QMU HAL:
 * GPD/BD descriptor ring allocation, mapping and management.
 */
  1. #ifdef USE_SSUSB_QMU
  2. #include "mu3d_hal_osal.h"
  3. #define _MTK_QMU_DRV_EXT_
  4. #include "mu3d_hal_qmu_drv.h"
  5. #undef _MTK_QMU_DRV_EXT_
  6. #include "mu3d_hal_usb_drv.h"
  7. #include "mu3d_hal_hw.h"
  8. /**
  9. * get_bd - get a null bd
  10. * @args - arg1: dir, arg2: ep number
  11. */
  12. PBD get_bd(USB_DIR dir, DEV_UINT32 num)
  13. {
  14. PBD ptr;
  15. if (dir == USB_RX) {
  16. ptr = (PBD) Rx_bd_List[num].pNext;
  17. os_printk(K_DEBUG, "%s Rx_bd_List[%d].pNext=%p\n", __func__, num,
  18. (Rx_bd_List[num].pNext));
  19. if ((Rx_bd_List[num].pNext + 1) < Rx_bd_List[num].pEnd)
  20. Rx_bd_List[num].pNext++;
  21. else
  22. Rx_bd_List[num].pNext = Rx_bd_List[num].pStart;
  23. } else {
  24. ptr = (PBD) Tx_bd_List[num].pNext;
  25. os_printk(K_DEBUG, "%s Tx_gpd_List[%d].pNext=%p\n", __func__, num,
  26. (Tx_bd_List[num].pNext));
  27. Tx_bd_List[num].pNext++;
  28. Tx_bd_List[num].pNext = Tx_bd_List[num].pNext + AT_BD_EXT_LEN;
  29. if (Tx_bd_List[num].pNext >= Tx_bd_List[num].pEnd)
  30. Tx_bd_List[num].pNext = Tx_bd_List[num].pStart;
  31. }
  32. return ptr;
  33. }
  34. /**
  35. * get_bd - get a null gpd
  36. * @args - arg1: dir, arg2: ep number
  37. */
  38. PGPD get_gpd(USB_DIR dir, DEV_UINT32 num)
  39. {
  40. PGPD ptr;
  41. if (dir == USB_RX) {
  42. ptr = Rx_gpd_List[num].pNext;
  43. /* qmu_printk(K_DEBUG, "[RX]""GPD List[%d]->Next=%p\n", num, Rx_gpd_List[num].pNext); */
  44. Rx_gpd_List[num].pNext =
  45. Rx_gpd_List[num].pNext + (AT_GPD_EXT_LEN / sizeof(TGPD) + 1);
  46. /* qmu_printk(K_DEBUG, "[Rx]""GPD List[%d]->Start=%p, Next=%p, End=%p\n", */
  47. /* num, Rx_gpd_List[num].pStart, Rx_gpd_List[num].pNext, Rx_gpd_List[num].pEnd); */
  48. if (Rx_gpd_List[num].pNext >= Rx_gpd_List[num].pEnd)
  49. Rx_gpd_List[num].pNext = Rx_gpd_List[num].pStart;
  50. } else {
  51. ptr = Tx_gpd_List[num].pNext;
  52. /* qmu_printk(K_DEBUG, "[TX]""GPD List[%d]->Next=%p\n", num, Tx_gpd_List[num].pNext); */
  53. /*
  54. * Here is really tricky.
  55. * The size of a GPD is 16 bytes. But the cache line size is 64B.
  56. * If all GPDs are allocated continiously.
  57. * When doing invalidating the cache. The size of 64B from the specified address would flush to
  58. * the physical memory. This action may cause that other GPDs corrupted, like HWO=1 when receiving
  59. * QMU Done interrupt. Current workaround is that let a GPD as 64 bytes. So the next
  60. * GPD is behind 64bytes.
  61. */
  62. Tx_gpd_List[num].pNext =
  63. Tx_gpd_List[num].pNext + (AT_GPD_EXT_LEN / sizeof(TGPD) + 1);
  64. /* qmu_printk(K_DEBUG, "[TX]""GPD List[%d]->Start=%p, pNext=%p, pEnd=%p\n", */
  65. /* num, Tx_gpd_List[num].pStart, Tx_gpd_List[num].pNext, Tx_gpd_List[num].pEnd); */
  66. if (Tx_gpd_List[num].pNext >= Tx_gpd_List[num].pEnd)
  67. Tx_gpd_List[num].pNext = Tx_gpd_List[num].pStart;
  68. }
  69. return ptr;
  70. }
  71. /**
  72. * get_bd - align gpd ptr to target ptr
  73. * @args - arg1: dir, arg2: ep number, arg3: target ptr
  74. */
  75. void gpd_ptr_align(USB_DIR dir, DEV_UINT32 num, PGPD ptr)
  76. {
  77. DEV_UINT32 run_next;
  78. run_next = true;
  79. /* qmu_printk(K_DEBUG,"%s %d, EP%d, ptr=%p\n", __func__, dir, num, ptr); */
  80. while (run_next) {
  81. if (ptr == get_gpd(dir, num))
  82. run_next = false;
  83. }
  84. }
  85. /**
  86. * bd_virt_to_phys - map bd virtual address to physical address
  87. * @args - arg1: virtual address, arg2: dir, arg3: ep number
  88. * @return - physical address
  89. */
  90. dma_addr_t bd_virt_to_phys(void *vaddr, USB_DIR dir, DEV_UINT32 num)
  91. {
  92. uintptr_t ptr;
  93. if (dir == USB_RX)
  94. ptr = rx_bd_map[num].p_desc_dma;
  95. else
  96. ptr = tx_bd_map[num].p_desc_dma;
  97. os_printk(K_DEBUG, "%s %s[%d]phys=%lx<->virt=%p\n", __func__,
  98. ((dir == USB_RX) ? "RX" : "TX"), num, ptr, vaddr);
  99. return (dma_addr_t) ptr;
  100. }
  101. /**
  102. * bd_phys_to_virt - map bd physical address to virtual address
  103. * @args - arg1: physical address, arg2: dir, arg3: ep number
  104. * @return - virtual address
  105. */
  106. void *bd_phys_to_virt(void *paddr, USB_DIR dir, DEV_UINT32 num)
  107. {
  108. void *ptr;
  109. os_printk(K_DEBUG, "bd_phys_to_virt paddr=%p, num=%d\n", paddr, num);
  110. if (dir == USB_RX)
  111. ptr = rx_bd_map[num].p_desc;
  112. else
  113. ptr = tx_bd_map[num].p_desc;
  114. /*os_printk(K_DEBUG,"%s %s[%d]phys=%p<->virt=%p\n", __func__, \
  115. ((dir==USB_RX)?"RX":"TX"), num , paddr, ptr); */
  116. return ptr;
  117. }
  118. /**
  119. * mu3d_hal_gpd_virt_to_phys - map gpd virtual address to physical address
  120. * @args - arg1: virtual address, arg2: dir, arg3: ep number
  121. * @return - physical address
  122. */
  123. dma_addr_t mu3d_hal_gpd_virt_to_phys(void *vaddr, USB_DIR dir, DEV_UINT32 num)
  124. {
  125. uintptr_t ptr;
  126. if (dir == USB_RX)
  127. ptr = rx_gpd_map[num].p_desc_dma + (dma_addr_t) (vaddr - rx_gpd_map[num].p_desc);
  128. else
  129. ptr = tx_gpd_map[num].p_desc_dma + (dma_addr_t) (vaddr - tx_gpd_map[num].p_desc);
  130. os_printk(K_DEBUG, "%s %s[%d]phys=%lx<->virt=%p\n", __func__,
  131. ((dir == USB_RX) ? "RX" : "TX"), num, ptr, vaddr);
  132. return (dma_addr_t) ptr;
  133. }
  134. /**
  135. * gpd_phys_to_virt - map gpd physical address to virtual address
  136. * @args - arg1: physical address, arg2: dir, arg3: ep number
  137. * @return - virtual address
  138. */
  139. void *gpd_phys_to_virt(void *paddr, USB_DIR dir, DEV_UINT32 num)
  140. {
  141. void *ptr;
  142. /* os_printk(K_DEBUG,"%s paddr=%p, num=%d\n", __func__, paddr, num); */
  143. if (dir == USB_RX) {
  144. /*os_printk(K_DEBUG, "%s Rx_gpd_Offset[%d]=0x%08X\n", __func__, num, \
  145. Rx_gpd_Offset[num]); */
  146. ptr =
  147. (void *)((uintptr_t) rx_gpd_map[num].p_desc +
  148. (uintptr_t) (paddr - rx_gpd_map[num].p_desc_dma));
  149. } else {
  150. /*os_printk(K_DEBUG,"%s Tx_gpd_Offset[%d]=0x%08X\n", __func__, num, \
  151. Tx_gpd_Offset[num]); */
  152. ptr =
  153. (void *)((uintptr_t) tx_gpd_map[num].p_desc +
  154. (uintptr_t) (paddr - tx_gpd_map[num].p_desc_dma));
  155. }
  156. /*os_printk(K_DEBUG,"%s %s[%d]phys=%p<->virt=%p\n", __func__, \
  157. ((dir==USB_RX)?"RX":"TX"), num , paddr, ptr); */
  158. return ptr;
  159. }
  160. /**
  161. * init_bd_list - initialize bd management list
  162. * @args - arg1: dir, arg2: ep number, arg3: bd virtual addr, arg4: bd ioremap addr, arg5: bd number
  163. */
  164. void init_bd_list(USB_DIR dir, int num, PBD ptr, dma_addr_t io_ptr, DEV_UINT32 size)
  165. {
  166. if (dir == USB_RX) {
  167. Rx_bd_List[num].pStart = ptr;
  168. Rx_bd_List[num].pEnd = (PBD) (ptr + size);
  169. rx_bd_map[num].p_desc = (void *)ptr;
  170. rx_bd_map[num].p_desc_dma = io_ptr;
  171. ptr++;
  172. Rx_bd_List[num].pNext = ptr;
  173. os_printk(K_DEBUG, "Rx_bd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
  174. num, Rx_bd_List[num].pStart, Rx_bd_List[num].pNext, Rx_bd_List[num].pEnd);
  175. os_printk(K_DEBUG, "rx_bd_map[%d] vir=%p dma=%08llx\n", num,
  176. rx_bd_map[num].p_desc, (unsigned long long)rx_bd_map[num].p_desc_dma);
  177. os_printk(K_DEBUG, "vir=%p dma=%08llx\n", ptr, (unsigned long long)io_ptr);
  178. } else {
  179. Tx_bd_List[num].pStart = ptr;
  180. Tx_bd_List[num].pEnd = (PBD) ((DEV_UINT8 *) (ptr + size) + AT_BD_EXT_LEN * size);
  181. tx_bd_map[num].p_desc = (void *)ptr;
  182. tx_bd_map[num].p_desc_dma = io_ptr;
  183. ptr++;
  184. Tx_bd_List[num].pNext = (PBD) ((DEV_UINT8 *) ptr + AT_BD_EXT_LEN);
  185. os_printk(K_DEBUG, "Tx_bd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
  186. num, Tx_bd_List[num].pStart, Tx_bd_List[num].pNext, Tx_bd_List[num].pEnd);
  187. os_printk(K_DEBUG, "tx_bd_map[%d] vir=%p dma=%08llx\n", num,
  188. tx_bd_map[num].p_desc, (unsigned long long)tx_bd_map[num].p_desc_dma);
  189. os_printk(K_DEBUG, "vir=%p, dma=%08llx\n", ptr, (unsigned long long)io_ptr);
  190. }
  191. }
  192. /**
  193. * init_gpd_list - initialize gpd management list
  194. * @args - arg1: dir, arg2: ep number, arg3: gpd virtual addr, arg4: gpd ioremap addr, arg5: gpd number
  195. */
  196. void init_gpd_list(USB_DIR dir, int num, PGPD ptr, dma_addr_t io_ptr, DEV_UINT32 size)
  197. {
  198. if (dir == USB_RX) {
  199. Rx_gpd_List[num].pStart = ptr;
  200. Rx_gpd_List[num].pEnd = (PGPD) ((DEV_UINT8 *) (ptr + size) + AT_GPD_EXT_LEN * size);
  201. rx_gpd_map[num].p_desc = (void *)ptr;
  202. rx_gpd_map[num].p_desc_dma = io_ptr;
  203. ptr++;
  204. Rx_gpd_List[num].pNext = (PGPD) ((DEV_UINT8 *) ptr + AT_GPD_EXT_LEN);
  205. qmu_printk(K_INFO, "Rx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
  206. num, Rx_gpd_List[num].pStart, Rx_gpd_List[num].pNext,
  207. Rx_gpd_List[num].pEnd);
  208. qmu_printk(K_INFO, "rx_gpd_map[%d] vir=%p dma=%08llx\n", num,
  209. rx_gpd_map[num].p_desc, (unsigned long long)rx_gpd_map[num].p_desc_dma);
  210. qmu_printk(K_INFO, "vir=%p, dma=%08llx\n", ptr, (unsigned long long)io_ptr);
  211. } else {
  212. Tx_gpd_List[num].pStart = ptr;
  213. Tx_gpd_List[num].pEnd = (PGPD) ((DEV_UINT8 *) (ptr + size) + AT_GPD_EXT_LEN * size);
  214. tx_gpd_map[num].p_desc = (void *)ptr;
  215. tx_gpd_map[num].p_desc_dma = io_ptr;
  216. ptr++;
  217. Tx_gpd_List[num].pNext = (PGPD) ((DEV_UINT8 *) ptr + AT_GPD_EXT_LEN);
  218. qmu_printk(K_INFO, "Tx_gpd_List[%d].pStart=%p, pNext=%p, pEnd=%p\n",
  219. num, Tx_gpd_List[num].pStart, Tx_gpd_List[num].pNext,
  220. Tx_gpd_List[num].pEnd);
  221. qmu_printk(K_INFO, "tx_gpd_map[%d] vir=%p dma=%08llx\n", num,
  222. tx_gpd_map[num].p_desc, (unsigned long long)tx_gpd_map[num].p_desc_dma);
  223. qmu_printk(K_INFO, "vir=%p, dma=%08llx\n", ptr, (unsigned long long)io_ptr);
  224. }
  225. }
  226. /**
  227. * free_gpd - free gpd management list
  228. * @args - arg1: dir, arg2: ep number
  229. */
  230. void free_gpd(USB_DIR dir, int num)
  231. {
  232. if (dir == USB_RX) {
  233. os_memset(Rx_gpd_List[num].pStart, 0,
  234. MAX_GPD_NUM * (sizeof(TGPD) + AT_GPD_EXT_LEN));
  235. } else {
  236. os_memset(Tx_gpd_List[num].pStart, 0,
  237. MAX_GPD_NUM * (sizeof(TGPD) + AT_GPD_EXT_LEN));
  238. }
  239. }
  240. /**
  241. * mu3d_hal_alloc_qmu_mem - allocate gpd and bd memory for all ep
  242. *
  243. */
  244. /* USBIF */
/* DMA handles returned by dma_alloc_coherent() for each endpoint's GPD
 * pool, recorded by _ex_mu3d_hal_alloc_qmu_mem() so that
 * _ex_mu3d_hal_free_qmu_mem() can pass them back to dma_free_coherent().
 * Indexed by endpoint number 1..MAX_QMU_EP (index 0 unused);
 * NOTE(review): array size 15 assumes MAX_QMU_EP <= 14 — TODO confirm. */
static dma_addr_t Tx_gpd_ioptr[15];
static dma_addr_t Rx_gpd_ioptr[15];
  247. void _ex_mu3d_hal_free_qmu_mem(struct device *dev)
  248. {
  249. DEV_UINT32 i;
  250. DEV_UINT32 size = (sizeof(TGPD) + AT_GPD_EXT_LEN) * MAX_GPD_NUM;
  251. qmu_printk(K_INFO, "_ex_mu3d_hal_free_qmu_mem +\n");
  252. /*TODO:dma_free_coherent() is needed
  253. if _ex_mu3d_hal_alloc_qmu_mem() would be called more than once
  254. */
  255. for (i = 1; i <= MAX_QMU_EP; i++) {
  256. #if 0
  257. kfree(Rx_gpd_head[i]);
  258. kfree(Tx_gpd_head[i]);
  259. #else
  260. dma_free_coherent(dev, size, Rx_gpd_head[i], Rx_gpd_ioptr[i]);
  261. dma_free_coherent(dev, size, Tx_gpd_head[i], Tx_gpd_ioptr[i]);
  262. #endif
  263. }
  264. qmu_printk(K_INFO, "_ex_mu3d_hal_free_qmu_mem -\n");
  265. }
  266. void _ex_mu3d_hal_alloc_qmu_mem(struct device *dev)
  267. {
  268. DEV_UINT32 i, size;
  269. TGPD *ptr;
  270. dma_addr_t io_ptr;
  271. dma_addr_t dma_handle;
  272. /*TODO: dma_pool_alloc() is an alternative choice
  273. once the memory size is a concern
  274. */
  275. for (i = 1; i <= MAX_QMU_EP; i++) {
  276. /* Allocate Rx GPD */
  277. size = (sizeof(TGPD) + AT_GPD_EXT_LEN) * MAX_GPD_NUM;
  278. ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
  279. memset(ptr, 0, size);
  280. Rx_gpd_ioptr[i] = io_ptr = dma_handle;
  281. init_gpd_list(USB_RX, i, ptr, io_ptr, MAX_GPD_NUM);
  282. Rx_gpd_end[i] = ptr;
  283. qmu_printk(K_INFO, "ALLOC RX GPD End [%d] Virtual Mem=%p, DMA addr=%08llx\n",
  284. i, Rx_gpd_end[i], (unsigned long long)io_ptr);
  285. TGPD_CLR_FLAGS_HWO(Rx_gpd_end[i]);
  286. Rx_gpd_head[i] = Rx_gpd_last[i] = Rx_gpd_end[i];
  287. qmu_printk(K_INFO, "RQSAR[%d]=%08llx\n", i,
  288. (unsigned long long)mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[i], USB_RX, i));
  289. /* Allocate Tx GPD */
  290. size = (sizeof(TGPD) + AT_GPD_EXT_LEN) * MAX_GPD_NUM;
  291. ptr = (TGPD *) dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
  292. memset(ptr, 0, size);
  293. Tx_gpd_ioptr[i] = io_ptr = dma_handle;
  294. init_gpd_list(USB_TX, i, ptr, io_ptr, MAX_GPD_NUM);
  295. Tx_gpd_end[i] = ptr;
  296. qmu_printk(K_INFO, "ALLOC TX GPD End [%d] Virtual Mem=%p, DMA addr=%08llx\n",
  297. i, Tx_gpd_end[i], (unsigned long long)io_ptr);
  298. TGPD_CLR_FLAGS_HWO(Tx_gpd_end[i]);
  299. Tx_gpd_head[i] = Tx_gpd_last[i] = Tx_gpd_end[i];
  300. qmu_printk(K_INFO, "TQSAR[%d]=%08llx\n", i,
  301. (unsigned long long)mu3d_hal_gpd_virt_to_phys(Tx_gpd_end[i], USB_TX, i));
  302. }
  303. }
  304. void mu3d_hal_free_qmu_mem(void)
  305. {
  306. DEV_UINT32 i;
  307. for (i = 1; i <= MAX_QMU_EP; i++) {
  308. kfree(Rx_gpd_head[i]);
  309. kfree(Tx_gpd_head[i]);
  310. kfree(Rx_bd_List[i].pStart);
  311. kfree(Tx_bd_List[i].pStart);
  312. }
  313. }
/**
 * mu3d_hal_alloc_qmu_mem - legacy (streaming-DMA) allocation of GPD/BD pools
 *
 * For each QMU endpoint 1..MAX_QMU_EP allocates and zeroes the RX/TX GPD
 * rings and the RX/TX BD rings, maps them for DMA with dma_map_single(),
 * and initialises the ring bookkeeping via init_gpd_list()/init_bd_list().
 *
 * NOTE(review): the original in-body comments had the Tx/Rx labels swapped
 * relative to the actual USB_RX/USB_TX arguments — corrected below.
 * NOTE(review): the RX GPD and RX BD pools are sized WITHOUT the
 * AT_*_EXT_LEN padding while the TX pools include it, yet init_gpd_list()
 * and get_gpd() advance RX GPDs with the padded stride — confirm the RX
 * pool sizing against the ring layout.
 * NOTE(review): os_mem_alloc() results are used unchecked; an OOM here
 * would crash in os_memset().
 */
void mu3d_hal_alloc_qmu_mem(void)
{
	DEV_UINT32 i, size;
	TGPD *ptr;		/* GPD pool virtual address */
	dma_addr_t io_ptr;	/* GPD pool DMA address */
	TBD *bptr;		/* BD pool virtual address */
	dma_addr_t io_bptr;	/* BD pool DMA address */

	for (i = 1; i <= MAX_QMU_EP; i++) {
		/* Allocate Rx GPD ring (label fixed: this is used with USB_RX) */
		size = sizeof(TGPD);
		size *= MAX_GPD_NUM;
		ptr = (TGPD *) os_mem_alloc(size);
		os_memset(ptr, 0, size);
		io_ptr = dma_map_single(NULL, ptr, size, DMA_TO_DEVICE);
		init_gpd_list(USB_RX, i, ptr, io_ptr, MAX_GPD_NUM);
		Rx_gpd_end[i] = ptr;
		os_printk(K_DEBUG, "ALLOC RX GPD End [%d] Virtual Mem=%p, DMA addr=%08llx\n",
			  i, Rx_gpd_end[i], (unsigned long long)io_ptr);
		TGPD_CLR_FLAGS_HWO(Rx_gpd_end[i]);
		Rx_gpd_head[i] = Rx_gpd_last[i] = Rx_gpd_end[i];
		os_printk(K_DEBUG, "RQSAR[%d]=%08llx\n", i,
			  (unsigned long long)mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[i], USB_RX, i));
		/* Allocate Tx GPD ring (label fixed: this is used with USB_TX),
		 * padded by AT_GPD_EXT_LEN per descriptor. */
		size = sizeof(TGPD);
		size += AT_GPD_EXT_LEN;
		size *= MAX_GPD_NUM;
		ptr = (TGPD *) os_mem_alloc(size);
		os_memset(ptr, 0, size);
		io_ptr = dma_map_single(NULL, ptr, size, DMA_TO_DEVICE);
		init_gpd_list(USB_TX, i, ptr, io_ptr, MAX_GPD_NUM);
		Tx_gpd_end[i] = ptr;
		os_printk(K_DEBUG, "ALLOC TX GPD End [%d] Virtual Mem=%p, DMA addr=%08llx\n",
			  i, Tx_gpd_end[i], (unsigned long long)io_ptr);
		TGPD_CLR_FLAGS_HWO(Tx_gpd_end[i]);
		Tx_gpd_head[i] = Tx_gpd_last[i] = Tx_gpd_end[i];
		os_printk(K_DEBUG, "TQSAR[%d]=%08llx\n", i,
			  (unsigned long long)mu3d_hal_gpd_virt_to_phys(Tx_gpd_end[i], USB_TX, i));
		/* Allocate Rx BD ring (label fixed: this is used with USB_RX) */
		size = (sizeof(TBD));
		size *= MAX_BD_NUM;
		bptr = (TBD *) os_mem_alloc(size);
		os_memset(bptr, 0, size);
		io_bptr = dma_map_single(NULL, bptr, size, DMA_TO_DEVICE);
		init_bd_list(USB_RX, i, bptr, io_bptr, MAX_BD_NUM);
		/* Allocate Tx BD ring (label fixed: this is used with USB_TX),
		 * padded by AT_BD_EXT_LEN per descriptor. */
		size = (sizeof(TBD));
		size += AT_BD_EXT_LEN;
		size *= MAX_BD_NUM;
		bptr = (TBD *) os_mem_alloc(size);
		os_memset(bptr, 0, size);
		io_bptr = dma_map_single(NULL, bptr, size, DMA_TO_DEVICE);
		init_bd_list(USB_TX, i, bptr, io_bptr, MAX_BD_NUM);
	}
}
  369. /**
  370. * mu3d_hal_init_qmu - initialize qmu
  371. *
  372. */
  373. void _ex_mu3d_hal_init_qmu(void)
  374. {
  375. DEV_UINT32 i;
  376. DEV_UINT32 QCR = 0;
  377. /* Initialize QMU Tx/Rx start address. */
  378. for (i = 1; i <= MAX_QMU_EP; i++) {
  379. qmu_printk(K_INFO, "==EP[%d]==Start addr RXQ=0x%08lx, TXQ=0x%08lx\n", i,
  380. (uintptr_t) mu3d_hal_gpd_virt_to_phys(Rx_gpd_head[i], USB_RX, i),
  381. (uintptr_t) mu3d_hal_gpd_virt_to_phys(Tx_gpd_head[i], USB_TX, i));
  382. QCR |= QMU_RX_EN(i);
  383. QCR |= QMU_TX_EN(i);
  384. os_writel(USB_QMU_RQSAR(i), mu3d_hal_gpd_virt_to_phys(Rx_gpd_head[i], USB_RX, i));
  385. os_writel(USB_QMU_TQSAR(i), mu3d_hal_gpd_virt_to_phys(Tx_gpd_head[i], USB_TX, i));
  386. Tx_gpd_end[i] = Tx_gpd_last[i] = Tx_gpd_head[i];
  387. Rx_gpd_end[i] = Rx_gpd_last[i] = Rx_gpd_head[i];
  388. gpd_ptr_align(USB_TX, i, Tx_gpd_end[i]);
  389. gpd_ptr_align(USB_RX, i, Rx_gpd_end[i]);
  390. }
  391. /* Enable QMU interrupt. */
  392. os_writel(U3D_QIESR1, TXQ_EMPTY_IESR | TXQ_CSERR_IESR | TXQ_LENERR_IESR |
  393. RXQ_EMPTY_IESR | RXQ_CSERR_IESR | RXQ_LENERR_IESR | RXQ_ZLPERR_IESR);
  394. os_writel(U3D_EPIESR, EP0ISR);
  395. }
  396. void mu3d_hal_init_qmu(void)
  397. {
  398. DEV_UINT32 i;
  399. DEV_UINT32 QCR = 0;
  400. /* Initialize QMU Tx/Rx start address. */
  401. for (i = 1; i <= MAX_QMU_EP; i++) {
  402. os_printk(K_DEBUG, "==EP[%d]==Start addr RXQ=0x%08lx, TXQ=0x%08lx\n", i,
  403. (uintptr_t) mu3d_hal_gpd_virt_to_phys(Rx_gpd_head[i], USB_RX, i),
  404. (uintptr_t) mu3d_hal_gpd_virt_to_phys(Tx_gpd_head[i], USB_TX, i));
  405. QCR |= QMU_RX_EN(i);
  406. QCR |= QMU_TX_EN(i);
  407. os_writel(USB_QMU_RQSAR(i), mu3d_hal_gpd_virt_to_phys(Rx_gpd_head[i], USB_RX, i));
  408. os_writel(USB_QMU_TQSAR(i), mu3d_hal_gpd_virt_to_phys(Tx_gpd_head[i], USB_TX, i));
  409. Tx_gpd_end[i] = Tx_gpd_last[i] = Tx_gpd_head[i];
  410. Rx_gpd_end[i] = Rx_gpd_last[i] = Rx_gpd_head[i];
  411. gpd_ptr_align(USB_TX, i, Tx_gpd_end[i]);
  412. gpd_ptr_align(USB_RX, i, Rx_gpd_end[i]);
  413. }
  414. /* Enable QMU Tx/Rx. */
  415. os_writel(U3D_QGCSR, QCR);
  416. os_writel(U3D_QIESR0, QCR);
  417. /* Enable QMU interrupt. */
  418. os_writel(U3D_QIESR1,
  419. TXQ_EMPTY_IESR | TXQ_CSERR_IESR | TXQ_LENERR_IESR | RXQ_EMPTY_IESR |
  420. RXQ_CSERR_IESR | RXQ_LENERR_IESR | RXQ_ZLPERR_IESR);
  421. os_writel(U3D_EPIESR, EP0ISR);
  422. }
/**
 * mu3d_hal_cal_checksum - compute the QMU descriptor checksum
 * @data: first byte of the descriptor region covered by the checksum
 * @len: number of bytes to sum (callers pass CHECKSUM_LENGTH)
 *
 * Zeroes data[1] first — presumably the descriptor's own checksum field,
 * so it does not contribute to the sum (TODO confirm against the GPD/BD
 * layout) — then returns 0xFF minus the byte-wise sum of data[0..len-1].
 *
 * Keep the statement order, "noinline" and the mb() barrier intact: they
 * are a deliberate workaround (see below), so this function must not be
 * restructured casually.
 */
noinline DEV_UINT8 mu3d_hal_cal_checksum(DEV_UINT8 *data, DEV_INT32 len)
{
	DEV_UINT8 *uDataPtr, ckSum;
	DEV_INT32 i;
	*(data + 1) = 0x0;
	uDataPtr = data;
	ckSum = 0;
	/* For ALPS01572117, we found calculated QMU check sum is wrong. (Dump memory value directly.) */
	/* After check this function, we did not find any flaw. Still cannot find how to get this wrong value. */
	/* Maybe it is a memory corruption or complier problem. Add "noinline" and "mb();" to prevent this problem. */
	mb();
	for (i = 0; i < len; i++)
		ckSum += *(uDataPtr + i);
	return 0xFF - ckSum;
}
/**
 * mu3d_hal_resume_qmu - kick a stopped QMU queue back into the run state
 * @q_num: endpoint/queue number
 * @dir: USB_TX or USB_RX
 *
 * Writes QMU_Q_RESUME into the queue's control/status register.  If the
 * register then reads back as 0 (the queue did not restart), the resume
 * write is retried once, logging warnings before and after the retry.
 * Any other direction value is a programming error (BUG_ON).
 */
void mu3d_hal_resume_qmu(DEV_INT32 q_num, USB_DIR dir)
{
	if (dir == USB_TX) {
		os_writel(USB_QMU_TQCSR(q_num), QMU_Q_RESUME);
		/* A zero read-back means the queue is still stopped — retry once. */
		if (!os_readl(USB_QMU_TQCSR(q_num))) {
			qmu_printk(K_WARNIN, "[ERROR]" "%s TQCSR[%d]=%x\n", __func__, q_num,
				   os_readl(USB_QMU_TQCSR(q_num)));
			os_writel(USB_QMU_TQCSR(q_num), QMU_Q_RESUME);
			qmu_printk(K_WARNIN, "[ERROR]" "%s TQCSR[%d]=%x\n", __func__, q_num,
				   os_readl(USB_QMU_TQCSR(q_num)));
		}
	} else if (dir == USB_RX) {
		os_writel(USB_QMU_RQCSR(q_num), QMU_Q_RESUME);
		/* A zero read-back means the queue is still stopped — retry once. */
		if (!os_readl(USB_QMU_RQCSR(q_num))) {
			qmu_printk(K_WARNIN, "[ERROR]" "%s RQCSR[%d]=%x\n", __func__, q_num,
				   os_readl(USB_QMU_RQCSR(q_num)));
			os_writel(USB_QMU_RQCSR(q_num), QMU_Q_RESUME);
			qmu_printk(K_WARNIN, "[ERROR]" "%s RQCSR[%d]=%x\n", __func__, q_num,
				   os_readl(USB_QMU_RQCSR(q_num)));
		}
	} else {
		qmu_printk(K_ERR, "%s wrong direction!!!\n", __func__);
		BUG_ON(1);
	}
}
/**
 * _ex_mu3d_hal_prepare_tx_gpd - prepare a TX GPD (coherent-memory variant)
 * @gpd: GPD to fill (the current Tx_gpd_end of this endpoint)
 * @pBuf: DMA address of the data buffer
 * @data_len: number of bytes to send
 * @ep_num: endpoint number
 * @_is_bdp: ignored in this variant — BDP is always cleared (no BD chain)
 * @isHWO: ignored in this variant — HWO is always set
 * @ioc: ignored in this variant — IOC is always set
 * @bps: ignored in this variant — BPS is always cleared
 * @zlp: non-zero to request a zero-length packet after the data
 *
 * Links a freshly zeroed, software-owned GPD behind @gpd, writes the GPD
 * checksum and only then sets HWO, handing @gpd to the hardware last — do
 * not reorder those final steps.  Returns @gpd.
 */
TGPD *_ex_mu3d_hal_prepare_tx_gpd(TGPD *gpd, dma_addr_t pBuf, DEV_UINT32 data_len,
				  DEV_UINT8 ep_num, DEV_UINT8 _is_bdp, DEV_UINT8 isHWO,
				  DEV_UINT8 ioc, DEV_UINT8 bps, DEV_UINT8 zlp)
{
	qmu_printk(K_DEBUG,
		   "[TX]" "%s gpd=%p, epnum=%d, len=%d, zlp=%d, size(TGPD)=%lld, pBuf=%08lx\n",
		   __func__, gpd, ep_num, data_len, zlp, (u64) sizeof(TGPD), (unsigned long)pBuf);
	/* Set actual data point to "DATA Buffer" */
	TGPD_SET_DATA(gpd, (unsigned long)pBuf);
	/* Clear "BDP(Buffer Descriptor Present)" flag */
	TGPD_CLR_FORMAT_BDP(gpd);
	/*
	 * "Data Buffer Length" =
	 * 0 (If data length > GPD buffer length, use BDs),
	 * data_len (If data length < GPD buffer length, only use GPD)
	 */
	TGPD_SET_BUF_LEN(gpd, data_len);
	/* "GPD extension length" = 0. Does not use GPD EXT!! */
	TGPD_SET_EXT_LEN(gpd, 0);
	if (zlp)
		TGPD_SET_FORMAT_ZLP(gpd);
	else
		TGPD_CLR_FORMAT_ZLP(gpd);
	/* Default: bps=false */
	TGPD_CLR_FORMAT_BPS(gpd);
	/* Default: ioc=true */
	TGPD_SET_FORMAT_IOC(gpd);
	/* Get the next GPD */
	Tx_gpd_end[ep_num] = get_gpd(USB_TX, ep_num);
	qmu_printk(K_DEBUG, "[TX]" "Tx_gpd_end[%d]=%p\n", ep_num, Tx_gpd_end[ep_num]);
	/* Initialize the new GPD (descriptor plus its cache-line padding) */
	memset(Tx_gpd_end[ep_num], 0, sizeof(TGPD) + AT_GPD_EXT_LEN);
	/* Clear "HWO(Hardware Own)" flag — the new tail stays software-owned */
	TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);
	/* Set "Next GDP pointer" as the next GPD */
	TGPD_SET_NEXT(gpd,
		      (unsigned long)mu3d_hal_gpd_virt_to_phys(Tx_gpd_end[ep_num], USB_TX, ep_num));
	/* Default: isHWO=true.  Checksum first, HWO last: the hardware may
	 * take the descriptor the moment HWO is set. */
	TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH);	/* Set GPD Checksum */
	TGPD_SET_FLAGS_HWO(gpd);	/* Set HWO flag */
	return gpd;
}
  520. TGPD *mu3d_hal_prepare_tx_gpd(TGPD *gpd, dma_addr_t pBuf, DEV_UINT32 data_len,
  521. DEV_UINT8 ep_num, DEV_UINT8 _is_bdp, DEV_UINT8 isHWO,
  522. DEV_UINT8 ioc, DEV_UINT8 bps, DEV_UINT8 zlp)
  523. {
  524. DEV_UINT32 offset;
  525. DEV_INT32 i;
  526. DEV_INT32 bd_num;
  527. DEV_UINT32 length;
  528. TBD *bd_next;
  529. TBD *bd_head;
  530. TBD *bd;
  531. DEV_UINT8 *pBuffer;
  532. /*If data length is less than the GPD buffer size, just use GPD */
  533. /* if (data_len <= GPD_BUF_SIZE) { */
  534. /* _is_bdp = 0; */
  535. /* } */
  536. os_printk(K_INFO, "%s gpd=%p, epnum=%d, len=%d, _is_bdp=%d\n", __func__,
  537. gpd, ep_num, data_len, _is_bdp);
  538. if (!_is_bdp) {
  539. /*Set actual data point to "DATA Buffer" */
  540. TGPD_SET_DATA(gpd, (unsigned long)pBuf);
  541. /*Clear "BDP(Buffer Descriptor Present)" flag */
  542. TGPD_CLR_FORMAT_BDP(gpd);
  543. } else {
  544. /*Get the first BD */
  545. bd_head = (TBD *) get_bd(USB_TX, ep_num);
  546. os_printk(K_INFO, "bd_head=x%p\n", bd_head);
  547. bd = bd_head;
  548. os_memset(bd, 0, sizeof(TBD));
  549. /*Date length for transfer */
  550. length = data_len;
  551. /*Point of data buffer */
  552. pBuffer = (DEV_UINT8 *) (uintptr_t) (pBuf);
  553. /*The size of BD buffer */
  554. offset = BD_BUF_SIZE;
  555. /*Count how many BD this transfer need. */
  556. bd_num = (!(length % offset)) ? (length / offset) : ((length / offset) + 1);
  557. os_printk(K_INFO, "bd_num=%d\n", bd_num);
  558. /*If the size of BD buffer is bigger than the length of actual transfer, use the actual length */
  559. if (offset > length)
  560. offset = length;
  561. /*Insert data into each BD */
  562. for (i = 0; i < bd_num; i++) {
  563. os_printk(K_INFO, "bd[%d]=%p\n", i, bd);
  564. if (i == (bd_num - 1)) { /*The last BD */
  565. TBD_SET_EXT_LEN(bd, 0); /*"BD Extension Length" = 0. Does not use BD EXT!! */
  566. TBD_SET_BUF_LEN(bd, length); /*"Data Buffer Length" = the rest of data length */
  567. /*Store the data pointer to "Data Buffer" */
  568. TBD_SET_DATA(bd, (unsigned long)pBuffer);
  569. TBD_SET_FLAGS_EOL(bd); /*Set "EOL" */
  570. TBD_SET_NEXT(bd, 0); /*Set "Next BD pointer" = 0 */
  571. TBD_SET_CHKSUM(bd, CHECKSUM_LENGTH); /*Set "BD Checksum" */
  572. /*Flush the data of BD struct to device */
  573. dma_sync_single_for_device(NULL,
  574. bd_virt_to_phys(bd, USB_RX, ep_num),
  575. sizeof(TBD), DMA_BIDIRECTIONAL);
  576. /*There is no data left to be transferred by GPD */
  577. /* data_len=length; */
  578. data_len = 0;
  579. /*There is no data left to insert BD */
  580. length = 0;
  581. } else {
  582. TBD_SET_EXT_LEN(bd, 0); /*"BD Extension length" = 0. Does not use BD EXT!! */
  583. TBD_SET_BUF_LEN(bd, offset); /*"Data Buffer Length" = the MAX BD transfer size */
  584. /*Store the data pointer to "Data Buffer" */
  585. TBD_SET_DATA(bd, (unsigned long)pBuffer);
  586. TBD_CLR_FLAGS_EOL(bd); /*Clear "EOL" */
  587. /*Get the next BD */
  588. bd_next = (TBD *) get_bd(USB_TX, ep_num);
  589. os_memset(bd_next, 0, sizeof(TBD));
  590. /*Set "Next BD pointer" as the next BD */
  591. TBD_SET_NEXT(bd,
  592. (unsigned long)bd_virt_to_phys(bd_next, USB_TX,
  593. ep_num));
  594. TBD_SET_CHKSUM(bd, CHECKSUM_LENGTH); /*Set BD Checksum */
  595. /*Flush the data of BD struct to device */
  596. dma_sync_single_for_device(NULL,
  597. bd_virt_to_phys(bd, USB_RX, ep_num),
  598. sizeof(TBD), DMA_BIDIRECTIONAL);
  599. /*Calculate the left data length */
  600. length -= offset;
  601. /*Move to pointer of buffer */
  602. pBuffer += offset;
  603. /*Move to next BD */
  604. bd = bd_next;
  605. }
  606. }
  607. /*Set the BD pointer into "BD Pointer" at GPD */
  608. TGPD_SET_DATA(gpd, (unsigned long)bd_virt_to_phys(bd_head, USB_TX, ep_num));
  609. /*Set "BDP(Buffer Descriptor Present)" flag */
  610. TGPD_SET_FORMAT_BDP(gpd);
  611. }
  612. os_printk(K_INFO, "%s GPD data_length=%d\n", __func__, data_len);
  613. /*
  614. * "Data Buffer Length" =
  615. * 0 (If data length > GPD buffer length, use BDs),
  616. * data_len (If data length < GPD buffer length, only use GPD)
  617. */
  618. TGPD_SET_BUF_LEN(gpd, data_len);
  619. /*"GPD extension length" = 0. Does not use GPD EXT!! */
  620. TGPD_SET_EXT_LEN(gpd, 0);
  621. /*Default: zlp=false, except type=ISOC */
  622. if (zlp)
  623. TGPD_SET_FORMAT_ZLP(gpd);
  624. else
  625. TGPD_CLR_FORMAT_ZLP(gpd);
  626. /*Default: bps=false */
  627. if (bps)
  628. TGPD_SET_FORMAT_BPS(gpd);
  629. else
  630. TGPD_CLR_FORMAT_BPS(gpd);
  631. /*Default: ioc=true */
  632. if (ioc)
  633. TGPD_SET_FORMAT_IOC(gpd);
  634. else
  635. TGPD_CLR_FORMAT_IOC(gpd);
  636. /*Get the next GPD */
  637. Tx_gpd_end[ep_num] = get_gpd(USB_TX, ep_num);
  638. os_printk(K_INFO, "Tx_gpd_end[%d]=%p\n", ep_num, Tx_gpd_end[ep_num]);
  639. /*Initialize the new GPD */
  640. os_memset(Tx_gpd_end[ep_num], 0, sizeof(TGPD));
  641. /*Clear "HWO(Hardware Own)" flag */
  642. TGPD_CLR_FLAGS_HWO(Tx_gpd_end[ep_num]);
  643. /*Set "Next GDP pointer" as the next GPD */
  644. TGPD_SET_NEXT(gpd,
  645. (unsigned long)mu3d_hal_gpd_virt_to_phys(Tx_gpd_end[ep_num], USB_TX, ep_num));
  646. /*Default: isHWO=true */
  647. if (isHWO) {
  648. TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH); /*Set GPD Checksum */
  649. TGPD_SET_FLAGS_HWO(gpd); /*Set HWO flag */
  650. } else {
  651. TGPD_CLR_FLAGS_HWO(gpd);
  652. TGPD_SET_CHKSUM_HWO(gpd, CHECKSUM_LENGTH);
  653. }
  654. /*Flush the data of GPD struct to device */
  655. dma_sync_single_for_device(NULL, mu3d_hal_gpd_virt_to_phys(gpd, USB_TX, ep_num),
  656. sizeof(TGPD), DMA_BIDIRECTIONAL);
  657. #if defined(USB_RISC_CACHE_ENABLED)
  658. os_flushinvalidateDcache();
  659. #endif
  660. return gpd;
  661. }
  662. static inline int check_next_gpd(TGPD *gpd, TGPD *next_gpd)
  663. {
  664. if (((uintptr_t) next_gpd - (uintptr_t) gpd) == 0x40)
  665. return 1;
  666. else if (((uintptr_t) gpd - (uintptr_t) next_gpd) == 0x7c0)
  667. return 1;
  668. /*UNNECESSARY_ELSE*/
  669. qmu_printk(K_ERR, "[RX]" "%p <-> %p\n", gpd, next_gpd);
  670. return 0;
  671. }
  672. /**
 * _ex_mu3d_hal_prepare_rx_gpd - prepare rx gpd/bd
  674. * @args - arg1: gpd address, arg2: data buffer address, arg3: data length,
  675. * arg4: ep number, arg5: with bd or not, arg6: write hwo bit or not, arg7: write ioc bit or not
  676. */
TGPD *_ex_mu3d_hal_prepare_rx_gpd(TGPD *gpd, dma_addr_t pBuf, DEV_UINT32 data_len,
				  DEV_UINT8 ep_num, DEV_UINT8 _is_bdp, DEV_UINT8 isHWO,
				  DEV_UINT8 ioc, DEV_UINT8 bps, DEV_UINT32 cMaxPacketSize)
{
	/*
	 * Fill one RX GPD, advance the ring tail, and hand the GPD to hardware.
	 * NOTE: this variant ignores _is_bdp/isHWO/ioc/bps/cMaxPacketSize — it
	 * never builds a BD list, always clears BPS, always sets IOC, and always
	 * sets HWO at the end (see the unconditional TGPD_SET_* calls below).
	 */
	qmu_printk(K_DEBUG, "[RX]" "%s gpd=%p, epnum=%d, len=%d, pBuf=%08lx\n", __func__,
		   gpd, ep_num, data_len, (unsigned long)pBuf);
	/* Point "DATA Buffer" at the actual receive buffer */
	TGPD_SET_DATA(gpd, (unsigned long)pBuf);
	/* Clear "BDP (Buffer Descriptor Present)" flag — no BD chain is used */
	TGPD_CLR_FORMAT_BDP(gpd);
	/*
	 * "Allowed Data Buffer Length" = data_len: the buffer is always
	 * described by the GPD alone in this variant.
	 */
	TGPD_SET_DataBUF_LEN(gpd, data_len);
	/* "Transferred Data Length" starts at 0; hardware updates it on RX */
	TGPD_SET_BUF_LEN(gpd, 0);
	/* BPS always cleared (the bps argument is not consulted) */
	TGPD_CLR_FORMAT_BPS(gpd);
	/* IOC always set (the ioc argument is not consulted) */
	TGPD_SET_FORMAT_IOC(gpd);
	/* Allocate the next GPD so the ring always has a software-owned tail */
	Rx_gpd_end[ep_num] = get_gpd(USB_RX, ep_num);
	qmu_printk(K_DEBUG, "[RX]" "Rx_gpd_end[%d]=%p gpd=%p\n", ep_num, Rx_gpd_end[ep_num], gpd);
	/* BUG_ON(!check_next_gpd(gpd, Rx_gpd_end[ep_num])); */
	/* Initialize the new tail GPD (including its extension area) */
	memset(Rx_gpd_end[ep_num], 0, sizeof(TGPD) + AT_GPD_EXT_LEN);
	/* New tail stays software-owned: clear "HWO (Hardware Own)" */
	TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);
	/* Link the current GPD to the new tail ("Next GPD pointer") */
	TGPD_SET_NEXT(gpd,
		      (unsigned long)mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[ep_num], USB_RX, ep_num));
	/*
	 * Hand-off to hardware: checksum first, then HWO last so the GPD is
	 * fully consistent before hardware may consume it (the isHWO argument
	 * is not consulted — HWO is always set).
	 */
	TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH);	/*Set GPD Checksum */
	TGPD_SET_FLAGS_HWO(gpd);	/*Set HWO flag */
	/* os_printk(K_DEBUG,"Rx gpd info { HWO %d, Next_GPD %x ,DataBufferLength %d, */
	/* DataBuffer %x, Recived Len %d, Endpoint %d, TGL %d, ZLP %d}\n", */
	/* (DEV_UINT32)TGPD_GET_FLAG(gpd), (DEV_UINT32)TGPD_GET_NEXT(gpd), */
	/* (DEV_UINT32)TGPD_GET_DataBUF_LEN(gpd), (DEV_UINT32)TGPD_GET_DATA(gpd), */
	/* (DEV_UINT32)TGPD_GET_BUF_LEN(gpd), (DEV_UINT32)TGPD_GET_EPaddr(gpd), */
	/* (DEV_UINT32)TGPD_GET_TGL(gpd), (DEV_UINT32)TGPD_GET_ZLP(gpd)); */
	return gpd;
}
  721. TGPD *mu3d_hal_prepare_rx_gpd(TGPD *gpd, dma_addr_t pBuf, DEV_UINT32 data_len,
  722. DEV_UINT8 ep_num, DEV_UINT8 _is_bdp, DEV_UINT8 isHWO,
  723. DEV_UINT8 ioc, DEV_UINT8 bps, DEV_UINT32 cMaxPacketSize)
  724. {
  725. DEV_UINT32 offset;
  726. DEV_INT32 i;
  727. DEV_INT32 bd_num;
  728. DEV_UINT32 length;
  729. TBD *bd_next;
  730. TBD *bd_head;
  731. TBD *bd;
  732. DEV_UINT8 *pBuffer;
  733. /*If data length is less than the GPD buffer size, just use GPD */
  734. if (data_len < GPD_BUF_SIZE)
  735. _is_bdp = 0;
  736. os_printk(K_INFO, "%s gpd=%p, epnum=%d, len=%d, _is_bdp=%d, maxp=%d\n", __func__,
  737. gpd, ep_num, data_len, _is_bdp, cMaxPacketSize);
  738. if (!_is_bdp) {
  739. /*Set actual data point to "DATA Buffer" */
  740. TGPD_SET_DATA(gpd, (unsigned long)pBuf);
  741. /*Clear "BDP(Buffer Descriptor Present)" flag */
  742. TGPD_CLR_FORMAT_BDP(gpd);
  743. } else {
  744. /*Get the first BD */
  745. bd_head = (TBD *) get_bd(USB_RX, ep_num);
  746. os_printk(K_INFO, "bd_head=x%p\n", bd_head);
  747. bd = bd_head;
  748. os_memset(bd, 0, sizeof(TBD));
  749. /*Date length for transfer */
  750. length = data_len;
  751. /*Point of data buffer */
  752. pBuffer = (DEV_UINT8 *) (uintptr_t) (pBuf);
  753. /*The size of BD buffer */
  754. offset = BD_BUF_SIZE;
  755. /*Count how many BD this transfer need. */
  756. bd_num = (!(length % offset)) ? (length / offset) : ((length / offset) + 1);
  757. os_printk(K_INFO, "%s bd_num=%d\n", __func__, bd_num);
  758. /*Insert data into each BD */
  759. for (i = 0; i < bd_num; i++) {
  760. os_printk(K_INFO, "%s bd[%d]=%p\n", __func__, i, bd);
  761. if (i == (bd_num - 1)) {
  762. TBD_SET_BUF_LEN(bd, 0); /*Set "Transferred Data Length" = 0 */
  763. /*The last one's data buffer lengnth must be precise, or the GPD will never
  764. * done unless ZLP or short packet. */
  765. /*"Allow Data Buffer Length" = the rest of data length* */
  766. length =
  767. (!(length % cMaxPacketSize)) ? (length) : ((length /
  768. cMaxPacketSize) +
  769. 1) * cMaxPacketSize;
  770. TBD_SET_DataBUF_LEN(bd, length);
  771. /*Store the data pointer to "Data Buffer" */
  772. TBD_SET_DATA(bd, (unsigned long)pBuffer);
  773. TBD_SET_FLAGS_EOL(bd); /*Set "EOL" */
  774. TBD_SET_NEXT(bd, 0); /*Set "Next BD pointer" = 0 */
  775. TBD_SET_CHKSUM(bd, CHECKSUM_LENGTH); /*Set "BD Checksum" */
  776. /*Flush the data of BD struct to device */
  777. dma_sync_single_for_device(NULL,
  778. bd_virt_to_phys(bd, USB_RX, ep_num),
  779. sizeof(TBD), DMA_BIDIRECTIONAL);
  780. break;
  781. }
  782. /*WARNING:UNNECESSARY_ELSE: else is not generally useful after a break or return*/
  783. /*else*/
  784. {
  785. TBD_SET_BUF_LEN(bd, 0); /*Set "Transferred Data Length" = 0 */
  786. /*"Allow Data Buffer Length" = the MAX BD transfer size */
  787. TBD_SET_DataBUF_LEN(bd, offset);
  788. /*Store the data pointer to "Data Buffer" */
  789. TBD_SET_DATA(bd, (unsigned long)pBuffer);
  790. TBD_CLR_FLAGS_EOL(bd); /*Clear "EOL" */
  791. /*Get the next BD */
  792. bd_next = (TBD *) get_bd(USB_RX, ep_num);
  793. os_memset(bd_next, 0, sizeof(TBD));
  794. /*Set "Next BD pointer" as the next BD */
  795. TBD_SET_NEXT(bd,
  796. (unsigned long)bd_virt_to_phys(bd_next, USB_RX,
  797. ep_num));
  798. TBD_SET_CHKSUM(bd, CHECKSUM_LENGTH); /*Set BD Checksum */
  799. /*Flush the data of BD struct to device */
  800. dma_sync_single_for_device(NULL,
  801. bd_virt_to_phys(bd, USB_RX, ep_num),
  802. sizeof(TBD), DMA_BIDIRECTIONAL);
  803. /*Calculate the left data length */
  804. length -= offset;
  805. /*Move to pointer of buffer */
  806. pBuffer += offset;
  807. /*Move to next BD */
  808. bd = bd_next;
  809. }
  810. }
  811. /*Set the BD pointer into "BD Pointer" at GPD */
  812. TGPD_SET_DATA(gpd, (unsigned long)bd_virt_to_phys(bd_head, USB_RX, ep_num));
  813. /*Set "BDP(Buffer Descriptor Present)" flag */
  814. TGPD_SET_FORMAT_BDP(gpd);
  815. }
  816. os_printk(K_INFO, "%s GPD data_length=%d\n", __func__, data_len);
  817. /*
  818. * Set "Allow Data Buffer Length" =
  819. * 0 (If data length > GPD buffer length, use BDs),
  820. * data_len (If data length < GPD buffer length, only use GPD)
  821. */
  822. TGPD_SET_DataBUF_LEN(gpd, data_len);
  823. /* TGPD_SET_DataBUF_LEN(gpd, gpd_buf_size); */
  824. /*Set "Transferred Data Length" = 0 */
  825. TGPD_SET_BUF_LEN(gpd, 0);
  826. /*Default: bps=false */
  827. if (bps)
  828. TGPD_SET_FORMAT_BPS(gpd);
  829. else
  830. TGPD_CLR_FORMAT_BPS(gpd);
  831. /*Default: ioc=true */
  832. if (ioc)
  833. TGPD_SET_FORMAT_IOC(gpd);
  834. else
  835. TGPD_CLR_FORMAT_IOC(gpd);
  836. /*Get the next GPD */
  837. Rx_gpd_end[ep_num] = get_gpd(USB_RX, ep_num);
  838. os_printk(K_INFO, "%s Rx_gpd_end[%d]=%p\n", __func__, ep_num, Tx_gpd_end[ep_num]);
  839. /*Initialize the new GPD */
  840. os_memset(Rx_gpd_end[ep_num], 0, sizeof(TGPD));
  841. /*Clear "HWO(Hardware Own)" flag */
  842. TGPD_CLR_FLAGS_HWO(Rx_gpd_end[ep_num]);
  843. /*Set Next GDP pointer to the next GPD */
  844. TGPD_SET_NEXT(gpd,
  845. (unsigned long)mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[ep_num], USB_RX, ep_num));
  846. /*Default: isHWO=true */
  847. if (isHWO) {
  848. TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH); /*Set GPD Checksum */
  849. TGPD_SET_FLAGS_HWO(gpd); /*Set HWO flag */
  850. } else {
  851. TGPD_CLR_FLAGS_HWO(gpd);
  852. TGPD_SET_CHKSUM_HWO(gpd, CHECKSUM_LENGTH);
  853. }
  854. /* os_printk(K_DEBUG,"Rx gpd info { HWO %d, Next_GPD %x ,DataBufferLength %d,
  855. * DataBuffer %x, Recived Len %d, Endpoint %d, TGL %d, ZLP %d}\n", */
  856. /* (DEV_UINT32)TGPD_GET_FLAG(gpd), (DEV_UINT32)TGPD_GET_NEXT(gpd), */
  857. /* (DEV_UINT32)TGPD_GET_DataBUF_LEN(gpd), (DEV_UINT32)TGPD_GET_DATA(gpd), */
  858. /* (DEV_UINT32)TGPD_GET_BUF_LEN(gpd), (DEV_UINT32)TGPD_GET_EPaddr(gpd), */
  859. /* (DEV_UINT32)TGPD_GET_TGL(gpd), (DEV_UINT32)TGPD_GET_ZLP(gpd)); */
  860. /*Flush the data of GPD struct to device */
  861. dma_sync_single_for_device(NULL, mu3d_hal_gpd_virt_to_phys(gpd, USB_RX, ep_num),
  862. sizeof(TGPD), DMA_BIDIRECTIONAL);
  863. return gpd;
  864. }
  865. /*
  866. * mu3d_hal_insert_transfer_gpd - insert new gpd/bd
  867. * @args - arg1: ep number, arg2: dir, arg3: data buffer, arg4: data length,
  868. * arg5: write hwo bit or not, arg6: write ioc bit or not
  869. */
  870. void _ex_mu3d_hal_insert_transfer_gpd(DEV_INT32 ep_num, USB_DIR dir, dma_addr_t buf,
  871. DEV_UINT32 count, DEV_UINT8 isHWO, DEV_UINT8 ioc,
  872. DEV_UINT8 bps, DEV_UINT8 zlp, DEV_UINT32 maxp)
  873. {
  874. TGPD *gpd;
  875. if (dir == USB_TX) {
  876. gpd = Tx_gpd_end[ep_num];
  877. _ex_mu3d_hal_prepare_tx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, zlp);
  878. } else if (dir == USB_RX) {
  879. gpd = Rx_gpd_end[ep_num];
  880. _ex_mu3d_hal_prepare_rx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, maxp);
  881. }
  882. }
  883. void mu3d_hal_insert_transfer_gpd(DEV_INT32 ep_num, USB_DIR dir, dma_addr_t buf,
  884. DEV_UINT32 count, DEV_UINT8 isHWO, DEV_UINT8 ioc,
  885. DEV_UINT8 bps, DEV_UINT8 zlp, DEV_UINT32 maxp)
  886. {
  887. TGPD *gpd;
  888. if (dir == USB_TX) {
  889. gpd = Tx_gpd_end[ep_num];
  890. /* os_printk(K_INFO,"TX gpd :%x\n", (unsigned int)gpd); */
  891. mu3d_hal_prepare_tx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, zlp);
  892. } else if (dir == USB_RX) {
  893. gpd = Rx_gpd_end[ep_num];
  894. /* os_printk(K_INFO,"RX gpd :%x\n",(unsigned int)gpd); */
  895. mu3d_hal_prepare_rx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, maxp);
  896. }
  897. }
  898. /**
  899. * mu3d_hal_start_qmu - start qmu function (QMU flow :
  900. * mu3d_hal_init_qmu ->mu3d_hal_start_qmu -> mu3d_hal_insert_transfer_gpd -> mu3d_hal_resume_qmu)
  901. * @args - arg1: ep number, arg2: dir
  902. */
void mu3d_hal_start_qmu(DEV_INT32 Q_num, USB_DIR dir)
{
	/*
	 * Enable DMA-request + QMU checksum for the queue, program ZLP
	 * policy, unmask the queue's empty/error interrupts, then start the
	 * queue unless it is already active. Finally (either direction)
	 * program the global checksum width.
	 */
	DEV_UINT32 QCR;
	DEV_UINT32 txcsr;

	if (dir == USB_TX) {
		/* Enable TX DMA request; the & 0xFFFEFFFF clears bit 16 first */
		txcsr = USB_ReadCsr32(U3D_TX1CSR0, Q_num) & 0xFFFEFFFF;
		USB_WriteCsr32(U3D_TX1CSR0, Q_num, txcsr | TX_DMAREQEN);
		/* Enable QMU checksum checking for this TX queue */
		QCR = os_readl(U3D_QCR0);
		os_writel(U3D_QCR0, QCR | QMU_TX_CS_EN(Q_num));
#if (TXZLP == HW_MODE)
		/* Hardware ZLP: disable GPD-controlled ZLP, enable HW ZLP */
		QCR = os_readl(U3D_QCR1);
		os_writel(U3D_QCR1, QCR & ~QMU_TX_ZLP(Q_num));
		QCR = os_readl(U3D_QCR2);
		os_writel(U3D_QCR2, QCR | QMU_TX_ZLP(Q_num));
#elif (TXZLP == GPD_MODE)
		/* GPD ZLP: the ZLP flag in each GPD decides */
		QCR = os_readl(U3D_QCR1);
		os_writel(U3D_QCR1, QCR | QMU_TX_ZLP(Q_num));
#endif
		/* Unmask queue-empty and TX length/checksum error interrupts */
		os_writel(U3D_QEMIESR, os_readl(U3D_QEMIESR) | QMU_TX_EMPTY(Q_num));
		os_writel(U3D_TQERRIESR0, QMU_TX_LEN_ERR(Q_num) | QMU_TX_CS_ERR(Q_num));
		qmu_printk(K_INFO, "USB_QMU_TQCSR:0x%08X\n", os_readl(USB_QMU_TQCSR(Q_num)));
		/* Already running: nothing more to do */
		if (os_readl(USB_QMU_TQCSR(Q_num)) & QMU_Q_ACTIVE) {
			qmu_printk(K_INFO, "Tx %d Active Now!\n", Q_num);
			return;
		}
		/* Kick the TX queue */
		os_writel(USB_QMU_TQCSR(Q_num), QMU_Q_START);
		qmu_printk(K_INFO, "USB_QMU_TQCSR:0x%08X\n", os_readl(USB_QMU_TQCSR(Q_num)));
	} else if (dir == USB_RX) {
		/* Enable RX DMA request */
		USB_WriteCsr32(U3D_RX1CSR0, Q_num,
			       USB_ReadCsr32(U3D_RX1CSR0, Q_num) | (RX_DMAREQEN));
		/* Enable QMU checksum checking for this RX queue */
		QCR = os_readl(U3D_QCR0);
		os_writel(U3D_QCR0, QCR | QMU_RX_CS_EN(Q_num));
#ifdef CFG_RX_ZLP_EN
		/* Accept/handle zero-length packets on this RX queue */
		QCR = os_readl(U3D_QCR3);
		os_writel(U3D_QCR3, QCR | QMU_RX_ZLP(Q_num));
#else
		QCR = os_readl(U3D_QCR3);
		os_writel(U3D_QCR3, QCR & ~(QMU_RX_ZLP(Q_num)));
#endif
#ifdef CFG_RX_COZ_EN
		/* COZ = "close on zero-length packet" behaviour for the queue */
		QCR = os_readl(U3D_QCR3);
		os_writel(U3D_QCR3, QCR | QMU_RX_COZ(Q_num));
#else
		QCR = os_readl(U3D_QCR3);
		os_writel(U3D_QCR3, QCR & ~(QMU_RX_COZ(Q_num)));
#endif
		/* Unmask queue-empty and RX length/checksum/EP/ZLP error interrupts */
		os_writel(U3D_QEMIESR, os_readl(U3D_QEMIESR) | QMU_RX_EMPTY(Q_num));
		os_writel(U3D_RQERRIESR0, QMU_RX_LEN_ERR(Q_num) | QMU_RX_CS_ERR(Q_num));
		os_writel(U3D_RQERRIESR1, QMU_RX_EP_ERR(Q_num) | QMU_RX_ZLP_ERR(Q_num));
		qmu_printk(K_INFO, "USB_QMU_RQCSR:0x%08X\n", os_readl(USB_QMU_RQCSR(Q_num)));
		/* Already running: nothing more to do */
		if (os_readl(USB_QMU_RQCSR(Q_num)) & QMU_Q_ACTIVE) {
			qmu_printk(K_INFO, "Rx %d Active Now!\n", Q_num);
			return;
		}
		/* Kick the RX queue */
		os_writel(USB_QMU_RQCSR(Q_num), QMU_Q_START);
		qmu_printk(K_INFO, "USB_QMU_RQCSR:0x%08X\n", os_readl(USB_QMU_RQCSR(Q_num)));
	}
	/* Select 16-bit vs. default QMU checksum width globally */
#if (CHECKSUM_TYPE == CS_16B)
	os_writel(U3D_QCR0, os_readl(U3D_QCR0) | CS16B_EN);
#else
	os_writel(U3D_QCR0, os_readl(U3D_QCR0) & ~CS16B_EN);
#endif
}
  966. /**
  967. * mu3d_hal_stop_qmu - stop qmu function (after qmu stop, fifo should be flushed)
  968. * @args - arg1: ep number, arg2: dir
  969. */
  970. void mu3d_hal_stop_qmu(DEV_INT32 q_num, USB_DIR dir)
  971. {
  972. if (dir == USB_TX) {
  973. if (!(os_readl(USB_QMU_TQCSR(q_num)) & (QMU_Q_ACTIVE))) {
  974. qmu_printk(K_CRIT, "Tx%d inActive Now!\n", q_num);
  975. return;
  976. }
  977. os_writel(USB_QMU_TQCSR(q_num), QMU_Q_STOP);
  978. mb();
  979. if (wait_for_value(USB_QMU_TQCSR(q_num), QMU_Q_ACTIVE, 0, 10, 100) == RET_SUCCESS)
  980. qmu_printk(K_CRIT, "Tx%d stop Now! CSR=0x%x\n", q_num,
  981. os_readl(USB_QMU_TQCSR(q_num)));
  982. else {
  983. qmu_printk(K_CRIT, "Tx%d UNSTOPABLE!! CSR=0x%x\n", q_num,
  984. os_readl(USB_QMU_TQCSR(q_num)));
  985. WARN_ON(1);
  986. }
  987. } else if (dir == USB_RX) {
  988. if (!(os_readl(USB_QMU_RQCSR(q_num)) & QMU_Q_ACTIVE)) {
  989. qmu_printk(K_CRIT, "Rx%d inActive Now!\n", q_num);
  990. return;
  991. }
  992. os_writel(USB_QMU_RQCSR(q_num), QMU_Q_STOP);
  993. mb();
  994. if (wait_for_value(USB_QMU_RQCSR(q_num), QMU_Q_ACTIVE, 0, 10, 100) == RET_SUCCESS)
  995. qmu_printk(K_CRIT, "Rx%d stop Now! CSR=0x%x\n", q_num,
  996. os_readl(USB_QMU_RQCSR(q_num)));
  997. else {
  998. qmu_printk(K_CRIT, "Rx%d UNSTOPABLE!! CSR=0x%x\n", q_num,
  999. os_readl(USB_QMU_RQCSR(q_num)));
  1000. WARN_ON(1);
  1001. }
  1002. }
  1003. }
  1004. /**
  1005. * mu3d_hal_send_stall - send stall
  1006. * @args - arg1: ep number, arg2: dir
  1007. */
void mu3d_hal_send_stall(DEV_INT32 q_num, USB_DIR dir)
{
	/*
	 * Force a STALL handshake on the endpoint: set SENDSTALL, poll until
	 * hardware reports SENTSTALL, then write-1-to-clear SENTSTALL and
	 * drop SENDSTALL again.
	 *
	 * NOTE(review): the while-loops poll SENTSTALL with no timeout; if
	 * hardware never sets the bit this spins forever — confirm whether a
	 * bounded wait (like wait_for_value elsewhere in this file) is needed.
	 */
	if (dir == USB_TX) {
		/* Request the STALL */
		USB_WriteCsr32(U3D_TX1CSR0, q_num,
			       USB_ReadCsr32(U3D_TX1CSR0, q_num) | TX_SENDSTALL);
		/* Busy-wait until hardware acknowledges it was sent */
		while (!(USB_ReadCsr32(U3D_TX1CSR0, q_num) & TX_SENTSTALL))
			;
		/* Acknowledge SENTSTALL (write-1-to-clear style), then clear the request */
		USB_WriteCsr32(U3D_TX1CSR0, q_num,
			       USB_ReadCsr32(U3D_TX1CSR0, q_num) | TX_SENTSTALL);
		USB_WriteCsr32(U3D_TX1CSR0, q_num,
			       USB_ReadCsr32(U3D_TX1CSR0, q_num) & ~TX_SENDSTALL);
	} else if (dir == USB_RX) {
		/* Request the STALL */
		USB_WriteCsr32(U3D_RX1CSR0, q_num,
			       USB_ReadCsr32(U3D_RX1CSR0, q_num) | RX_SENDSTALL);
		/* Busy-wait until hardware acknowledges it was sent */
		while (!(USB_ReadCsr32(U3D_RX1CSR0, q_num) & RX_SENTSTALL))
			;
		/* Acknowledge SENTSTALL, then clear the request */
		USB_WriteCsr32(U3D_RX1CSR0, q_num,
			       USB_ReadCsr32(U3D_RX1CSR0, q_num) | RX_SENTSTALL);
		USB_WriteCsr32(U3D_RX1CSR0, q_num,
			       USB_ReadCsr32(U3D_RX1CSR0, q_num) & ~RX_SENDSTALL);
	}
	os_printk(K_CRIT, "%s %s-EP[%d] sent stall\n", __func__, ((dir == USB_TX) ? "TX" : "RX"),
		  q_num);
}
  1032. /**
  1033. * mu3d_hal_restart_qmu - clear toggle(or sequence) number and start qmu
  1034. * @args - arg1: ep number, arg2: dir
  1035. */
  1036. void mu3d_hal_restart_qmu(DEV_INT32 q_num, USB_DIR dir)
  1037. {
  1038. DEV_UINT32 ep_rst;
  1039. qmu_printk(K_CRIT, "%s : Reset %s-EP[%d]\n", __func__, ((dir == USB_TX) ? "TX" : "RX"),
  1040. q_num);
  1041. if (dir == USB_TX) {
  1042. ep_rst = BIT16 << q_num;
  1043. os_writel(U3D_EP_RST, ep_rst);
  1044. os_ms_delay(1);
  1045. os_writel(U3D_EP_RST, 0);
  1046. } else {
  1047. ep_rst = 1 << q_num;
  1048. os_writel(U3D_EP_RST, ep_rst);
  1049. os_ms_delay(1);
  1050. os_writel(U3D_EP_RST, 0);
  1051. }
  1052. mu3d_hal_start_qmu(q_num, dir);
  1053. }
  1054. /**
 * flush_qmu - stop qmu and align qmu start ptr to current ptr
  1056. * @args - arg1: ep number, arg2: dir
  1057. */
void _ex_mu3d_hal_flush_qmu(DEV_INT32 Q_num, USB_DIR dir)
{
	/*
	 * Stop the queue, work out which GPD hardware was on (current
	 * pointer, falling back to the start address, falling back to the
	 * known-good list head), reset the software ring state to that GPD,
	 * release queued GPDs, and re-program the queue start address.
	 */
	TGPD *gpd_current;

	qmu_printk(K_CRIT, "%s flush QMU %s-EP[%d]\n", __func__, ((dir == USB_TX) ? "TX" : "RX"),
		   Q_num);
	if (dir == USB_TX) {
		/* Stop the TX queue before touching its state */
		mu3d_hal_stop_qmu(Q_num, USB_TX);
		/* TX Queue Current Pointer Register (QMU GPD address = CPU DMA address) */
		gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_TQCPR(Q_num)));
		/* CPR == 0 means QMU has not yet executed any GPD */
		if (!gpd_current) {
			/* Fall back to TX Queue Starting Address Register */
			gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_TQSAR(Q_num)));
		}
		/*
		 * Even the SAR value may be 0/corrupted; in that case use the
		 * head of the GPD list — Tx_gpd_head[Q_num] is always correct.
		 */
		if (!gpd_current) {
			gpd_current = Tx_gpd_head[Q_num];
			qmu_printk(K_CRIT, "gpd is null, so use the head of GPD list %p\n",
				   gpd_current);
		} else {
			/* Translate the hardware (physical) pointer to a virtual one */
			qmu_printk(K_CRIT, "gpd_current(P) %p\n", gpd_current);
			gpd_current = gpd_phys_to_virt((void *)gpd_current, USB_TX, Q_num);
			qmu_printk(K_CRIT, "gpd_current(V) %p\n", (void *)gpd_current);
		}
		/* Collapse the TX ring: tail and last both point at the chosen GPD */
		Tx_gpd_end[Q_num] = Tx_gpd_last[Q_num] = gpd_current;
		gpd_ptr_align(dir, Q_num, Tx_gpd_end[Q_num]);
		free_gpd(dir, Q_num);
		/*FIXME: Do not know why... */
		os_writel(USB_QMU_TQSAR(Q_num),
			  mu3d_hal_gpd_virt_to_phys(Tx_gpd_last[Q_num], USB_TX, Q_num));
		qmu_printk(K_ERR, "USB_QMU_TQSAR %x\n", os_readl(USB_QMU_TQSAR(Q_num)));
	} else if (dir == USB_RX) {
		/* Stop the RX queue before touching its state */
		mu3d_hal_stop_qmu(Q_num, USB_RX);
		/* RX Queue Current Pointer Register (QMU GPD address = CPU DMA address) */
		gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_RQCPR(Q_num)));
		if (!gpd_current) {
			/* Fall back to RX Queue Starting Address Register */
			gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_RQSAR(Q_num)));
		}
		/*
		 * Even the SAR value may be 0/corrupted; in that case use the
		 * head of the GPD list — Rx_gpd_head[Q_num] is always correct.
		 */
		if (!gpd_current) {
			gpd_current = Rx_gpd_head[Q_num];
			qmu_printk(K_CRIT, "gpd is null, so use the head of GPD list %p\n",
				   gpd_current);
		} else {
			/* Translate the hardware (physical) pointer to a virtual one */
			qmu_printk(K_CRIT, "gpd_current(P) %p\n", gpd_current);
			gpd_current = gpd_phys_to_virt((void *)gpd_current, USB_RX, Q_num);
			qmu_printk(K_CRIT, "gpd_current(V) %p\n", (void *)gpd_current);
		}
		/* Collapse the RX ring: tail and last both point at the chosen GPD */
		Rx_gpd_end[Q_num] = Rx_gpd_last[Q_num] = gpd_current;
		gpd_ptr_align(dir, Q_num, Rx_gpd_end[Q_num]);
		free_gpd(dir, Q_num);
		/*FIXME: Do not know why... */
		os_writel(USB_QMU_RQSAR(Q_num),
			  mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[Q_num], USB_RX, Q_num));
		qmu_printk(K_ERR, "USB_QMU_RQSAR %x\n", os_readl(USB_QMU_RQSAR(Q_num)));
	}
}
  1132. void mu3d_hal_flush_qmu(DEV_INT32 Q_num, USB_DIR dir)
  1133. {
  1134. TGPD *gpd_current;
  1135. struct USB_REQ *req = mu3d_hal_get_req(Q_num, dir);
  1136. os_printk(K_CRIT, "%s flush QMU %s\n", __func__, ((dir == USB_TX) ? "TX" : "RX"));
  1137. if (dir == USB_TX) {
  1138. /*Stop QMU */
  1139. mu3d_hal_stop_qmu(Q_num, USB_TX);
  1140. /*Get TX Queue Current Pointer Register */
  1141. /* QMU GPD address --> CPU DMA address */
  1142. gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_TQCPR(Q_num)));
  1143. /*If gpd_current = 0, it means QMU has not yet to execute GPD in QMU. */
  1144. if (!gpd_current) {
  1145. /*Get TX Queue Starting Address Register */
  1146. /* QMU GPD address --> CPU DMA address */
  1147. gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_TQSAR(Q_num)));
  1148. }
  1149. /*Switch physical to virtual address */
  1150. os_printk(K_CRIT, "gpd_current(P) %p\n", gpd_current);
  1151. gpd_current = gpd_phys_to_virt(gpd_current, USB_TX, Q_num);
  1152. os_printk(K_CRIT, "gpd_current(V) %p\n", gpd_current);
  1153. /*Reset the TX GPD list state */
  1154. Tx_gpd_end[Q_num] = Tx_gpd_last[Q_num] = gpd_current;
  1155. gpd_ptr_align(dir, Q_num, Tx_gpd_end[Q_num]);
  1156. free_gpd(dir, Q_num);
  1157. /*FIXME: Do not know why... */
  1158. os_writel(USB_QMU_TQSAR(Q_num),
  1159. mu3d_hal_gpd_virt_to_phys(Tx_gpd_last[Q_num], USB_TX, Q_num));
  1160. os_printk(K_ERR, "USB_QMU_TQSAR %x\n", os_readl(USB_QMU_TQSAR(Q_num)));
  1161. req->complete = true;
  1162. /* os_printk(K_ERR,"TxQ %d Flush Now!\n", Q_num); */
  1163. } else if (dir == USB_RX) {
  1164. /*Stop QMU */
  1165. mu3d_hal_stop_qmu(Q_num, USB_RX);
  1166. /*Get RX Queue Current Pointer Register */
  1167. /* QMU GPD address --> CPU DMA address */
  1168. gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_RQCPR(Q_num)));
  1169. if (!gpd_current) {
  1170. /*Get RX Queue Starting Address Register */
  1171. /* QMU GPD address --> CPU DMA address */
  1172. gpd_current = (TGPD *) (uintptr_t) (os_readl(USB_QMU_RQSAR(Q_num)));
  1173. }
  1174. /*Switch physical to virtual address */
  1175. os_printk(K_CRIT, "gpd_current(P) %p\n", gpd_current);
  1176. gpd_current = gpd_phys_to_virt(gpd_current, USB_RX, Q_num);
  1177. os_printk(K_CRIT, "gpd_current(V) %p\n", gpd_current);
  1178. /*Reset the RX GPD list state */
  1179. Rx_gpd_end[Q_num] = Rx_gpd_last[Q_num] = gpd_current;
  1180. gpd_ptr_align(dir, Q_num, Rx_gpd_end[Q_num]);
  1181. free_gpd(dir, Q_num);
  1182. /*FIXME: Do not know why... */
  1183. os_writel(USB_QMU_RQSAR(Q_num),
  1184. mu3d_hal_gpd_virt_to_phys(Rx_gpd_end[Q_num], USB_RX, Q_num));
  1185. os_printk(K_ERR, "USB_QMU_RQSAR %x\n", os_readl(USB_QMU_RQSAR(Q_num)));
  1186. req->complete = true;
  1187. /* os_printk(K_ERR,"RxQ %d Flush Now!\n", Q_num); */
  1188. }
  1189. }
  1190. #endif