mu3d_hal_qmu_drv.c

#ifdef USE_SSUSB_QMU
#include <linux/dma-mapping.h>
/* #include "mu3d_hal_osal.h" */
/* #define _MTK_QMU_DRV_EXT_ */
#include "mu3d_hal_qmu_drv.h"
/* #undef _MTK_QMU_DRV_EXT_ */
#include "mu3d_hal_usb_drv.h"
#include "mu3d_hal_hw.h"
#include "ssusb_io.h"

static struct ssusb_gpd_range Rx_gpd_List[15];
static struct ssusb_gpd_range Tx_gpd_List[15];

/**
 * get_next_gpd - advance the ring and return the current free gpd
 * @args - arg1: dir, arg2: ep number
 */
static struct ssusb_gpd *get_next_gpd(USB_DIR dir, u32 num)
{
	struct ssusb_gpd *ptr;

	if (dir == USB_RX) {
		ptr = Rx_gpd_List[num].next;
		Rx_gpd_List[num].next = Rx_gpd_List[num].next + 1;
		/* Wrap around to the start of the ring when we pass the end. */
		if (Rx_gpd_List[num].next > Rx_gpd_List[num].end)
			Rx_gpd_List[num].next = Rx_gpd_List[num].start;
	} else {
		ptr = Tx_gpd_List[num].next;
		Tx_gpd_List[num].next = Tx_gpd_List[num].next + 1;
		if (Tx_gpd_List[num].next > Tx_gpd_List[num].end)
			Tx_gpd_List[num].next = Tx_gpd_List[num].start;
	}
	return ptr;
}
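
/*
 * Example (illustrative sketch, not part of the driver): assuming
 * MAX_GPD_NUM == 4, the ring for one endpoint behaves as below. "next"
 * always stays one slot ahead of the last GPD handed out (init_gpd_list
 * seeds it at start + 1) and wraps from end back to start.
 *
 *	struct ssusb_gpd *a = get_next_gpd(USB_TX, 1);	// returns start + 1
 *	struct ssusb_gpd *b = get_next_gpd(USB_TX, 1);	// returns start + 2
 *	struct ssusb_gpd *c = get_next_gpd(USB_TX, 1);	// returns start + 3 (== end)
 *	struct ssusb_gpd *d = get_next_gpd(USB_TX, 1);	// wraps: returns start
 */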
static struct ssusb_gpd *mu3d_get_gpd_from_dma(USB_DIR dir, int num, dma_addr_t gpd_dma_addr)
{
	dma_addr_t dma_base = (dir == USB_RX) ? Rx_gpd_List[num].dma : Tx_gpd_List[num].dma;
	struct ssusb_gpd *gpd_head =
	    (dir == USB_RX) ? Rx_gpd_List[num].start : Tx_gpd_List[num].start;
	unsigned int i = (gpd_dma_addr - dma_base) / sizeof(struct ssusb_gpd);

	/*
	 * i == MAX_GPD_NUM is one past the end of the pool: in fact an
	 * overflow, so never dereference it; it is only valid for comparison.
	 */
	if (i > MAX_GPD_NUM)
		return NULL;
	return gpd_head + i;
}
static dma_addr_t mu3d_gpd_virt_to_dma(USB_DIR dir, int num, struct ssusb_gpd *ptr)
{
	dma_addr_t dma_base = (dir == USB_RX) ? Rx_gpd_List[num].dma : Tx_gpd_List[num].dma;
	struct ssusb_gpd *gpd_head =
	    (dir == USB_RX) ? Rx_gpd_List[num].start : Tx_gpd_List[num].start;
	unsigned int offset;

	if (!ptr)
		return 0;
	offset = ptr - gpd_head;
	if (offset > MAX_GPD_NUM)
		return 0;
	return dma_base + (offset * sizeof(*ptr));
}
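
/*
 * Example (illustrative sketch): the two helpers above are inverses over the
 * coherent pool, so converting a QMU completion-pointer register value to a
 * virtual GPD and back again is loss-free; mbase stands in for the caller's
 * mapped register base:
 *
 *	dma_addr_t cpr = mu3d_readl(mbase, USB_QMU_TQCPR(1));
 *	struct ssusb_gpd *gpd = mu3d_get_gpd_from_dma(USB_TX, 1, cpr);
 *	WARN_ON(gpd && mu3d_gpd_virt_to_dma(USB_TX, 1, gpd) != cpr);
 */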
/**
 * init_gpd_list - initialize gpd management list
 * @args - arg1: dir, arg2: ep number, arg3: gpd virtual addr, arg4: gpd dma addr, arg5: gpd number
 */
static void init_gpd_list(USB_DIR dir, int num, struct ssusb_gpd *ptr, dma_addr_t io_ptr, u32 size)
{
	if (dir == USB_RX) {
		Rx_gpd_List[num].start = ptr;
		Rx_gpd_List[num].enqueue = ptr;
		Rx_gpd_List[num].dequeue = ptr;
		Rx_gpd_List[num].end = ptr + size - 1;	/* last valid gpd in the pool */
		Rx_gpd_List[num].next = ptr + 1;
		qmu_dbg(K_DEBUG, "Rx_gpd_List[%d].start=%p, next=%p, end=%p\n",
			num, Rx_gpd_List[num].start, Rx_gpd_List[num].next, Rx_gpd_List[num].end);
		qmu_dbg(K_DEBUG, "virtual start=%p, end=%p\n", ptr, ptr + size);
		qmu_dbg(K_DEBUG, "dma addr start=%#lx, end=%#lx\n", (unsigned long)io_ptr,
			(unsigned long)(io_ptr + size * sizeof(*ptr)));
		qmu_dbg(K_DEBUG, "dma addr start=%#lx, end=%#lx\n",
			(unsigned long)mu3d_gpd_virt_to_dma(dir, num, ptr),
			(unsigned long)mu3d_gpd_virt_to_dma(dir, num, (ptr + size)));
	} else {
		Tx_gpd_List[num].start = ptr;
		Tx_gpd_List[num].enqueue = ptr;
		Tx_gpd_List[num].dequeue = ptr;
		Tx_gpd_List[num].end = ptr + size - 1;	/* last valid gpd in the pool */
		Tx_gpd_List[num].next = ptr + 1;
		qmu_dbg(K_DEBUG, "Tx_gpd_List[%d].start=%p, next=%p, end=%p\n",
			num, Tx_gpd_List[num].start, Tx_gpd_List[num].next, Tx_gpd_List[num].end);
		qmu_dbg(K_DEBUG, "virtual start=%p, end=%p\n", ptr, ptr + size);
		qmu_dbg(K_DEBUG, "dma addr start=%#lx, end=%#lx\n", (unsigned long)io_ptr,
			(unsigned long)(io_ptr + size * sizeof(*ptr)));
		qmu_dbg(K_DEBUG, "dma addr start=%#lx, end=%#lx\n",
			(unsigned long)mu3d_gpd_virt_to_dma(dir, num, ptr),
			(unsigned long)mu3d_gpd_virt_to_dma(dir, num, (ptr + size)));
	}
}
static void reset_gpd_list(USB_DIR dir, int num)
{
	if (dir == USB_RX) {
		Rx_gpd_List[num].enqueue = Rx_gpd_List[num].start;
		Rx_gpd_List[num].dequeue = Rx_gpd_List[num].enqueue;
		Rx_gpd_List[num].next = Rx_gpd_List[num].enqueue + 1;
	} else {
		Tx_gpd_List[num].enqueue = Tx_gpd_List[num].start;
		Tx_gpd_List[num].dequeue = Tx_gpd_List[num].enqueue;
		Tx_gpd_List[num].next = Tx_gpd_List[num].enqueue + 1;
	}
}
/**
 * free_gpd - zero out the gpd pool of one ep (the memory itself stays allocated)
 * @args - arg1: dir, arg2: ep number
 */
static void free_gpd(USB_DIR dir, int num)
{
	if (dir == USB_RX)
		memset(Rx_gpd_List[num].start, 0, MAX_GPD_NUM * sizeof(struct ssusb_gpd));
	else
		memset(Tx_gpd_List[num].start, 0, MAX_GPD_NUM * sizeof(struct ssusb_gpd));
}
/**
 * mu3d_hal_alloc_qmu_mem - allocate gpd memory for all qmu eps
 */
void mu3d_hal_alloc_qmu_mem(struct musb *musb)
{
	u32 i, size;
	struct ssusb_gpd *ptr;

	for (i = 1; i <= MAX_QMU_EP; i++) {
		/* Allocate Rx GPD pool */
		size = sizeof(struct ssusb_gpd) * MAX_GPD_NUM;
		ptr = (struct ssusb_gpd *)dma_alloc_coherent(musb->controller, size,
							     &Rx_gpd_List[i].dma, GFP_KERNEL);
		if (!ptr)
			continue;	/* do not memset/init a failed allocation */
		memset(ptr, 0, size);
		init_gpd_list(USB_RX, i, ptr, Rx_gpd_List[i].dma, MAX_GPD_NUM);
		qmu_dbg(K_DEBUG, "ALLOC RX GPD End [%d] Virtual Mem=%p, DMA addr=%#lx\n", i,
			Rx_gpd_List[i].enqueue, (unsigned long)Rx_gpd_List[i].dma);
		TGPD_CLR_FLAGS_HWO(Rx_gpd_List[i].enqueue);

		/* Allocate Tx GPD pool */
		size = sizeof(struct ssusb_gpd) * MAX_GPD_NUM;
		ptr = (struct ssusb_gpd *)dma_alloc_coherent(musb->controller, size,
							     &Tx_gpd_List[i].dma, GFP_KERNEL);
		if (!ptr)
			continue;
		memset(ptr, 0, size);
		init_gpd_list(USB_TX, i, ptr, Tx_gpd_List[i].dma, MAX_GPD_NUM);
		qmu_dbg(K_DEBUG, "ALLOC TX GPD End [%d] Virtual Mem=%p, DMA addr=%#lx\n", i,
			Tx_gpd_List[i].enqueue, (unsigned long)Tx_gpd_List[i].dma);
		TGPD_CLR_FLAGS_HWO(Tx_gpd_List[i].enqueue);
	}
}
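
/*
 * Sizing sketch (illustrative): each endpoint 1..MAX_QMU_EP gets one coherent
 * pool of MAX_GPD_NUM GPDs per direction, so the total footprint is
 *
 *	2 * MAX_QMU_EP * MAX_GPD_NUM * sizeof(struct ssusb_gpd)
 *
 * bytes. Index 0 of Rx_gpd_List/Tx_gpd_List stays unused: the loops start at
 * ep 1, ep 0 being handled outside the QMU path.
 */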
void mu3d_hal_free_qmu_mem(struct musb *musb)
{
	u32 i, size;

	for (i = 1; i <= MAX_QMU_EP; i++) {
		size = sizeof(struct ssusb_gpd) * MAX_GPD_NUM;
		if (Rx_gpd_List[i].start) {
			dma_free_coherent(musb->controller, size, Rx_gpd_List[i].start,
					  Rx_gpd_List[i].dma);
			Rx_gpd_List[i].start = NULL;
		}
		if (Tx_gpd_List[i].start) {
			dma_free_coherent(musb->controller, size, Tx_gpd_List[i].start,
					  Tx_gpd_List[i].dma);
			Tx_gpd_List[i].start = NULL;
		}
	}
}
/**
 * mu3d_hal_init_qmu - initialize qmu
 */
void mu3d_hal_init_qmu(struct musb *musb)
{
	void __iomem *mbase = musb->mac_base;
	u32 i;

	/* Initialize QMU Tx/Rx start addresses. */
	for (i = 1; i <= MAX_QMU_EP; i++) {
		qmu_dbg(K_INFO, "==EP[%d]==Start addr RXQ=%#lx, TXQ=%#lx\n", i,
			(unsigned long)Rx_gpd_List[i].dma, (unsigned long)Tx_gpd_List[i].dma);
		mu3d_writel(mbase, USB_QMU_RQSAR(i), Rx_gpd_List[i].dma);
		mu3d_writel(mbase, USB_QMU_TQSAR(i), Tx_gpd_List[i].dma);
		reset_gpd_list(USB_RX, i);
		reset_gpd_list(USB_TX, i);
	}

	/* Enable QMU interrupts. */
	mu3d_writel(mbase, U3D_QIESR1, TXQ_EMPTY_IESR | TXQ_CSERR_IESR | TXQ_LENERR_IESR |
		    RXQ_EMPTY_IESR | RXQ_CSERR_IESR | RXQ_LENERR_IESR | RXQ_ZLPERR_IESR);
	mu3d_writel(mbase, U3D_EPIESR, EP0ISR);
}
/**
 * mu3d_hal_cal_checksum - calculate check sum
 * @args - arg1: data buffer, arg2: data length
 */
static noinline u8 mu3d_hal_cal_checksum(u8 *data, int len)
{
	u8 *uDataPtr, ckSum;
	int i;

	/* Clear byte 1 (the checksum field) so it does not contribute to the sum. */
	*(data + 1) = 0x0;
	uDataPtr = data;
	ckSum = 0;
	/*
	 * For ALPS01572117, the calculated QMU checksum was wrong (dumped the
	 * memory values directly). After reviewing this function we did not
	 * find any flaw, and still cannot tell where the wrong value comes
	 * from; maybe memory corruption or a compiler problem. Add "noinline"
	 * and "mb();" to prevent this problem.
	 */
	mb();
	for (i = 0; i < len; i++)
		ckSum += *(uDataPtr + i);
	return 0xFF - ckSum;
}
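
/*
 * Worked example (illustrative): for a 3-byte buffer {0x10, 0xAA, 0x20}, the
 * checksum byte at offset 1 is cleared first, so the sum is
 * 0x10 + 0x00 + 0x20 = 0x30 and the function returns 0xFF - 0x30 = 0xCF.
 * Re-summing all three bytes of the patched buffer {0x10, 0xCF, 0x20} then
 * yields 0xFF, which is the invariant the hardware can verify.
 */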
/**
 * mu3d_hal_resume_qmu - resume qmu function
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_resume_qmu(struct musb *musb, int q_num, USB_DIR dir)
{
	void __iomem *mbase = musb->mac_base;

	if (dir == USB_TX) {
		mu3d_writel(mbase, USB_QMU_TQCSR(q_num), QMU_Q_RESUME);
		if (!mu3d_readl(mbase, USB_QMU_TQCSR(q_num))) {
			qmu_dbg(K_WARNIN, "[ERROR]" "%s TQCSR[%d]=%x\n", __func__, q_num,
				mu3d_readl(mbase, USB_QMU_TQCSR(q_num)));
			/* Retry the resume once if the queue did not take it. */
			mu3d_writel(mbase, USB_QMU_TQCSR(q_num), QMU_Q_RESUME);
			qmu_dbg(K_WARNIN, "[ERROR]" "%s TQCSR[%d]=%x\n", __func__, q_num,
				mu3d_readl(mbase, USB_QMU_TQCSR(q_num)));
		}
	} else if (dir == USB_RX) {
		mu3d_writel(mbase, USB_QMU_RQCSR(q_num), QMU_Q_RESUME);
		if (!mu3d_readl(mbase, USB_QMU_RQCSR(q_num))) {
			qmu_dbg(K_WARNIN, "[ERROR]" "%s RQCSR[%d]=%x\n", __func__, q_num,
				mu3d_readl(mbase, USB_QMU_RQCSR(q_num)));
			mu3d_writel(mbase, USB_QMU_RQCSR(q_num), QMU_Q_RESUME);
			qmu_dbg(K_WARNIN, "[ERROR]" "%s RQCSR[%d]=%x\n", __func__, q_num,
				mu3d_readl(mbase, USB_QMU_RQCSR(q_num)));
		}
	} else {
		qmu_dbg(K_ERR, "%s wrong direction!!!\n", __func__);
		BUG_ON(1);
	}
}
/**
 * mu3d_hal_prepare_tx_gpd - prepare tx gpd/bd
 * @args - arg1: gpd address, arg2: data buffer address, arg3: data length, arg4: ep number, arg5: with bd or not, arg6: write hwo bit or not, arg7: write ioc bit or not, arg8: bps, arg9: zlp
 */
static struct ssusb_gpd *mu3d_hal_prepare_tx_gpd(struct ssusb_gpd *gpd, dma_addr_t pBuf,
						 u32 data_len, u8 ep_num, u8 _is_bdp, u8 isHWO,
						 u8 ioc, u8 bps, u8 zlp)
{
	struct ssusb_gpd *enq;

	/* Set the actual data pointer to "DATA Buffer" */
	TGPD_SET_DATA(gpd, pBuf);
	/* Clear "BDP(Buffer Descriptor Present)" flag */
	TGPD_CLR_FORMAT_BDP(gpd);
	/*
	 * "Data Buffer Length" =
	 * 0 (if data length > GPD buffer length, use BDs),
	 * data_len (if data length < GPD buffer length, only use GPD)
	 */
	TGPD_SET_BUF_LEN(gpd, data_len);
	/* "GPD extension length" = 0. Does not use GPD EXT!! */
	TGPD_SET_EXT_LEN(gpd, 0);
	if (zlp)
		TGPD_SET_FORMAT_ZLP(gpd);
	else
		TGPD_CLR_FORMAT_ZLP(gpd);
	/* Default: bps=false */
	TGPD_CLR_FORMAT_BPS(gpd);
	/* Default: ioc=true */
	TGPD_SET_FORMAT_IOC(gpd);
	/* Get the next GPD */
	Tx_gpd_List[ep_num].enqueue = get_next_gpd(USB_TX, ep_num);
	enq = Tx_gpd_List[ep_num].enqueue;
	qmu_dbg(K_DEBUG, "[TX]" "Tx_gpd_end[%d]=%p\n", ep_num, enq);
	/* Initialize the new GPD */
	memset(enq, 0, sizeof(struct ssusb_gpd));
	/* Clear "HWO(Hardware Own)" flag */
	TGPD_CLR_FLAGS_HWO(enq);
	/* Set "Next GPD pointer" to the next GPD */
	TGPD_SET_NEXT(gpd, mu3d_gpd_virt_to_dma(USB_TX, ep_num, enq));
	/* Default: isHWO=true */
	TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH);	/* Set GPD Checksum */
	TGPD_SET_FLAGS_HWO(gpd);	/* Set HWO flag last, handing the GPD to hardware */
	return gpd;
}
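
/*
 * Ordering note (sketch of the producer pattern used above): the new tail GPD
 * is zeroed and its HWO bit cleared *before* the current GPD's Next pointer
 * and HWO bit are written, so the hardware never follows a link to an
 * uninitialized descriptor:
 *
 *	TGPD_CLR_FLAGS_HWO(enq);		// tail stays software-owned
 *	TGPD_SET_NEXT(gpd, dma_of(enq));	// link current -> tail
 *	TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH);
 *	TGPD_SET_FLAGS_HWO(gpd);		// hand current GPD to hardware
 *
 * dma_of() is shorthand here for mu3d_gpd_virt_to_dma().
 */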
/**
 * mu3d_hal_prepare_rx_gpd - prepare rx gpd/bd
 * @args - arg1: gpd address, arg2: data buffer address, arg3: data length, arg4: ep number, arg5: with bd or not, arg6: write hwo bit or not, arg7: write ioc bit or not, arg8: bps, arg9: max packet size
 */
static struct ssusb_gpd *mu3d_hal_prepare_rx_gpd(struct ssusb_gpd *gpd, dma_addr_t pBuf,
						 u32 data_len, u8 ep_num, u8 _is_bdp, u8 isHWO,
						 u8 ioc, u8 bps, u32 cMaxPacketSize)
{
	struct ssusb_gpd *enq;

	qmu_dbg(K_DEBUG, "[RX]" "%s gpd=%p, epnum=%d, len=%d\n", __func__, gpd, ep_num, data_len);
	/* Set the actual data pointer to "DATA Buffer" */
	TGPD_SET_DATA(gpd, pBuf);
	/* Clear "BDP(Buffer Descriptor Present)" flag */
	TGPD_CLR_FORMAT_BDP(gpd);
	/*
	 * Set "Allowed Data Buffer Length" =
	 * 0 (if data length > GPD buffer length, use BDs),
	 * data_len (if data length < GPD buffer length, only use GPD)
	 */
	TGPD_SET_DATA_BUF_LEN(gpd, data_len);
	/* Set "Transferred Data Length" = 0 */
	TGPD_SET_BUF_LEN(gpd, 0);
	/* Default: bps=false */
	TGPD_CLR_FORMAT_BPS(gpd);
	/* Default: ioc=true */
	TGPD_SET_FORMAT_IOC(gpd);
	/* Get the next GPD */
	Rx_gpd_List[ep_num].enqueue = get_next_gpd(USB_RX, ep_num);
	enq = Rx_gpd_List[ep_num].enqueue;
	qmu_dbg(K_DEBUG, "[RX]" "Rx_gpd_end[%d]=%p gpd=%p\n", ep_num, enq, gpd);
	/* Initialize the new GPD */
	memset(enq, 0, sizeof(struct ssusb_gpd));
	/* Clear "HWO(Hardware Own)" flag */
	TGPD_CLR_FLAGS_HWO(enq);
	/* Set "Next GPD pointer" to the next GPD */
	TGPD_SET_NEXT(gpd, mu3d_gpd_virt_to_dma(USB_RX, ep_num, enq));
	/* Default: isHWO=true */
	TGPD_SET_CHKSUM(gpd, CHECKSUM_LENGTH);	/* Set GPD Checksum */
	TGPD_SET_FLAGS_HWO(gpd);	/* Set HWO flag */
	return gpd;
}
/**
 * mu3d_hal_insert_transfer_gpd - insert new gpd/bd
 * @args - arg1: ep number, arg2: dir, arg3: data buffer, arg4: data length, arg5: write hwo bit or not, arg6: write ioc bit or not, arg7: bps, arg8: zlp, arg9: max packet size
 */
void mu3d_hal_insert_transfer_gpd(int ep_num, USB_DIR dir, dma_addr_t buf,
				  u32 count, u8 isHWO, u8 ioc, u8 bps, u8 zlp, u32 maxp)
{
	struct ssusb_gpd *gpd;

	if (dir == USB_TX) {
		gpd = Tx_gpd_List[ep_num].enqueue;
		mu3d_hal_prepare_tx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, zlp);
	} else if (dir == USB_RX) {
		gpd = Rx_gpd_List[ep_num].enqueue;
		mu3d_hal_prepare_rx_gpd(gpd, buf, count, ep_num, IS_BDP, isHWO, ioc, bps, maxp);
	}
}
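
/*
 * Usage sketch (illustrative; buf_dma and len are hypothetical locals mapped
 * by the caller, and 512 stands in for the endpoint's max packet size):
 * queue one TX transfer on ep 1, then kick the queue.
 *
 *	mu3d_hal_insert_transfer_gpd(1, USB_TX, buf_dma, len,
 *				     true, true, false, false, 512);
 *	mu3d_hal_resume_qmu(musb, 1, USB_TX);
 */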
/**
 * mu3d_hal_start_qmu - start qmu function
 * (QMU flow: mu3d_hal_init_qmu -> mu3d_hal_start_qmu -> mu3d_hal_insert_transfer_gpd -> mu3d_hal_resume_qmu)
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_start_qmu(struct musb *musb, int q_num, USB_DIR dir)
{
	void __iomem *mbase = musb->mac_base;
	u32 txcsr;
	u32 rxcsr;

	if (dir == USB_TX) {
		txcsr = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_TX1CSR0, q_num, txcsr | TX_DMAREQEN);
		mu3d_setmsk(mbase, U3D_QCR0, QMU_TX_CS_EN(q_num));
#if (TXZLP == HW_MODE)
		mu3d_clrmsk(mbase, U3D_QCR1, QMU_TX_ZLP(q_num));
		mu3d_setmsk(mbase, U3D_QCR2, QMU_TX_ZLP(q_num));
#elif (TXZLP == GPD_MODE)
		mu3d_setmsk(mbase, U3D_QCR1, QMU_TX_ZLP(q_num));
#endif
		mu3d_setmsk(mbase, U3D_QEMIESR, QMU_TX_EMPTY(q_num));
		mu3d_writel(mbase, U3D_TQERRIESR0, QMU_TX_LEN_ERR(q_num) | QMU_TX_CS_ERR(q_num));
		qmu_dbg(K_DEBUG, "USB_QMU_TQCSR:0x%08X\n", mu3d_readl(mbase, USB_QMU_TQCSR(q_num)));
		if (mu3d_readl(mbase, USB_QMU_TQCSR(q_num)) & QMU_Q_ACTIVE) {
			qmu_dbg(K_INFO, "Tx %d Active Now!\n", q_num);
			return;
		}
		mu3d_writel(mbase, USB_QMU_TQCSR(q_num), QMU_Q_START);
		qmu_dbg(K_DEBUG, "USB_QMU_TQCSR:0x%08X\n", mu3d_readl(mbase, USB_QMU_TQCSR(q_num)));
	} else if (dir == USB_RX) {
		rxcsr = mu3d_xcsr_readl(mbase, U3D_RX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_RX1CSR0, q_num, rxcsr | RX_DMAREQEN);
		mu3d_setmsk(mbase, U3D_QCR0, QMU_RX_CS_EN(q_num));
#ifdef CFG_RX_ZLP_EN
		mu3d_setmsk(mbase, U3D_QCR3, QMU_RX_ZLP(q_num));
#else
		mu3d_clrmsk(mbase, U3D_QCR3, QMU_RX_ZLP(q_num));
#endif
#ifdef CFG_RX_COZ_EN
		mu3d_setmsk(mbase, U3D_QCR3, QMU_RX_COZ(q_num));
#else
		mu3d_clrmsk(mbase, U3D_QCR3, QMU_RX_COZ(q_num));
#endif
		mu3d_setmsk(mbase, U3D_QEMIESR, QMU_RX_EMPTY(q_num));
		mu3d_writel(mbase, U3D_RQERRIESR0, QMU_RX_LEN_ERR(q_num) | QMU_RX_CS_ERR(q_num));
		mu3d_writel(mbase, U3D_RQERRIESR1, QMU_RX_EP_ERR(q_num) | QMU_RX_ZLP_ERR(q_num));
		qmu_dbg(K_DEBUG, "USB_QMU_RQCSR:0x%08X\n", mu3d_readl(mbase, USB_QMU_RQCSR(q_num)));
		if (mu3d_readl(mbase, USB_QMU_RQCSR(q_num)) & QMU_Q_ACTIVE) {
			qmu_dbg(K_INFO, "Rx %d Active Now!\n", q_num);
			return;
		}
		mu3d_writel(mbase, USB_QMU_RQCSR(q_num), QMU_Q_START);
		qmu_dbg(K_DEBUG, "USB_QMU_RQCSR:0x%08X\n", mu3d_readl(mbase, USB_QMU_RQCSR(q_num)));
	}
#if (CHECKSUM_TYPE == CS_16B)
	mu3d_setmsk(mbase, U3D_QCR0, CS16B_EN);
#else
	mu3d_clrmsk(mbase, U3D_QCR0, CS16B_EN);
#endif
}
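
/*
 * Bring-up sketch (illustrative) of the QMU flow named in the comment above,
 * for a single TX endpoint; musb, buf_dma and len belong to the caller, and
 * 512 stands in for the endpoint's max packet size:
 *
 *	mu3d_hal_alloc_qmu_mem(musb);
 *	mu3d_hal_init_qmu(musb);
 *	mu3d_hal_start_qmu(musb, 1, USB_TX);
 *	mu3d_hal_insert_transfer_gpd(1, USB_TX, buf_dma, len,
 *				     true, true, false, false, 512);
 *	mu3d_hal_resume_qmu(musb, 1, USB_TX);
 */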
/**
 * mu3d_hal_stop_qmu - stop qmu function (after qmu stops, the fifo should be flushed)
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_stop_qmu(struct musb *musb, int q_num, USB_DIR dir)
{
	void __iomem *mbase = musb->mac_base;

	if (dir == USB_TX) {
		if (!(mu3d_readl(mbase, USB_QMU_TQCSR(q_num)) & QMU_Q_ACTIVE)) {
			qmu_dbg(K_DEBUG, "Tx%d inactive now!\n", q_num);
			return;
		}
		mu3d_writel(mbase, USB_QMU_TQCSR(q_num), QMU_Q_STOP);
		/* Busy-wait until the queue deasserts ACTIVE. */
		while (mu3d_readl(mbase, USB_QMU_TQCSR(q_num)) & QMU_Q_ACTIVE)
			;
		qmu_dbg(K_CRIT, "Tx%d stop now!\n", q_num);
	} else if (dir == USB_RX) {
		if (!(mu3d_readl(mbase, USB_QMU_RQCSR(q_num)) & QMU_Q_ACTIVE)) {
			qmu_dbg(K_DEBUG, "Rx%d inactive now!\n", q_num);
			return;
		}
		mu3d_writel(mbase, USB_QMU_RQCSR(q_num), QMU_Q_STOP);
		while (mu3d_readl(mbase, USB_QMU_RQCSR(q_num)) & QMU_Q_ACTIVE)
			;
		qmu_dbg(K_CRIT, "Rx%d stop now!\n", q_num);
	}
}
/**
 * mu3d_hal_send_stall - send stall
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_send_stall(struct musb *musb, int q_num, USB_DIR dir)
{
	void __iomem *mbase = musb->mac_base;
	u32 tmp;

	if (dir == USB_TX) {
		tmp = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_TX1CSR0, q_num, tmp | TX_SENDSTALL);
		/* Busy-wait until the stall handshake is reported. */
		while (!(mu3d_xcsr_readl(mbase, U3D_TX1CSR0, q_num) & TX_SENTSTALL))
			;
		/* Acknowledge SENTSTALL by writing it back, then clear SENDSTALL. */
		tmp = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_TX1CSR0, q_num, tmp | TX_SENTSTALL);
		tmp = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_TX1CSR0, q_num, tmp & ~TX_SENDSTALL);
	} else if (dir == USB_RX) {
		tmp = mu3d_xcsr_readl(mbase, U3D_RX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_RX1CSR0, q_num, tmp | RX_SENDSTALL);
		while (!(mu3d_xcsr_readl(mbase, U3D_RX1CSR0, q_num) & RX_SENTSTALL))
			;
		tmp = mu3d_xcsr_readl(mbase, U3D_RX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_RX1CSR0, q_num, tmp | RX_SENTSTALL);
		tmp = mu3d_xcsr_readl(mbase, U3D_RX1CSR0, q_num);
		mu3d_xcsr_writel(mbase, U3D_RX1CSR0, q_num, tmp & ~RX_SENDSTALL);
	}
	qmu_dbg(K_CRIT, "%s %s-EP[%d] sent stall\n", __func__, ((dir == USB_TX) ? "TX" : "RX"),
		q_num);
}
/**
 * mu3d_hal_restart_qmu - clear toggle (or sequence) number and start qmu
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_restart_qmu(struct musb *musb, int q_num, USB_DIR dir)
{
	void __iomem *mbase = musb->mac_base;
	u32 ep_rst;

	qmu_dbg(K_CRIT, "%s : Reset %s-EP[%d]\n", __func__, ((dir == USB_TX) ? "TX" : "RX"), q_num);
	if (dir == USB_TX) {
		/* TX ep reset bits live in the upper half of U3D_EP_RST. */
		ep_rst = BIT16 << q_num;
		mu3d_writel(mbase, U3D_EP_RST, ep_rst);
		mdelay(1);
		mu3d_writel(mbase, U3D_EP_RST, 0);
	} else {
		ep_rst = 1 << q_num;
		mu3d_writel(mbase, U3D_EP_RST, ep_rst);
		mdelay(1);
		mu3d_writel(mbase, U3D_EP_RST, 0);
	}
	mu3d_hal_start_qmu(musb, q_num, dir);
}
/**
 * mu3d_hal_flush_qmu - stop qmu and align the qmu start ptr to the current ptr
 * @args - arg1: ep number, arg2: dir
 */
void mu3d_hal_flush_qmu(struct musb *musb, int q_num, USB_DIR dir)
{
	qmu_dbg(K_CRIT, "%s flush QMU %s\n", __func__, ((dir == USB_TX) ? "TX" : "RX"));
	/* Stop QMU */
	mu3d_hal_stop_qmu(musb, q_num, dir);
	free_gpd(dir, q_num);
	reset_gpd_list(dir, q_num);
}
/*
 * 1. Find the last gpd HW has executed and update Tx_gpd_List[].dequeue.
 * 2. Set the flag for txstate to know that TX has been completed.
 * Caller: qmu_interrupt, after getting a QMU done interrupt with TX raised.
 *
 * NOTE: the request list may already be empty, as in the following case:
 * queue_tx --> qmu_interrupt (clear int pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile the second tx completes, and the
 * tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid this case, put qmu_done_tx directly in the ISR to process it.
 */
void qmu_done_tx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	struct ssusb_gpd *gpd = Tx_gpd_List[ep_num].dequeue;
	struct ssusb_gpd *gpd_current = NULL;
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_in;
	void __iomem *mbase = musb->mac_base;
	dma_addr_t gpd_dma = mu3d_readl(mbase, USB_QMU_TQCPR(ep_num));
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* Try to give back the request to the gadget driver. */
	req = next_request(musb_ep);
	if (req)
		request = &req->request;
	else
		return;

	/* Translate the PHY addr read from the QMU register to a VIR addr. */
	gpd_current = mu3d_get_gpd_from_dma(USB_TX, ep_num, gpd_dma);
	/*
	 * gpd (last)            gpd_current
	 *  |                         |
	 *  |-> GPD1 --> GPD2 --> GPD3 --> GPD4 --> GPD5 -|
	 *  |---------------------------------------------|
	 */
	qmu_dbg(K_DEBUG, "[TXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		__func__, ep_num, gpd, gpd_current, Tx_gpd_List[ep_num].enqueue);

	/* gpd_current should at least point one GPD past the previous last one. */
	if (gpd == gpd_current) {
		qmu_dbg(K_ERR, "[TXD][warn] %s gpd(%p) == gpd_current(%p)\n", __func__, gpd,
			gpd_current);
		return;
	}
	if (TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_dbg(K_DEBUG, "[TXD][ERROR] %s HWO=1, CPR=%x\n", __func__,
			mu3d_readl(mbase, USB_QMU_TQCPR(ep_num)));
		BUG_ON(1);
	}
	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
		if (!TGPD_GET_NEXT(gpd)) {
			qmu_dbg(K_ERR, "[TXD][ERROR]" "Next GPD is null!!\n");
			break;
		}
		gpd_dma = (dma_addr_t) TGPD_GET_NEXT(gpd);
		gpd = mu3d_get_gpd_from_dma(USB_TX, ep_num, gpd_dma);
		Tx_gpd_List[ep_num].dequeue = gpd;
		musb_g_giveback(musb_ep, request, 0);
		req = next_request(musb_ep);
		if (!req)	/* the request list may already be empty, see the NOTE above */
			break;
		request = &req->request;
	}
	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_dbg(K_ERR, "[TXD][ERROR]" "EP%d TQCSR=%x, TQSAR=%x, TQCPR=%x\n",
			ep_num, mu3d_readl(mbase, USB_QMU_TQCSR(ep_num)),
			mu3d_readl(mbase, USB_QMU_TQSAR(ep_num)),
			mu3d_readl(mbase, USB_QMU_TQCPR(ep_num)));
	}
	qmu_dbg(K_DEBUG, "[TXD]" "%s EP%d, Last=%p, End=%p, complete\n", __func__,
		ep_num, Tx_gpd_List[ep_num].dequeue, Tx_gpd_List[ep_num].enqueue);

	if (req != NULL) {
		if (request->length == 0) {
			u32 txcsr = 0;

			qmu_dbg(K_DEBUG, "[TXD]" "==Send ZLP== %p\n", req);
			/* NEED to add timeout process; this loop runs at most once. */
			while (!(mu3d_xcsr_readl(mbase, U3D_TX1CSR0, req->epnum) & TX_FIFOFULL)) {
				txcsr = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, req->epnum);
				mu3d_xcsr_writel(mbase, U3D_TX1CSR0, req->epnum,
						 txcsr & ~TX_DMAREQEN);
				txcsr = mu3d_xcsr_readl(mbase, U3D_TX1CSR0, req->epnum);
				mu3d_xcsr_writel(mbase, U3D_TX1CSR0, req->epnum,
						 txcsr | TX_TXPKTRDY);
				break;
			}
			qmu_dbg(K_DEBUG,
				"[TXD]" "Giveback ZLP of EP%d, actual:%d, length:%d %p\n",
				req->epnum, request->actual, request->length, request);
			musb_g_giveback(musb_ep, request, 0);
		}
	}
}
/*
 * When receiving an RXQ done interrupt, qmu_interrupt calls this function.
 * 1. Traverse the GPD/BD data structures to count the actual transferred length.
 * 2. Set the done flag to notify rxstate_qmu() to report status to the upper gadget driver.
 * Ported from proc_qmu_rx() in the test driver.
 * Caller: qmu_interrupt, after getting a QMU done interrupt with RX raised.
 */
void qmu_done_rx(struct musb *musb, u8 ep_num, unsigned long flags)
{
	struct ssusb_gpd *gpd = Rx_gpd_List[ep_num].dequeue;
	struct ssusb_gpd *gpd_current = NULL;
	struct musb_ep *musb_ep = &musb->endpoints[ep_num].ep_out;
	void __iomem *mbase = musb->mac_base;
	dma_addr_t gpd_dma = mu3d_readl(mbase, USB_QMU_RQCPR(ep_num));
	struct usb_request *request = NULL;
	struct musb_request *req;

	/* Try to give back the request to the gadget driver. */
	req = next_request(musb_ep);
	if (req)
		request = &req->request;
	else
		return;

	/* Translate the PHY addr read from the QMU register to a VIR addr. */
	gpd_current = mu3d_get_gpd_from_dma(USB_RX, ep_num, gpd_dma);
	qmu_dbg(K_DEBUG, "[RXD]" "%s EP%d, Last=%p, Current=%p, End=%p\n",
		__func__, ep_num, gpd, gpd_current, Rx_gpd_List[ep_num].enqueue);

	/* gpd_current should at least point one GPD past the previous last one. */
	if (gpd == gpd_current) {
		qmu_dbg(K_ERR, "[RXD][ERROR]" "%s gpd(%p) == gpd_current(%p)\n", __func__, gpd,
			gpd_current);
		qmu_dbg(K_ERR, "[RXD][ERROR]" "EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			ep_num, mu3d_readl(mbase, USB_QMU_RQCSR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQSAR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQCPR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQLDPR(ep_num)));
		return;
	}
	if (!gpd || !gpd_current)
		return;
	if (TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_dbg(K_ERR, "[RXD][ERROR]" "HWO=1!!\n");
		BUG_ON(1);
	}
	while (gpd != gpd_current && !TGPD_IS_FLAGS_HWO(gpd)) {
		u32 rcv_len = (u32) TGPD_GET_BUF_LEN(gpd);
		u32 buf_len = (u32) TGPD_GET_DATA_BUF_LEN(gpd);

		if (rcv_len > buf_len)
			qmu_dbg(K_ERR, "[RXD][ERROR]" "%s rcv(%d) > buf(%d) AUK!?\n", __func__,
				rcv_len, buf_len);
		qmu_dbg(K_DEBUG,
			"[RXD]" "gpd=%p ->HWO=%d, Next_GPD=%x, RcvLen=%d, BufLen=%d, pBuf=%#x\n",
			gpd, TGPD_GET_FLAG(gpd), TGPD_GET_NEXT(gpd), rcv_len, buf_len,
			TGPD_GET_DATA(gpd));
		request->actual += rcv_len;
		if (!TGPD_GET_NEXT(gpd) || !TGPD_GET_DATA(gpd)) {
			qmu_dbg(K_ERR, "[RXD][ERROR]" "%s EP%d ,gpd=%p\n", __func__, ep_num, gpd);
			BUG_ON(1);
		}
		gpd_dma = (dma_addr_t) TGPD_GET_NEXT(gpd);
		gpd = mu3d_get_gpd_from_dma(USB_RX, ep_num, gpd_dma);
		if (!gpd) {
			qmu_dbg(K_ERR, "[RXD][ERROR]" "%s EP%d ,gpd=%p\n", __func__, ep_num, gpd);
			BUG_ON(1);
		}
		Rx_gpd_List[ep_num].dequeue = gpd;
		musb_g_giveback(musb_ep, request, 0);
		req = next_request(musb_ep);
		if (!req)	/* the request list may already be empty, see the TX note */
			break;
		request = &req->request;
	}
	if (gpd != gpd_current && TGPD_IS_FLAGS_HWO(gpd)) {
		qmu_dbg(K_ERR, "[RXD][ERROR]" "gpd=%p\n", gpd);
		qmu_dbg(K_ERR, "[RXD][ERROR]" "EP%d RQCSR=%x, RQSAR=%x, RQCPR=%x, RQLDPR=%x\n",
			ep_num, mu3d_readl(mbase, USB_QMU_RQCSR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQSAR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQCPR(ep_num)),
			mu3d_readl(mbase, USB_QMU_RQLDPR(ep_num)));
	}
	qmu_dbg(K_DEBUG, "[RXD]" "%s EP%d, Last=%p, End=%p, complete\n", __func__,
		ep_num, Rx_gpd_List[ep_num].dequeue, Rx_gpd_List[ep_num].enqueue);
}
void qmu_done_tasklet(unsigned long data)
{
	unsigned int qmu_val;
	unsigned int i;
	unsigned long flags;
	struct musb *musb = (struct musb *)data;

	spin_lock_irqsave(&musb->lock, flags);
	qmu_val = musb->qmu_done_intr;
	musb->qmu_done_intr = 0;
	for (i = 1; i <= MAX_QMU_EP; i++) {
		if (qmu_val & QMU_RX_DONE(i))
			qmu_done_rx(musb, i, flags);
		if (qmu_val & QMU_TX_DONE(i))
			qmu_done_tx(musb, i, flags);
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
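
/*
 * Registration sketch (illustrative; qmu_tasklet is a hypothetical
 * tasklet_struct kept by the glue layer next to musb->qmu_done_intr):
 *
 *	tasklet_init(&qmu_tasklet, qmu_done_tasklet, (unsigned long)musb);
 *	// in the ISR, after latching the done bits into musb->qmu_done_intr:
 *	tasklet_schedule(&qmu_tasklet);
 */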
void qmu_exception_interrupt(struct musb *musb, u32 wQmuVal)
{
	void __iomem *mbase = musb->mac_base;
	u32 wErrVal;
	int i;

	/* Per-ep detail is decoded from the error status registers below. */
	if (wQmuVal & RXQ_CSERR_INT)
		qmu_dbg(K_ERR, "==Rx checksum error==\n");
	if (wQmuVal & RXQ_LENERR_INT)
		qmu_dbg(K_ERR, "==Rx length error==\n");
	if (wQmuVal & TXQ_CSERR_INT)
		qmu_dbg(K_ERR, "==Tx checksum error==\n");
	if (wQmuVal & TXQ_LENERR_INT)
		qmu_dbg(K_ERR, "==Tx length error==\n");

	if ((wQmuVal & RXQ_CSERR_INT) || (wQmuVal & RXQ_LENERR_INT)) {
		wErrVal = mu3d_readl(mbase, U3D_RQERRIR0);
		qmu_dbg(K_DEBUG, "Rx Queue error in QMU mode![0x%x]\n", (unsigned int)wErrVal);
		for (i = 1; i <= MAX_QMU_EP; i++) {
			if (wErrVal & QMU_RX_CS_ERR(i))
				qmu_dbg(K_ERR, "Rx %d CS error!\n", i);
			if (wErrVal & QMU_RX_LEN_ERR(i))
				qmu_dbg(K_ERR, "RX EP%d Recv Length error\n", i);
		}
		mu3d_writel(mbase, U3D_RQERRIR0, wErrVal);
	}
	if (wQmuVal & RXQ_ZLPERR_INT) {
		wErrVal = mu3d_readl(mbase, U3D_RQERRIR1);
		qmu_dbg(K_DEBUG, "Rx Queue error in QMU mode![0x%x]\n", (unsigned int)wErrVal);
		for (i = 1; i <= MAX_QMU_EP; i++) {
			if (wErrVal & QMU_RX_ZLP_ERR(i)) {
				/* FIXME: should _NOT_ get this error. But for now just accept it. */
				qmu_dbg(K_INFO, "RX EP%d Recv ZLP\n", i);
			}
		}
		mu3d_writel(mbase, U3D_RQERRIR1, wErrVal);
	}
	if ((wQmuVal & TXQ_CSERR_INT) || (wQmuVal & TXQ_LENERR_INT)) {
		wErrVal = mu3d_readl(mbase, U3D_TQERRIR0);
		qmu_dbg(K_DEBUG, "Tx Queue error in QMU mode![0x%x]\n", (unsigned int)wErrVal);
		for (i = 1; i <= MAX_QMU_EP; i++) {
			if (wErrVal & QMU_TX_CS_ERR(i))
				qmu_dbg(K_ERR, "Tx %d checksum error!\n", i);
			if (wErrVal & QMU_TX_LEN_ERR(i))
				qmu_dbg(K_ERR, "Tx %d buffer length error!\n", i);
		}
		mu3d_writel(mbase, U3D_TQERRIR0, wErrVal);
	}
	if ((wQmuVal & RXQ_EMPTY_INT) || (wQmuVal & TXQ_EMPTY_INT)) {
		u32 wEmptyVal = mu3d_readl(mbase, U3D_QEMIR);

		qmu_dbg(K_DEBUG, "%s Empty in QMU mode![0x%x]\n",
			(wQmuVal & TXQ_EMPTY_INT) ? "TX" : "RX", wEmptyVal);
		mu3d_writel(mbase, U3D_QEMIR, wEmptyVal);
	}
}
#endif