/* musb_gadget.c */
  1. /*
  2. * MUSB OTG driver peripheral support
  3. *
  4. * Copyright 2005 Mentor Graphics Corporation
  5. * Copyright (C) 2005-2006 by Texas Instruments
  6. * Copyright (C) 2006-2007 Nokia Corporation
  7. * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * version 2 as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  21. * 02110-1301 USA
  22. *
  23. * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
  24. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  25. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
  26. * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  27. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  28. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  29. * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  30. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  31. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  32. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  33. *
  34. */
  35. #include <linux/kernel.h>
  36. #include <linux/list.h>
  37. #include <linux/timer.h>
  38. #include <linux/module.h>
  39. #include <linux/smp.h>
  40. #include <linux/spinlock.h>
  41. #include <linux/delay.h>
  42. #include <linux/dma-mapping.h>
  43. #include <linux/slab.h>
  44. #include "musb_core.h"
  45. #include "mu3d_hal_osal.h"
  46. #include "mu3d_hal_qmu_drv.h"
  47. #include "mu3d_hal_usb_drv.h"
  48. #include "mu3d_hal_hw.h"
  49. #include "ssusb_qmu.h"
  50. #include "musb_gadget.h"
  51. /* MUSB PERIPHERAL status 3-mar-2006:
  52. *
  53. * - EP0 seems solid. It passes both USBCV and usbtest control cases.
  54. * Minor glitches:
  55. *
  56. * + remote wakeup to Linux hosts work, but saw USBCV failures;
  57. * in one test run (operator error?)
  58. * + endpoint halt tests -- in both usbtest and usbcv -- seem
  59. * to break when dma is enabled ... is something wrongly
  60. * clearing SENDSTALL?
  61. *
  62. * - Mass storage behaved ok when last tested. Network traffic patterns
  63. * (with lots of short transfers etc) need retesting; they turn up the
  64. * worst cases of the DMA, since short packets are typical but are not
  65. * required.
  66. *
  67. * - TX/IN
  68. * + both pio and dma behave in with network and g_zero tests
  69. * + no cppi throughput issues other than no-hw-queueing
  70. * + failed with FLAT_REG (DaVinci)
  71. * + seems to behave with double buffering, PIO -and- CPPI
  72. * + with gadgetfs + AIO, requests got lost?
  73. *
  74. * - RX/OUT
  75. * + both pio and dma behave in with network and g_zero tests
  76. * + dma is slow in typical case (short_not_ok is clear)
  77. * + double buffering ok with PIO
  78. * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
  79. * + request lossage observed with gadgetfs
  80. *
  81. * - ISO not tested ... might work, but only weakly isochronous
  82. *
  83. * - Gadget driver disabling of softconnect during bind() is ignored; so
  84. * drivers can't hold off host requests until userspace is ready.
  85. * (Workaround: they can turn it off later.)
  86. *
  87. * - PORTABILITY (assumes PIO works):
  88. * + DaVinci, basically works with cppi dma
  89. * + OMAP 2430, ditto with mentor dma
  90. * + TUSB 6010, platform-specific dma in the works
  91. */
  92. /* ----------------------------------------------------------------------- */
  93. #define is_buffer_mapped(req) (is_dma_capable() && \
  94. (req->map_state != UN_MAPPED))
  95. /* Maps the buffer to dma */
  96. static inline void map_dma_buffer(struct musb_request *request,
  97. struct musb *musb, struct musb_ep *musb_ep)
  98. {
  99. #ifndef USE_SSUSB_QMU
  100. int compatible = true;
  101. struct dma_controller *dma = musb->dma_controller;
  102. #endif
  103. unsigned length;
  104. length = ALIGN(request->request.length, dma_get_cache_alignment());
  105. request->map_state = UN_MAPPED;
  106. #ifndef USE_SSUSB_QMU
  107. if (!is_dma_capable() || !musb_ep->dma)
  108. return;
  109. /* Check if DMA engine can handle this request.
  110. * DMA code must reject the USB request explicitly.
  111. * Default behaviour is to map the request.
  112. */
  113. if (dma->is_compatible)
  114. compatible = dma->is_compatible(musb_ep->dma,
  115. musb_ep->packet_sz, request->request.buf,
  116. request->request.length);
  117. if (!compatible)
  118. return;
  119. #endif
  120. if (request->request.dma == DMA_ADDR_INVALID) {
  121. request->request.dma = dma_map_single(musb->controller,
  122. request->request.buf,
  123. length,
  124. request->tx
  125. ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  126. request->map_state = MUSB_MAPPED;
  127. } else {
  128. dma_sync_single_for_device(musb->controller,
  129. request->request.dma,
  130. length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  131. request->map_state = PRE_MAPPED;
  132. }
  133. }
  134. /* Unmap the buffer from dma and maps it back to cpu */
  135. static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb)
  136. {
  137. unsigned length;
  138. length = ALIGN(request->request.length, dma_get_cache_alignment());
  139. if (!is_buffer_mapped(request))
  140. return;
  141. if (request->request.dma == DMA_ADDR_INVALID) {
  142. dev_vdbg(musb->controller, "not unmapping a never mapped buffer\n");
  143. return;
  144. }
  145. if (request->map_state == MUSB_MAPPED) {
  146. dma_unmap_single(musb->controller,
  147. request->request.dma,
  148. length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  149. request->request.dma = DMA_ADDR_INVALID;
  150. } else { /* PRE_MAPPED */
  151. dma_sync_single_for_cpu(musb->controller,
  152. request->request.dma,
  153. length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
  154. }
  155. request->map_state = UN_MAPPED;
  156. }
  157. /*
  158. * Immediately complete a request.
  159. *
  160. * @param request the request to complete
  161. * @param status the status to complete the request with
  162. * Context: controller locked, IRQs blocked.
  163. */
/*
 * Immediately complete a request and hand it back to the gadget driver.
 *
 * @ep      endpoint the request was queued on
 * @request the request to complete
 * @status  status to complete the request with (kept only if the request
 *          is still -EINPROGRESS, so an earlier error is not overwritten)
 *
 * Context: controller locked, IRQs blocked.  The lock is dropped around
 * the unmap and the gadget's ->complete() callback (which may re-queue),
 * then re-acquired — hence the __releases/__acquires annotations.
 */
void musb_g_giveback(struct musb_ep *ep,
		     struct usb_request *request,
		     int status) __releases(ep->musb->lock) __acquires(ep->musb->lock)
{
	struct musb_request *req;
	struct musb *musb;
	/* remember caller's busy flag so nested givebacks restore it */
	int busy = ep->busy;

	/* defensive: nothing queued means nothing to give back */
	if (unlikely(list_empty(&ep->req_list))) {
		os_printk(K_ERR, "ep->req_list is empty:%s\n", ep->end_point.name);
		return;
	}
	req = to_musb_request(request);
	list_del(&req->list);
	/* only set the final status once; keep any earlier error code */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/* mark busy so re-entrant paths don't restart I/O mid-giveback */
	ep->busy = 1;
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		/* dev_dbg(musb->controller, "%s done request %p, %d/%d\n", */
		os_printk(K_DEBUG, "%s done request %p, %d/%d\n",
			  ep->end_point.name, request, req->request.actual,
			  req->request.length);
	else
		/* dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", */
		os_printk(K_DEBUG, "%s request %p, %d/%d fault %d\n",
			  ep->end_point.name, request,
			  req->request.actual, req->request.length, request->status);
	os_printk(K_DEBUG, "*************** musb_g_giveback : %p, #%d\n", request,
		  req->request.actual);
	/* notify the gadget driver; it may queue new requests from here */
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
  198. /* ----------------------------------------------------------------------- */
  199. /*
  200. * Abort requests queued to an endpoint using the status. Synchronous.
  201. * caller locked controller and blocked irqs, and selected this ep.
  202. */
  203. static void nuke(struct musb_ep *ep, const int status)
  204. {
  205. struct musb_request *req = NULL;
  206. os_printk(K_INFO, "%s status=%d %s-%s\n", __func__, status, ep->end_point.name,
  207. (ep->is_in ? "IN" : "OUT"));
  208. ep->busy = 1;
  209. #ifdef USE_SSUSB_QMU
  210. _ex_mu3d_hal_flush_qmu(ep->hw_ep->epnum, (ep->is_in ? USB_TX : USB_RX));
  211. /* mu3d_hal_start_qmu(ep->musb->mregs, ep->hw_ep->epnum, (ep->is_in? USB_TX: USB_RX)); */
  212. #endif
  213. while (!list_empty(&ep->req_list)) {
  214. req = list_first_entry(&ep->req_list, struct musb_request, list);
  215. musb_g_giveback(ep, &req->request, status);
  216. os_printk(K_INFO, "%s call musb_g_giveback() EP is %s\n", __func__,
  217. ep->end_point.name);
  218. }
  219. }
  220. /* ----------------------------------------------------------------------- */
  221. /* Data transfers - pure PIO, pure DMA, or mixed mode */
  222. /*
  223. * This assumes the separate CPPI engine is responding to DMA requests
  224. * from the usb core ... sequenced a bit differently from mentor dma.
  225. */
  226. static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
  227. {
  228. if (can_bulk_split(musb, ep->type))
  229. return ep->hw_ep->max_packet_sz_tx;
  230. else
  231. return ep->packet_sz;
  232. }
  233. #ifdef CONFIG_USB_INVENTRA_DMA
  234. /* Peripheral tx (IN) using Mentor DMA works as follows:
  235. Only mode 0 is used for transfers <= wPktSize,
  236. mode 1 is used for larger transfers,
  237. One of the following happens:
  238. - Host sends IN token which causes an endpoint interrupt
  239. -> TxAvail
  240. -> if DMA is currently busy, exit.
  241. -> if queue is non-empty, txstate().
  242. - Request is queued by the gadget driver.
  243. -> if queue was previously empty, txstate()
  244. txstate()
  245. -> start
  246. /\ -> setup DMA
  247. | (data is transferred to the FIFO, then sent out when
  248. | IN token(s) are recd from Host.
  249. | -> DMA interrupt on completion
  250. | calls TxAvail.
  251. | -> stop DMA, ~DMAENAB,
  252. | -> set TxPktRdy for last short pkt or zlp
  253. | -> Complete Request
  254. | -> Continue next request (call txstate)
  255. |___________________________________|
  256. * Non-Mentor DMA engines can of course work differently, such as by
  257. * upleveling from irq-per-packet to irq-per-buffer.
  258. */
  259. #endif
  260. #ifndef USE_SSUSB_QMU
  261. /*
  262. * An endpoint is transmitting data. This can be called either from
  263. * the IRQ routine or from ep.queue() to kickstart a request on an
  264. * endpoint.
  265. *
  266. * Context: controller locked, IRQs blocked, endpoint selected
  267. */
  268. static void txstate(struct musb *musb, struct musb_request *req)
  269. {
  270. u8 epnum = req->epnum;
  271. struct musb_ep *musb_ep;
  272. struct usb_request *request;
  273. u16 fifo_count = 0;
  274. u32 txcsr0 = 0, maxp;
  275. int use_dma = 0;
  276. os_printk(K_DEBUG, "%s\n", __func__);
  277. musb_ep = req->ep;
  278. /* we shouldn't get here while DMA is active ... but we do ... */
  279. if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
  280. dev_dbg(musb->controller, "dma pending...\n");
  281. return;
  282. }
  283. /* read TXCSR before */
  284. txcsr0 = os_readl(musb->endpoints[epnum].addr_txcsr0);
  285. request = &req->request;
  286. fifo_count = min(max_ep_writesize(musb, musb_ep), (int)(request->length - request->actual));
  287. if (txcsr0 & TX_TXPKTRDY)
  288. return;
  289. if (txcsr0 & TX_SENDSTALL)
  290. return;
  291. if (!use_dma) {
  292. /*
  293. * Unmap the dma buffer back to cpu if dma channel
  294. * programming fails
  295. */
  296. unmap_dma_buffer(req, musb);
  297. maxp = musb_ep->packet_sz;
  298. mu3d_hal_write_fifo(epnum, fifo_count, (u8 *) (request->buf + request->actual),
  299. maxp);
  300. request->actual += fifo_count;
  301. os_printk(K_DEBUG, "%s actual=%d, fifo_count=%d\n", __func__, request->actual,
  302. fifo_count);
  303. #if 0
  304. musb_write_fifo(musb_ep->hw_ep, fifo_count,
  305. (u8 *) (request->buf + request->actual));
  306. request->actual += fifo_count;
  307. txcsr0 &= TX_W1C_BITS;
  308. txcsr0 |= TX_TXPKTRDY;
  309. os_writel(musb->endpoints[epnum].addr_txcsr0, txcsr0);
  310. #endif
  311. }
  312. }
  313. #endif
  314. /*
  315. * FIFO state update (e.g. data ready).
  316. * Called from IRQ, with controller locked.
  317. */
  318. void musb_g_tx(struct musb *musb, u8 epnum)
  319. {
  320. u32 txcsr0;
  321. struct musb_request *req;
  322. struct usb_request *request;
  323. struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
  324. struct dma_channel *dma;
  325. req = next_request(musb_ep);
  326. request = &req->request;
  327. txcsr0 = os_readl(musb->endpoints[epnum].addr_txcsr0);
  328. dma = is_dma_capable() ? musb_ep->dma : NULL;
  329. if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
  330. /*
  331. * SHOULD NOT HAPPEN... has with CPPI though, after
  332. * changing SENDSTALL (and other cases); harmless?
  333. */
  334. dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
  335. return;
  336. }
  337. if (request) {
  338. /*
  339. * First, maybe a terminating short packet. Some DMA
  340. * engines might handle this by themselves.
  341. */
  342. if ((request->zero && request->length && (request->length % musb_ep->packet_sz == 0)
  343. && (request->actual == request->length))
  344. ) {
  345. /*
  346. * On DMA completion, FIFO may not be
  347. * available yet...
  348. */
  349. if (txcsr0 & TX_TXPKTRDY)
  350. return;
  351. dev_dbg(musb->controller, "sending zero pkt\n");
  352. /* os_writel(musb->endpoints[epnum].addr_txcsr0, (txcsr0 & TX_W1C_BITS) | TX_TXPKTRDY); */
  353. writel((txcsr0 & TX_W1C_BITS) | TX_TXPKTRDY,
  354. musb->endpoints[epnum].addr_txcsr0);
  355. request->zero = 0;
  356. }
  357. if (request->actual == request->length) {
  358. musb_g_giveback(musb_ep, request, 0);
  359. req = musb_ep->desc ? next_request(musb_ep) : NULL;
  360. if (!req) {
  361. dev_dbg(musb->controller, "%s idle now\n", musb_ep->end_point.name);
  362. return;
  363. }
  364. }
  365. #ifdef USE_SSUSB_QMU
  366. /* txstate_qmu(musb, req); */
  367. #else
  368. txstate(musb, req);
  369. #endif
  370. }
  371. }
  372. /* ------------------------------------------------------------ */
  373. #ifdef CONFIG_USB_INVENTRA_DMA
  374. /* Peripheral rx (OUT) using Mentor DMA works as follows:
  375. - Only mode 0 is used.
  376. - Request is queued by the gadget class driver.
  377. -> if queue was previously empty, rxstate()
  378. - Host sends OUT token which causes an endpoint interrupt
  379. /\ -> RxReady
  380. | -> if request queued, call rxstate
  381. | /\ -> setup DMA
  382. | | -> DMA interrupt on completion
  383. | | -> RxReady
  384. | | -> stop DMA
  385. | | -> ack the read
  386. | | -> if data recd = max expected
  387. | | by the request, or host
  388. | | sent a short packet,
  389. | | complete the request,
  390. | | and start the next one.
  391. | |_____________________________________|
  392. | else just wait for the host
  393. | to send the next OUT token.
  394. |__________________________________________________|
  395. * Non-Mentor DMA engines can of course work differently.
  396. */
  397. #endif
  398. /*
  399. * Context: controller locked, IRQs blocked, endpoint selected
  400. */
/*
 * Move RX data for @req out of the endpoint FIFO (PIO path).
 *
 * Context: controller locked, IRQs blocked, endpoint selected.
 * Completes the request when it is full or a short packet arrives.
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8 epnum = req->epnum;
	struct usb_request *request = &req->request;
	struct musb_ep *musb_ep;
	u16 fifo_count;
	unsigned len = 0;
	u32 rxcsr0 = os_readl(musb->endpoints[epnum].addr_rxcsr0);
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
	musb_ep = &hw_ep->ep_out;
	/* default so the short-packet test below stays false if we
	 * never read a byte count from the hardware */
	fifo_count = musb_ep->packet_sz;
	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}
	os_printk(K_DEBUG, "epnum=%d, rxcsr addr=%lX, rxcsr0=%X\n", epnum,
		  (uintptr_t) (musb->endpoints[epnum].addr_rxcsr0), rxcsr0);
	/* endpoint is stalled; leave the data alone until the stall clears */
	if (rxcsr0 & RX_SENDSTALL)
		return;
	if (rxcsr0 & RX_RXPKTRDY) {
		/* byte count of the packet sitting in the FIFO */
		fifo_count = (USB_ReadCsr32(U3D_RX1CSR3, epnum) >> EP_RX_COUNT_OFST);
		if (request->actual < request->length) {
			len = request->length - request->actual;
			/* never copy more than the request has room for */
			fifo_count = min_t(unsigned, len, fifo_count);
			/* fifo_count = mu3d_hal_read_fifo( epnum, (request->buf + request->actual)); */
			musb_read_fifo(&musb->endpoints[epnum], fifo_count,
				       (request->buf + request->actual));
			request->actual += fifo_count;
			/* ack the read! */
#ifdef AUTOCLEAR
			/* hardware auto-clears RXPKTRDY after a read;
			 * only a zero-length packet needs a manual ack */
			if (!fifo_count) {
				USB_WriteCsr32(U3D_RX1CSR0, epnum,
					       USB_ReadCsr32(U3D_RX1CSR0, epnum) | RX_RXPKTRDY);
			}
#else
			/* no autoclear: always write RXPKTRDY (W1C) to ack */
			USB_WriteCsr32(U3D_RX1CSR0, epnum,
				       USB_ReadCsr32(U3D_RX1CSR0, epnum) | RX_RXPKTRDY);
#endif
		}
	}
	os_printk(K_DEBUG, "%s length=%d, actual=%d, fifo_count=%d\n", __func__, request->length,
		  request->actual, fifo_count);
	os_printk(K_DEBUG, "%s len=%d, packet_sz=%d\n", __func__, len, musb_ep->packet_sz);
	/* reach the end or short packet detected */
	if (request->actual == request->length || fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
#ifdef NEVER
	/* Check other slot is empty */
	rxcsr0 = os_readl(musb->endpoints[epnum].addr_rxcsr0);
	os_printk(K_DEBUG, "rxcsr0 fifoempty=0x%x\n", rxcsr0 & RX_FIFOEMPTY);
	if (!(rxcsr0 & RX_FIFOEMPTY)) {
		os_printk(K_DEBUG, "==READ AGAIN!!!==\n");
		musb_g_rx(musb, epnum);
	}
#endif /* NEVER */
}
  458. /*
  459. * Data ready for a request; called from IRQ
  460. */
  461. void musb_g_rx(struct musb *musb, u8 epnum)
  462. {
  463. u32 rxcsr0;
  464. struct musb_request *req;
  465. struct usb_request *request;
  466. struct musb_ep *musb_ep;
  467. struct dma_channel *dma;
  468. struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
  469. musb_ep = &hw_ep->ep_out;
  470. req = next_request(musb_ep);
  471. if (!req)
  472. return;
  473. request = &req->request;
  474. rxcsr0 = os_readl(musb->endpoints[epnum].addr_rxcsr0);
  475. if (rxcsr0 & RX_SENTSTALL) {
  476. /* EPN needs to continuous sending STALL until host set clear_feature to clear the status. */
  477. /* musb_writew(epio, MUSB_RXCSR, csr); */
  478. /*SENTSTALL is W1C. So write again to clear it.*/
  479. /* os_writel(musb->endpoints[epnum].addr_rxcsr0, (rxcsr0 & RX_W1C_BITS) | RX_SENTSTALL); */
  480. writel((rxcsr0 & RX_W1C_BITS) | RX_SENTSTALL, musb->endpoints[epnum].addr_rxcsr0);
  481. return;
  482. }
  483. dma = is_dma_capable() ? musb_ep->dma : NULL;
  484. /* Analyze request */
  485. rxstate(musb, req);
  486. }
  487. /* ------------------------------------------------------------ */
/*
 * usb_ep_ops.enable: configure and activate an endpoint per @desc.
 *
 * Validates direction and wMaxPacketSize against the hardware endpoint,
 * enables the endpoint interrupt (PIO builds), programs the MU3D HAL,
 * optionally allocates a DMA channel, and marks the ep as in use.
 *
 * Returns 0 on success, -EINVAL / -EBUSY on failure.
 * NOTE(review): both success and failure exit through the "fail" label,
 * which only unlocks and returns the accumulated status.
 */
static int musb_gadget_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct musb_ep *musb_ep;
	struct musb_hw_ep *hw_ep;
	struct musb *musb;
	void __iomem *mbase;
	u8 epnum = 0;
	unsigned maxp = 0;
	int status = -EINVAL;
	TRANSFER_TYPE type = USB_CTRL;
	USB_DIR dir = USB_TX;
	if (!ep || !desc)
		return -EINVAL;
	os_printk(K_INFO, "musb_gadget_enable %s\n", ep->name);
	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = hw_ep->epnum;
	/* controller must be up (VBUS/session active) before enabling eps */
	if (!musb->is_active)
		return -EINVAL;
	spin_lock_irqsave(&musb->lock, flags);
	/* already enabled? */
	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);
	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;
	/* REVISIT this rules out high bandwidth periodic transfers */
	maxp = le16_to_cpu(desc->wMaxPacketSize);
	if (maxp & ~0x07ff)
		goto fail;
	musb_ep->packet_sz = maxp;
	os_printk(K_DEBUG, "U3D_EPIER=0x%X\n", os_readl(U3D_EPIER));
	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	if (usb_endpoint_dir_in(desc)) {	/* TX */
#ifndef USE_SSUSB_QMU
		u32 int_txe = os_readl(U3D_EPIER);
#endif
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (maxp > hw_ep->max_packet_sz_tx)
			goto fail;
#ifndef USE_SSUSB_QMU
		/* PIO: unmask this TX endpoint's interrupt (low 16 bits) */
		os_printk(K_DEBUG, "epnum=%d, int_txe=0x%x, EPIER=0x%x+\n", epnum, int_txe,
			  os_readl(U3D_EPIER));
		int_txe |= (1 << epnum);
		os_writel(U3D_EPIESR, int_txe);
		os_printk(K_DEBUG, "epnum=%d, int_txe=0x%x, EPIER=0x%x-\n", epnum, int_txe,
			  os_readl(U3D_EPIER));
#endif
		dir = USB_TX;
		/* map USB descriptor transfer type onto HAL enum;
		 * control endpoints never reach here, so no default case */
		switch (musb_ep->type) {
		case USB_ENDPOINT_XFER_BULK:
			type = USB_BULK;
			break;
		case USB_ENDPOINT_XFER_ISOC:
			type = USB_ISO;
			break;
		case USB_ENDPOINT_XFER_INT:
			type = USB_INTR;
			break;
		}
	} else {
#ifndef USE_SSUSB_QMU
		u32 int_rxe = os_readl(U3D_EPIER);
#endif
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (maxp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}
#ifndef USE_SSUSB_QMU
		/* PIO: RX endpoint interrupt bits live in the high half */
		os_printk(K_DEBUG, "int_rxe=0x%x, EPIER=0x%x+\n", int_rxe, os_readl(U3D_EPIER));
		int_rxe |= (BIT16 << epnum);
		os_writel(U3D_EPIESR, int_rxe);
		os_printk(K_DEBUG, "int_rxe=0x%x, EPIER=0x%x-\n", int_rxe, os_readl(U3D_EPIER));
#endif
		dir = USB_RX;
		switch (musb_ep->type) {
		case USB_ENDPOINT_XFER_BULK:
			type = USB_BULK;
			break;
		case USB_ENDPOINT_XFER_ISOC:
			type = USB_ISO;
			break;
		case USB_ENDPOINT_XFER_INT:
			type = USB_INTR;
			break;
		}
	}
	/*
	 * At PIO with 2 slot(double buffer), the host transfers 512 + N bytes.
	 * 512 would fill the 1st slot. And the rest of N bytes will put into
	 * the 2nd slot. The interrupt is coming. The driver reads the data
	 * stored at the 1st slot. Then driver expects the next interrupt to
	 * read the data at the 2nd slot. But the interrupt does not show up!
	 * Designer says maybe the interrupt the driver handles is the interrupt
	 * come from the 2nd slot. the system does not fast enough. So the later
	 * one and the previous one merge to one interrupt.
	 * So at FPGA stage and PIO, just use _ONE_ slot.
	 */
#ifdef USE_SSUSB_QMU
	_ex_mu3d_hal_ep_enable(epnum, dir, type, maxp, 0, MAX_SLOT, 0, 0);
#else
	/*TODO: Check support mulitslots on real ship */
	_ex_mu3d_hal_ep_enable(epnum, dir, type, maxp, 0, 0, 0, 0);
#endif
#ifdef USE_SSUSB_QMU
	mu3d_hal_start_qmu(epnum, dir);
#endif
	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller *c = musb->dma_controller;
		musb_ep->dma = c->channel_alloc(c, hw_ep, (desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;
	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;
	musb->active_ep++;
	os_printk(K_INFO, "[U3D]%s active_ep=%d\n", __func__, musb->active_ep);
	/* pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", */
	os_printk(K_INFO, "[U3D]%s periph: enabled %s for %s %s, %smaxpacket %d\n",
		  musb_driver_name, musb_ep->end_point.name, ({
			char *s;
			switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:
				s = "bulk";
				break;
			case USB_ENDPOINT_XFER_INT:
				s = "int";
				break;
			default:
				s = "iso";
				break;
			};
			s;
		  }),
		  musb_ep->is_in ? "IN" : "OUT", musb_ep->dma ? "dma, " : "", musb_ep->packet_sz);
	schedule_work(&musb->irq_work);
fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint: quiesce the hardware side, flush every queued
 * request with -ESHUTDOWN, and release the endpoint's hardware slot.
 * Caller is the gadget core via usb_ep_disable(); returns 0.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long flags;
	struct musb *musb;
	u8 epnum;
	struct musb_ep *musb_ep;
	int status = 0;

	os_printk(K_INFO, "%s %s\n", __func__, ep->name);

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

#ifdef USE_SSUSB_QMU
	/* zero the endpoint sizes */
	/* NOTE(review): both branches write addr_rxcsr0 — the TX branch using
	 * TX_* masks on the "rxcsr0" field looks like a field-naming quirk of
	 * struct musb_hw_ep; confirm against the register map. */
	if (musb_ep->is_in)
		os_writelmskumsk(musb->endpoints[epnum].addr_rxcsr0, 0, TX_TXMAXPKTSZ, TX_W1C_BITS);
	else
		os_writelmskumsk(musb->endpoints[epnum].addr_rxcsr0, 0, RX_RXMAXPKTSZ, RX_W1C_BITS);
#else
	/* zero the endpoint sizes */
	if (musb_ep->is_in) {	/* TX: also mask the TX EP interrupt (low 16 bits of EPIER) */
		u32 int_txe = os_readl(U3D_EPIER);

		int_txe &= ~(1 << epnum);
		os_writel(U3D_EPIESR, int_txe);
		os_writelmskumsk(musb->endpoints[epnum].addr_rxcsr0, 0, TX_TXMAXPKTSZ, TX_W1C_BITS);
	} else {		/* RX: interrupt enable bits live in the upper 16 bits */
		u32 int_rxe = os_readl(U3D_EPIER);

		int_rxe &= ~(BIT16 << epnum);
		os_writel(U3D_EPIESR, int_rxe);
		os_writelmskumsk(musb->endpoints[epnum].addr_rxcsr0, 0, RX_RXMAXPKTSZ, RX_W1C_BITS);
	}
#endif

	/* mark endpoint down before flushing, so new queues are rejected */
	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	mu3d_hal_unfigured_ep_num(epnum, (musb_ep->is_in ? USB_TX : USB_RX));

	schedule_work(&musb->irq_work);

	musb->active_ep--;

	os_printk(K_INFO, "[U3D]%s active_ep=%d\n", __func__, musb->active_ep);

	/* last endpoint gone and controller idle: let the suspend worker run */
	if (musb->active_ep == 0 && musb->is_active == 0)
		schedule_work(&musb->suspend_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}
  694. /*
  695. * Allocate a request for an endpoint.
  696. * Reused by ep0 code.
  697. */
  698. struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  699. {
  700. struct musb_ep *musb_ep = to_musb_ep(ep);
  701. /*struct musb *musb = musb_ep->musb;*/
  702. struct musb_request *request = NULL;
  703. request = kzalloc(sizeof(struct musb_request), gfp_flags);
  704. if (!request) {
  705. /*WARNING:OOM_MESSAGE: Possible unnecessary 'out of memory' message*/
  706. /*dev_dbg(musb->controller, "not enough memory\n");*/
  707. return NULL;
  708. }
  709. request->request.dma = DMA_ADDR_INVALID;
  710. request->epnum = musb_ep->current_epnum;
  711. request->ep = musb_ep;
  712. return &request->request;
  713. }
  714. /*
  715. * Free a request
  716. * Reused by ep0 code.
  717. */
  718. void musb_free_request(struct usb_ep *ep, struct usb_request *req)
  719. {
  720. kfree(to_musb_request(req));
  721. }
/* NOTE(review): this list is not referenced anywhere in this chunk; it
 * appears to be legacy bookkeeping for deferred buffer frees — verify
 * before removing. */
static LIST_HEAD(buffers);

/* Record of one DMA-coherent buffer awaiting release. */
struct free_record {
	struct list_head list;	/* link on the "buffers" list above */
	struct device *dev;	/* device the buffer belongs to */
	unsigned bytes;		/* allocation size in bytes */
	dma_addr_t dma;		/* bus address of the buffer */
};
/*
 * Kick the first request on an endpoint into the hardware.
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	/*
	 * We don't do anything if QMU because QMU is already
	 * waiting there when musb_gadget_queue().
	 */
#ifndef USE_SSUSB_QMU
	/* dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", */
	os_printk(K_DEBUG, "%s %s request %p len %u on hw_ep%d\n",
		  __func__, req->tx ? "TX/IN" : "RX/OUT",
		  &req->request, req->request.length, req->epnum);

	/* PIO mode: push/pull the FIFO directly */
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
#endif
}
/*
 * Queue a request on an endpoint. In QMU mode the transfer GPD is
 * inserted and the queue resumed immediately; in PIO mode the transfer
 * is started only if this request reached the head of the list.
 * Returns 0 or a negative errno; request completion is reported later
 * through the usual giveback path.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;
	struct musb *musb;
	int status = 0;
	unsigned long lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	request = to_musb_request(req);
	request->musb = musb;

	/* the request must have been allocated for this very endpoint */
	if (request->ep != musb_ep)
		return -EINVAL;

	os_printk(K_DEBUG, "%s %s, req=%p, len#%d\n", __func__, ep->name, req,
		  request->request.length);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

#ifdef CONFIG_MTK_LM_MODE
	/*
	 * Do NOT map the buffer when the request length is 0 on 4GB env.
	 * But the request is still set as MUSB_MAPPED for triggering ZLP.
	 */
	if (request->tx && (request->request.length == 0))
		request->map_state = MUSB_MAPPED;
	else
		map_dma_buffer(request, musb, musb_ep);
#else
	map_dma_buffer(request, musb, musb_ep);
#endif

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
			req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

#ifdef EP_PROFILING
	if (is_prof != 0)
		ep_prof[request->epnum - 1][request->tx ? 0 : 1] += request->request.length;
#endif

#ifdef USE_SSUSB_QMU
	/* QMU mode: hand the buffer to the queue manager right away */
#ifdef CONFIG_MTK_LM_MODE
	if ((request->request.dma != DMA_ADDR_INVALID) || (request->map_state != UN_MAPPED)) {
#else
	if (request->request.dma != DMA_ADDR_INVALID) {
#endif
		qmu_printk(K_DEBUG, "%s %s EP%d, len=%d, maxp=%d request=%p\n",
			   request->tx ? "[TX]" : "[RX]", __func__, request->epnum,
			   request->request.length, ep->maxpacket, request);
		if (request->tx) {
			/* TX: QMU completes the whole buffer, so 'actual' is known now */
			request->request.actual = request->request.length;
			if (request->request.length > 0) {
				u32 txcsr;

				_ex_mu3d_hal_insert_transfer_gpd(request->epnum, USB_TX,
								 request->request.dma,
								 request->request.length, true,
								 true, false,
								 ((request->request.zero ==
								   1) ? 1 : 0), ep->maxpacket);
				/*Enable Tx_DMAREQEN */
				txcsr = USB_ReadCsr32(U3D_TX1CSR0, request->epnum) | TX_DMAREQEN;
				mb();
				USB_WriteCsr32(U3D_TX1CSR0, request->epnum, txcsr);
				mu3d_hal_resume_qmu(request->epnum, USB_TX);
			} else if (request->request.length == 0) {
				/* If there is only ZLP in the reqeest list. Just send ZLP directly */
				int utime = 1000000;	/* 1 sec */
				/* Send ZLP by setting TXPKTRDY */
				u32 val = 0;

				qmu_printk(K_DEBUG, "[TX]" "Send ZLP\n");
				/* a ZLP can only be injected once the FIFO has drained */
				if (wait_for_value_us
				    (USB_END_OFFSET(request->epnum, U3D_TX1CSR0), TX_FIFOEMPTY,
				     TX_FIFOEMPTY, 1, utime) == RET_SUCCESS) {
					qmu_printk(K_DEBUG, "Tx[%d] 0x%x\n", request->epnum,
						   USB_ReadCsr32(U3D_TX1CSR0, request->epnum));
				} else {
					/* NOTE(review): on this failure the request stays on
					 * req_list without a giveback — confirm QMU-done
					 * handling eventually completes it. */
					qmu_printk(K_CRIT,
						   "Tx[%d] Fail to send ZLP 0x%x, utime %d, skip and wait until QMU Done\n",
						   request->epnum, USB_ReadCsr32(U3D_TX1CSR0,
										 request->epnum),
						   utime);
					goto cleanup;
				}
				/*Disable Tx_DMAREQEN */
				val = USB_ReadCsr32(U3D_TX1CSR0, request->epnum) & ~TX_DMAREQEN;
				mb();
				USB_WriteCsr32(U3D_TX1CSR0, request->epnum, val);

				/* fire the ZLP by raising TXPKTRDY */
				val = USB_ReadCsr32(U3D_TX1CSR0, request->epnum) | TX_TXPKTRDY;
				mb();
				USB_WriteCsr32(U3D_TX1CSR0, request->epnum, val);
				qmu_printk(K_DEBUG,
					   "[TX] Give back ZLP of EP%d. actual:%d, length:%d %p\n",
					   request->epnum, request->request.actual,
					   request->request.length, request);
				/* call giveback directlly? no need to wait tx completed? */
				musb_g_giveback(musb_ep, &(request->request), 0);
			}
		} else {
			/* RX: ZLP-terminate for everything except isochronous */
			_ex_mu3d_hal_insert_transfer_gpd(request->epnum, USB_RX,
							 request->request.dma,
							 request->request.length, true, true, false,
							 (musb_ep->type ==
							  USB_ENDPOINT_XFER_ISOC ? 0 : 1),
							 ep->maxpacket);
			mu3d_hal_resume_qmu(request->epnum, USB_RX);
		}
	}
#endif

	/* it this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
  874. static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
  875. {
  876. struct musb_ep *musb_ep = to_musb_ep(ep);
  877. struct musb_request *req = to_musb_request(request);
  878. struct musb_request *r;
  879. unsigned long flags;
  880. int status = 0;
  881. struct musb *musb = musb_ep->musb;
  882. if (!ep || !request || to_musb_request(request)->ep != musb_ep)
  883. return -EINVAL;
  884. os_printk(K_INFO, "%s : request 0x%p\n", __func__, request);
  885. spin_lock_irqsave(&musb->lock, flags);
  886. list_for_each_entry(r, &musb_ep->req_list, list) {
  887. if (r == req)
  888. break;
  889. }
  890. if (r != req) {
  891. dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
  892. status = -EINVAL;
  893. goto done;
  894. }
  895. /* if the hardware doesn't have the request, easy ... */
  896. if (musb_ep->req_list.next != &req->list || musb_ep->busy)
  897. musb_g_giveback(musb_ep, request, -ECONNRESET);
  898. /* ... else abort the dma transfer ... */
  899. #ifdef USE_SSUSB_QMU
  900. else {
  901. _ex_mu3d_hal_flush_qmu(musb_ep->hw_ep->epnum, (musb_ep->is_in ? USB_TX : USB_RX));
  902. musb_g_giveback(musb_ep, request, -ECONNRESET);
  903. /* only start qmu, don't need to reset EP */
  904. /* mu3d_hal_restart_qmu(musb_ep->hw_ep->epnum, (musb_ep->is_in? USB_TX: USB_RX)); */
  905. mu3d_hal_start_qmu(musb_ep->hw_ep->epnum, (musb_ep->is_in ? USB_TX : USB_RX));
  906. status = 0;
  907. }
  908. #else
  909. else if (is_dma_capable() && musb_ep->dma) {
  910. struct dma_controller *c = musb->dma_controller;
  911. if (c->channel_abort)
  912. status = c->channel_abort(musb_ep->dma);
  913. else
  914. status = -EBUSY;
  915. if (status == 0)
  916. musb_g_giveback(musb_ep, request, -ECONNRESET);
  917. } else {
  918. /* NOTE: by sticking to easily tested hardware/driver states,
  919. * we leave counting of in-flight packets imprecise.
  920. */
  921. musb_g_giveback(musb_ep, request, -ECONNRESET);
  922. }
  923. #endif
  924. done:
  925. spin_unlock_irqrestore(&musb->lock, flags);
  926. return status;
  927. }
  928. /*
  929. * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
  930. * data but will queue requests.
  931. *
  932. * exported to ep0 code
  933. */
  934. static int musb_gadget_set_halt(struct usb_ep *ep, int value)
  935. {
  936. struct musb_ep *musb_ep = to_musb_ep(ep);
  937. u8 epnum = musb_ep->current_epnum;
  938. struct musb *musb = musb_ep->musb;
  939. void __iomem *mbase;
  940. unsigned long flags;
  941. u32 txcsr0 = 0, rxcsr0 = 0;
  942. struct musb_request *request;
  943. int status = 0;
  944. if (!ep)
  945. return -EINVAL;
  946. mbase = musb->mregs;
  947. os_printk(K_DEBUG, "musb_gadget_set_halt : %s...", ep->name);
  948. spin_lock_irqsave(&musb->lock, flags);
  949. if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
  950. status = -EINVAL;
  951. goto done;
  952. }
  953. request = next_request(musb_ep);
  954. if (value) {
  955. if (request) {
  956. dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
  957. ep->name);
  958. status = -EAGAIN;
  959. goto done;
  960. }
  961. /* Cannot portably stall with non-empty FIFO */
  962. if (musb_ep->is_in) {
  963. txcsr0 = os_readl(musb->endpoints[epnum].addr_txcsr0);
  964. if (!(txcsr0 & TX_FIFOEMPTY)) {
  965. dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
  966. status = -EAGAIN;
  967. goto done;
  968. }
  969. }
  970. } else
  971. musb_ep->wedged = 0;
  972. /* set/clear the stall and toggle bits */
  973. dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
  974. if (musb_ep->is_in) { /* TX */
  975. txcsr0 = os_readl(musb->endpoints[epnum].addr_txcsr0) & TX_W1C_BITS;
  976. if (value) { /* set */
  977. txcsr0 |= TX_SENDSTALL;
  978. /* os_writel(musb->endpoints[epnum].addr_txcsr0, txcsr0); */
  979. writel(txcsr0, musb->endpoints[epnum].addr_txcsr0);
  980. } else { /* clear */
  981. /* we need to also clear SENTSTALL to let the EP work normaly. */
  982. txcsr0 = (txcsr0 & (~TX_SENDSTALL)) | TX_SENTSTALL;
  983. /* os_writel(musb->endpoints[epnum].addr_txcsr0, txcsr0); */
  984. writel(txcsr0, musb->endpoints[epnum].addr_txcsr0);
  985. /* reset TX EP */
  986. os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) | (BIT16 << epnum));
  987. /* reset reset TX EP */
  988. os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) & ~(BIT16 << epnum));
  989. /* We cannot flush QMU now, because the MSC gadget will not re-submit
  990. * the CBW request after clear halt. */
  991. /* _ex_mu3d_hal_flush_qmu(epnum, USB_TX); */
  992. /* mu3d_hal_restart_qmu(epnum, USB_TX); */
  993. }
  994. } else {
  995. rxcsr0 = os_readl(musb->endpoints[epnum].addr_rxcsr0) & RX_W1C_BITS;
  996. if (value) { /* set stall */
  997. rxcsr0 &= RX_W1C_BITS;
  998. rxcsr0 |= RX_SENDSTALL;
  999. /* os_writel(musb->endpoints[epnum].addr_rxcsr0, rxcsr0); */
  1000. writel(rxcsr0, musb->endpoints[epnum].addr_rxcsr0);
  1001. } else { /* clear stall */
  1002. /* we need to also clear SENTSTALL to let the EP work normaly. */
  1003. rxcsr0 = (rxcsr0 & (~RX_SENDSTALL)) | RX_SENTSTALL;
  1004. /* os_writel(musb->endpoints[epnum].addr_rxcsr0, rxcsr0); */
  1005. writel(rxcsr0, musb->endpoints[epnum].addr_rxcsr0);
  1006. /* reset RX EP */
  1007. os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) | (1 << epnum));
  1008. /* reset reset RX EP */
  1009. os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) & (~(1 << epnum)));
  1010. /* We cannot flush QMU now, because the MSC gadget will not re-submit
  1011. * the CBW request after clear halt. */
  1012. /* _ex_mu3d_hal_flush_qmu(epnum, USB_RX); */
  1013. /* mu3d_hal_restart_qmu(epnum, USB_RX); */
  1014. }
  1015. }
  1016. /* maybe start the first request in the queue */
  1017. if (!musb_ep->busy && !value && request) {
  1018. dev_dbg(musb->controller, "restarting the request\n");
  1019. musb_ep_restart(musb, request);
  1020. }
  1021. done:
  1022. spin_unlock_irqrestore(&musb->lock, flags);
  1023. return status;
  1024. }
  1025. /*
  1026. * Sets the halt feature with the clear requests ignored
  1027. */
  1028. static int musb_gadget_set_wedge(struct usb_ep *ep)
  1029. {
  1030. struct musb_ep *musb_ep = to_musb_ep(ep);
  1031. if (!ep)
  1032. return -EINVAL;
  1033. musb_ep->wedged = 1;
  1034. return usb_ep_set_halt(ep);
  1035. }
  1036. static int musb_gadget_fifo_status(struct usb_ep *ep)
  1037. {
  1038. struct musb_ep *musb_ep = to_musb_ep(ep);
  1039. int retval = -EINVAL;
  1040. if (musb_ep->desc && !musb_ep->is_in) {
  1041. struct musb *musb = musb_ep->musb;
  1042. int epnum = musb_ep->current_epnum;
  1043. unsigned long flags;
  1044. spin_lock_irqsave(&musb->lock, flags);
  1045. /* FIXME return zero unless RXPKTRDY is set */
  1046. retval = os_readl(musb->endpoints[epnum].addr_rxcsr3) >> 16;
  1047. spin_unlock_irqrestore(&musb->lock, flags);
  1048. }
  1049. return retval;
  1050. }
/*
 * Drop any data sitting in the endpoint FIFO. In QMU mode the queue is
 * flushed and restarted first; the endpoint itself is then pulsed through
 * the U3D_EP_RST register.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb *musb = musb_ep->musb;
	u8 epnum = musb_ep->current_epnum;
	unsigned long flags;
#ifndef USE_SSUSB_QMU		/* we don't enable EP interrupts in QMU mode. */
	u32 int_txe;
#endif
	u32 txcsr0 = 0;

	spin_lock_irqsave(&musb->lock, flags);

	/* disable interrupts */
#ifndef USE_SSUSB_QMU		/* we don't enable EP interrupts in QMU mode. */
	int_txe = os_readl(U3D_EPIER);
	os_writel(U3D_EPIECR, ~(int_txe) | (1 << epnum));	/* set clear register */
#endif

	if (musb_ep->is_in) {	/* TX */
#ifdef USE_SSUSB_QMU
		_ex_mu3d_hal_flush_qmu(epnum, USB_TX);
		mu3d_hal_restart_qmu(epnum, USB_TX);
#endif
		txcsr0 = os_readl(musb->endpoints[epnum].addr_txcsr0);
		/* only reset when there is actually data left in the FIFO */
		if (!(txcsr0 & TX_FIFOEMPTY)) {
			os_printk(K_DEBUG, "%s RESET\n", ep->name);
			/* pulse TX EP reset (TX bits in the upper halfword) */
			os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) | (BIT16 << epnum));
			os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) & ~(BIT16 << epnum));
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		}
	} else {
#ifdef USE_SSUSB_QMU
		_ex_mu3d_hal_flush_qmu(epnum, USB_RX);
		mu3d_hal_restart_qmu(epnum, USB_RX);
#endif
		os_printk(K_DEBUG, "%s RESET\n", ep->name);
		/* os_writew(musb->endpoints[epnum].addr_rxcsr0, rxcsr0 | USB_RXCSR_FLUSHFIFO); */
		/* os_writew(musb->endpoints[epnum].addr_rxcsr0, rxcsr0 & (~USB_RXCSR_FLUSHFIFO)); */
		os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) | (1 << epnum));	/* reset RX EP */
		os_writel(U3D_EP_RST, os_readl(U3D_EP_RST) & (~(1 << epnum)));	/* reset reset RX EP */
	}

	/* re-enable interrupt */
#ifndef USE_SSUSB_QMU
	os_writel(U3D_EPIESR, int_txe);
#endif
	spin_unlock_irqrestore(&musb->lock, flags);
}
/* Endpoint operations exported to the gadget core for all non-ep0 endpoints
 * (ep0 uses musb_g_ep0_ops, wired up in init_peripheral_ep()). */
static const struct usb_ep_ops musb_ep_ops = {
	.enable = musb_gadget_enable,
	.disable = musb_gadget_disable,
	.alloc_request = musb_alloc_request,
	.free_request = musb_free_request,
	.queue = musb_gadget_queue,
	.dequeue = musb_gadget_dequeue,
	.set_halt = musb_gadget_set_halt,
	.set_wedge = musb_gadget_set_wedge,
	.fifo_status = musb_gadget_fifo_status,
	.fifo_flush = musb_gadget_fifo_flush
};
  1110. /* ----------------------------------------------------------------------- */
  1111. static int musb_gadget_get_frame(struct usb_gadget *gadget)
  1112. {
  1113. return (int)os_readl(U3D_USB20_FRAME_NUM);
  1114. }
/*
 * Attempt remote wakeup / SRP depending on the current OTG state.
 * Returns 0 on success, -EINVAL when the state does not permit wakeup.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb *musb = gadget_to_musb(gadget);
	unsigned long flags;
	int status = -EINVAL;
	u8 devctl;
	int retries;

	spin_lock_irqsave(&musb->lock, flags);

	os_printk(K_DEBUG, "musb_gadget_wakeup\n");

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		/* remote wakeup is only legal if the host enabled it and we
		 * are actually suspended */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = os_readl(U3D_DEVICE_CONTROL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= USB_DEVCTL_SESSION;
		/* os_writel(U3D_DEVICE_CONTROL, devctl);
		   //We temoporarily disable DEV_CONTROL for writing SESSION. */
		devctl = os_readl(U3D_DEVICE_CONTROL);
		/* poll for the session bit to come up ... */
		retries = 100;
		while (!(devctl & USB_DEVCTL_SESSION)) {
			devctl = os_readl(U3D_DEVICE_CONTROL);
			if (retries-- < 1)
				break;
		}
		/* ... then for it to drop again (SRP pulse complete) */
		retries = 10000;
		while (devctl & USB_DEVCTL_SESSION) {
			devctl = os_readl(U3D_DEVICE_CONTROL);
			if (retries-- < 1)
				break;
		}

		/* otg_start_srp() may sleep; drop the lock around it */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(1 * HZ));
		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			usb_otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	os_printk(K_DEBUG, "****************** musb_gadget_wakeup......\n");
	/* mu3d_hal_resume(); */
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
  1172. static int musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
  1173. {
  1174. struct musb *musb = gadget_to_musb(gadget);
  1175. musb->is_self_powered = !!is_selfpowered;
  1176. return 0;
  1177. }
/*
 * Connect or disconnect the D+ pullup (soft-connect). Requires power and
 * clocks to be up (is_active); silently bails out otherwise.
 */
static void musb_pullup(struct musb *musb, int is_on)
{
	if (musb->is_active == 0) {
		os_printk(K_INFO, "power and clk is not ready!\n");
		return;
	}

	if (is_on) {
#ifdef SUPPORT_U3
		/* SuperSpeed-capable builds enable the U3 device port */
		mu3d_hal_u3dev_en();
#else
		mu3d_hal_u2dev_connect();
#endif
	} else {
#ifdef SUPPORT_U3
		mu3d_hal_u3dev_dis();
#endif
		/* U2 disconnect is issued in both build variants */
		mu3d_hal_u2dev_disconn();
	}

	dev_notice(musb->controller, "gadget D+ pullup %s\n", is_on ? "on" : "off");
}
#if 0
/* NOTE(review): dead code (compiled out). As written it would not even
 * build — it references 'musb' without declaring it. Kept only as a
 * placeholder for a future vbus_session implementation. */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */
	return -EINVAL;
}
#endif
  1209. static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
  1210. {
  1211. struct musb *musb = gadget_to_musb(gadget);
  1212. if (!musb->xceiv->set_power)
  1213. return -EOPNOTSUPP;
  1214. return usb_phy_set_power(musb->xceiv, mA);
  1215. }
/* set once the gadget has been soft-connected for the first time */
static int usb_rdy;		/* default value 0 */
/* one-shot delayed work used to kick connection_work after first pullup */
static struct delayed_work mu3d_clk_off_work;
static struct musb *mu3d_clk_off_musb;	/* target of mu3d_clk_off_work */
  1219. static void do_mu3d_clk_off_work(struct work_struct *work)
  1220. {
  1221. os_printk(K_NOTICE, "do_mu3d_clk_off_work, issue connection work\n");
  1222. schedule_delayed_work_on(0, &mu3d_clk_off_musb->connection_work, 0);
  1223. }
  1224. void set_usb_rdy(void)
  1225. {
  1226. os_printk(K_NOTICE, "set usb_rdy, wake up bat\n");
  1227. usb_rdy = 1;
  1228. #if defined(CONFIG_MTK_SMART_BATTERY)
  1229. wake_up_bat();
  1230. #endif
  1231. }
  1232. bool is_usb_rdy(void)
  1233. {
  1234. if (usb_rdy)
  1235. return true;
  1236. else
  1237. return false;
  1238. }
/*
 * Gadget-core pullup callback: soft-connect/disconnect D+, and on the very
 * first connect mark the stack ready and (in non-OTG builds) schedule a
 * delayed kick of the connection worker.
 */
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb *musb = gadget_to_musb(gadget);
	unsigned long flags;

	is_on = !!is_on;

	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_put(musb->controller);

	/* first-ever connect: latch usb_rdy and arm the one-shot worker.
	 * NOTE(review): INIT_DELAYED_WORK at runtime is safe here only
	 * because the usb_rdy latch guarantees this path runs once. */
	if (is_usb_rdy() == false && is_on) {
		set_usb_rdy();
		if (!is_otg_enabled(musb)) {
#define MY_DELAY 2000
			INIT_DELAYED_WORK(&mu3d_clk_off_work, do_mu3d_clk_off_work);
			mu3d_clk_off_musb = musb;
			os_printk(K_NOTICE, "queue mu3d_clk_off_work, %d ms delayed\n", MY_DELAY);
			schedule_delayed_work(&mu3d_clk_off_work, msecs_to_jiffies(MY_DELAY));
		}
	}

	return 0;
}
/* forward declarations for the UDC start/stop callbacks defined below */
static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver);

/* usb_gadget_ops handed to the UDC core for this controller. */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame = musb_gadget_get_frame,
	.wakeup = musb_gadget_wakeup,
	.set_selfpowered = musb_gadget_set_self_powered,
	/* .vbus_session = musb_gadget_vbus_session, */
	.vbus_draw = musb_gadget_vbus_draw,
	.pullup = musb_gadget_pullup,
	.udc_start = musb_gadget_start,
	.udc_stop = musb_gadget_stop,
	/*REVISIT-J: Do we need implement "get_config_params" to config U1/U2 */
};
  1280. /* ----------------------------------------------------------------------- */
  1281. /* Registration */
  1282. /* Only this registration code "knows" the rule (from USB standards)
  1283. * about there being only one external upstream port. It assumes
  1284. * all peripheral ports are external...
  1285. */
#ifdef NEVER
/* Dead code: release callback for the (currently unregistered) gadget
 * device; would pair with device_register() in musb_gadget_setup(). */
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
#endif				/* NEVER */
  1293. static void init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
  1294. {
  1295. struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
  1296. memset(ep, 0, sizeof(*ep));
  1297. ep->current_epnum = epnum;
  1298. ep->musb = musb;
  1299. ep->hw_ep = hw_ep;
  1300. ep->is_in = is_in;
  1301. INIT_LIST_HEAD(&ep->req_list);
  1302. sprintf(ep->name, "ep%d%s", epnum,
  1303. (!epnum || hw_ep->is_shared_fifo) ? "" : (is_in ? "in" : "out"));
  1304. os_printk(K_INFO, "%s, name=%s\n", __func__, ep->name);
  1305. ep->end_point.name = ep->name;
  1306. INIT_LIST_HEAD(&ep->end_point.ep_list);
  1307. if (!epnum) {
  1308. ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
  1309. ep->end_point.ops = &musb_g_ep0_ops;
  1310. musb->g.ep0 = &ep->end_point;
  1311. } else {
  1312. if (is_in)
  1313. ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
  1314. else
  1315. ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
  1316. ep->end_point.ops = &musb_ep_ops;
  1317. list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
  1318. }
  1319. os_printk(K_INFO, "%s, name=%s, maxp=%d\n", __func__, ep->end_point.name,
  1320. ep->end_point.maxpacket);
  1321. }
  1322. /*
  1323. * Initialize the endpoints exposed to peripheral drivers, with backlinks
  1324. * to the rest of the driver state.
  1325. */
  1326. static inline void musb_g_init_endpoints(struct musb *musb)
  1327. {
  1328. u8 epnum;
  1329. struct musb_hw_ep *hw_ep;
  1330. unsigned count = 0;
  1331. /* initialize endpoint list just once */
  1332. INIT_LIST_HEAD(&(musb->g.ep_list));
  1333. os_printk(K_INFO, "%s nr_endpoints=%d\n", __func__, musb->nr_endpoints);
  1334. for (epnum = 0, hw_ep = musb->endpoints; epnum < musb->nr_endpoints; epnum++, hw_ep++) {
  1335. os_printk(K_INFO, "%s epnum=%d shared_fifo=%d rx_maxp=%d tx_maxp=%d\n",
  1336. __func__, epnum, hw_ep->is_shared_fifo, hw_ep->max_packet_sz_rx ? : 0,
  1337. hw_ep->max_packet_sz_tx ? : 0);
  1338. if (hw_ep->is_shared_fifo /* || !epnum */) {
  1339. init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
  1340. count++;
  1341. } else {
  1342. if (hw_ep->max_packet_sz_tx) {
  1343. init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 1);
  1344. count++;
  1345. }
  1346. if (hw_ep->max_packet_sz_rx) {
  1347. init_peripheral_ep(musb, &hw_ep->ep_out, epnum, 0);
  1348. count++;
  1349. }
  1350. }
  1351. }
  1352. }
  1353. /* called once during driver setup to initialize and link into
  1354. * the driver model; memory is zeroed.
  1355. */
  1356. int musb_gadget_setup(struct musb *musb)
  1357. {
  1358. int status;
  1359. /* REVISIT minor race: if (erroneously) setting up two
  1360. * musb peripherals at the same time, only the bus lock
  1361. * is probably held.
  1362. */
  1363. musb->g.ops = &musb_gadget_operations;
  1364. musb->g.max_speed = USB_SPEED_SUPER;
  1365. musb->g.speed = USB_SPEED_UNKNOWN;
  1366. /* this "gadget" abstracts/virtualizes the controller */
  1367. /* dev_set_name(&musb->g.dev, "gadget"); */
  1368. /* musb->g.dev.parent = musb->controller; */
  1369. /* musb->g.dev.dma_mask = musb->controller->dma_mask; */
  1370. /* musb->g.dev.release = musb_gadget_release; */
  1371. musb->g.name = musb_driver_name;
  1372. if (is_otg_enabled(musb))
  1373. musb->g.is_otg = 1;
  1374. musb_g_init_endpoints(musb);
  1375. musb->is_active = 0;
  1376. musb_platform_try_idle(musb, 0);
  1377. /* status = device_register(&musb->g.dev); */
  1378. /* if (status != 0) { */
  1379. /* put_device(&musb->g.dev); */
  1380. /* return status; */
  1381. /* } */
  1382. status = usb_add_gadget_udc(musb->controller, &musb->g);
  1383. if (status)
  1384. goto err;
  1385. return 0;
  1386. err:
  1387. musb->g.dev.parent = NULL;
  1388. device_unregister(&musb->g.dev);
  1389. return status;
  1390. }
  1391. void musb_gadget_cleanup(struct musb *musb)
  1392. {
  1393. usb_del_gadget_udc(&musb->g);
  1394. /* if (musb->g.dev.parent) */
  1395. /* device_unregister(&musb->g.dev); */
  1396. }
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	struct usb_otg *otg = musb->xceiv->otg;
	struct usb_hcd *hcd = musb_to_hcd(musb);
	unsigned long flags;
	int retval = 0;

	/* this UDC only supports high-speed-capable (or faster) drivers */
	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	pm_runtime_get_sync(musb->controller);

	dev_dbg(musb->controller, "registering driver %s\n", driver->function);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	/* only go active immediately if the stack was already brought up once;
	 * otherwise connection_work does it later */
	if (is_usb_rdy() == true)
		musb->is_active = 1;
	else
		os_printk(K_NOTICE, "skip set is_active to 1, leave it to connection_work\n");

	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag. Drivers are
	 * allowed hold the peripheral inactive until for example
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT: funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

		if (musb->xceiv->last_event == USB_EVENT_ID)
			musb_platform_set_vbus(musb, 1);

		hcd->self.uses_pio_for_control = 1;
	}

	/* drop the runtime-PM reference if no cable event is pending */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);

	return 0;

err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err:
	return retval;
}
/*
 * Quiesce the controller on driver unbind/disconnect: drop the pullup,
 * stop the hardware, and flush every endpoint's requests with -ESHUTDOWN
 * (skipped when nothing was ever connected).
 */
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int i;
	struct musb_hw_ep *hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints; i < musb->nr_endpoints; i++, hw_ep++) {
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				/* split-FIFO endpoints: flush each direction that exists */
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}
  1492. /*
  1493. * Unregister the gadget driver. Used by gadget drivers when
  1494. * unregistering themselves from the controller.
  1495. *
  1496. * @param driver the gadget driver to unregister
  1497. */
static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;

	/* If the last event was NONE the controller may be runtime
	 * suspended; wake it so the teardown below can touch registers.
	 * Balanced by the pm_runtime_put() at the end.
	 */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	musb_hnp_stop(musb);
	/* stop drawing VBUS current before the session goes away */
	(void)musb_gadget_vbus_draw(&musb->g, 0);
	musb->xceiv->state = OTG_STATE_UNDEFINED;
	/* flush outstanding requests and report disconnect to the driver */
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);
	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		/* the host side was registered in the start path; remove it */
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}
	/* non-OTG: mirrors the error path of the start routine */
	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);
	return 0;
}
  1529. /* ----------------------------------------------------------------------- */
  1530. /* lifecycle operations called through plat_uds.c */
  1531. void musb_g_resume(struct musb *musb)
  1532. {
  1533. musb->is_suspended = 0;
  1534. switch (musb->xceiv->state) {
  1535. case OTG_STATE_B_IDLE:
  1536. break;
  1537. case OTG_STATE_B_WAIT_ACON:
  1538. case OTG_STATE_B_PERIPHERAL:
  1539. musb->is_active = 1;
  1540. if (musb->gadget_driver && musb->gadget_driver->resume) {
  1541. spin_unlock(&musb->lock);
  1542. musb->gadget_driver->resume(&musb->g);
  1543. spin_lock(&musb->lock);
  1544. }
  1545. break;
  1546. default:
  1547. WARNING("unhandled RESUME transition (%s)\n",
  1548. usb_otg_state_string(musb->xceiv->state));
  1549. }
  1550. }
  1551. /* called when SOF packets stop for 3+ msec */
  1552. void musb_g_suspend(struct musb *musb)
  1553. {
  1554. u32 devctl;
  1555. devctl = os_readl(U3D_DEVICE_CONTROL);
  1556. dev_notice(musb->controller, "devctl %02x\n", devctl);
  1557. switch (musb->xceiv->state) {
  1558. case OTG_STATE_B_IDLE:
  1559. if ((devctl & USB_DEVCTL_VBUSMASK) == USB_DEVCTL_VBUSVALID)
  1560. musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
  1561. break;
  1562. case OTG_STATE_B_PERIPHERAL:
  1563. musb->is_suspended = 1;
  1564. if (musb->gadget_driver && musb->gadget_driver->suspend) {
  1565. spin_unlock(&musb->lock);
  1566. musb->gadget_driver->suspend(&musb->g);
  1567. spin_lock(&musb->lock);
  1568. }
  1569. musb_sync_with_bat(musb, USB_SUSPEND); /* announce to the battery */
  1570. break;
  1571. default:
  1572. /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
  1573. * A_PERIPHERAL may need care too
  1574. */
  1575. WARNING("unhandled SUSPEND transition (%s)\n",
  1576. usb_otg_state_string(musb->xceiv->state));
  1577. }
  1578. }
  1579. /* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	/* thin wrapper: delegate remote-wakeup signalling to the
	 * gadget-level wakeup handler
	 */
	musb_gadget_wakeup(&musb->g);
}
  1584. /* called when VBUS drops below session threshold, and in other cases */
  1585. void musb_g_disconnect(struct musb *musb)
  1586. {
  1587. u32 devctl = os_readl(U3D_DEVICE_CONTROL);
  1588. dev_notice(musb->controller, "devctl %02x\n", devctl);
  1589. /* clear HR */
  1590. /* marked off for 3.0 reset device test */
  1591. /* os_writel(U3D_DEVICE_CONTROL, devctl & USB_DEVCTL_SESSION); */
  1592. /* don't draw vbus until new b-default session */
  1593. (void)musb_gadget_vbus_draw(&musb->g, 0);
  1594. musb->g.speed = USB_SPEED_UNKNOWN;
  1595. if (musb->gadget_driver && musb->gadget_driver->disconnect) {
  1596. spin_unlock(&musb->lock);
  1597. musb->gadget_driver->disconnect(&musb->g);
  1598. spin_lock(&musb->lock);
  1599. }
  1600. switch (musb->xceiv->state) {
  1601. default:
  1602. dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
  1603. usb_otg_state_string(musb->xceiv->state));
  1604. musb->xceiv->state = OTG_STATE_A_IDLE;
  1605. MUSB_HST_MODE(musb);
  1606. break;
  1607. case OTG_STATE_A_PERIPHERAL:
  1608. musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
  1609. MUSB_HST_MODE(musb);
  1610. break;
  1611. case OTG_STATE_B_WAIT_ACON:
  1612. case OTG_STATE_B_HOST:
  1613. case OTG_STATE_B_PERIPHERAL:
  1614. case OTG_STATE_B_IDLE:
  1615. musb->xceiv->state = OTG_STATE_B_IDLE;
  1616. break;
  1617. case OTG_STATE_B_SRP_INIT:
  1618. break;
  1619. }
  1620. musb->is_active = 0;
  1621. }
  1622. void musb_conifg_ep0(struct musb *musb)
  1623. {
  1624. os_printk(K_DEBUG, "U3D_DEVICE_CONF: %x\n", os_readl(U3D_DEVICE_CONF));
  1625. if (os_readl(U3D_DEVICE_CONF) & HW_USB2_3_SEL) { /* SS */
  1626. musb->g.speed = USB_SPEED_SUPER;
  1627. musb->g.ep0->maxpacket = 512;
  1628. os_printk(K_INFO, "musb_g_reset musb->g.speed: super\n");
  1629. ep0_setup(musb, musb->endpoints, &ep0_cfg_u3);
  1630. } else { /* HS, FS */
  1631. musb->g.speed = (u8) (os_readl(U3D_POWER_MANAGEMENT) & HS_MODE)
  1632. ? USB_SPEED_HIGH : USB_SPEED_FULL;
  1633. musb->g.ep0->maxpacket = 64;
  1634. os_printk(K_INFO, "musb_g_reset musb->g.speed: %s\n",
  1635. (musb->g.speed == USB_SPEED_HIGH) ? "high" : "full");
  1636. ep0_setup(musb, musb->endpoints, &ep0_cfg_u2);
  1637. }
  1638. os_printk(K_DEBUG, "U3D_EP0CSR: %x\n", os_readl(U3D_EP0CSR));
  1639. os_printk(K_DEBUG, "U3D_RXCOUNT0: %x\n", os_readl(U3D_RXCOUNT0));
  1640. }
/* Bus reset handler: flush endpoint state, reconfigure ep0 for the
 * negotiated speed, and re-enter USB_STATE_DEFAULT as B- or A-peripheral.
 * Temporarily drops musb->lock via musb_g_disconnect()'s driver callback.
 */
void musb_g_reset(struct musb *musb) __releases(musb->lock) __acquires(musb->lock)
{
	u32 devctl = os_readl(U3D_DEVICE_CONTROL);

	/* it may greater than 2.5mA , but it should meet the spec's requirement !! */
	if (musb->test_mode == 0)
		musb_sync_with_bat(musb, USB_UNCONFIGURED);
	/* tear down the hardware endpoint configuration before re-enumerating */
	mu3d_hal_unfigured_ep();

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);
	/* clear HR */
	else if (devctl & USB_DEVCTL_HOSTREQUEST) {
		/* marked off for 3.0 reset device test */
		/* os_writel(U3D_DEVICE_CONTROL, USB_DEVCTL_SESSION); */
	}

	/* set ep0 maxpacket/config for the speed the reset negotiated */
	musb_conifg_ep0(musb);

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & USB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		/* non-OTG with no B-device bit set should not happen */
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void)musb_gadget_vbus_draw(&musb->g, is_otg_enabled(musb) ? 8 : 100);
}