musb_gadget.c
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#if defined(CONFIG_USBIF_COMPLIANCE)
#include <linux/kthread.h>
#include <linux/sched.h>
#endif
#include <linux/usb/composite.h>

#include "musb_core.h"
#ifdef MUSB_QMU_SUPPORT
#include "musb_qmu.h"
#endif

#define FIFO_START_ADDR 512
/* #define RX_DMA_MODE1 1 */
/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but USBCV failures were seen
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and USBCV -- seem
 *       to break when DMA is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved OK when last tested.  Network traffic patterns
 *   (with lots of short transfers etc.) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both PIO and DMA behave with network and g_zero tests
 *     + no CPPI throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both PIO and DMA behave with network and g_zero tests
 *     + DMA is slow in the typical case (short_not_ok is clear)
 *     + double buffering OK with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with CPPI DMA
 *     + OMAP 2430, ditto with Mentor DMA
 *     + TUSB 6010, platform-specific DMA in the works
 */
/* ----------------------------------------------------------------------- */

/* __ADB_DEBUG__ start */
struct amessage {
	unsigned command;	/* command identifier constant */
	unsigned arg0;		/* first argument */
	unsigned arg1;		/* second argument */
	unsigned data_length;	/* length of payload (0 is allowed) */
	unsigned data_check;	/* checksum of data payload */
	unsigned magic;		/* command ^ 0xffffffff */
};
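/*
 * For example, an A_SYNC message carries
 * magic = A_SYNC ^ 0xffffffff = 0x434e5953 ^ 0xffffffff = 0xbcb1a6ac.
 */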
struct debuginfo {
	unsigned headtoken;
	unsigned command;	/* command identifier constant */
	unsigned msg_check;
	unsigned data_check;
	unsigned count;
	unsigned dummy;
	unsigned tailtoken;
};

typedef struct amessage amessage;
typedef struct debuginfo debuginfo;
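/*
 * ADB protocol command identifiers.  Most are 32-bit little-endian
 * encodings of a four-character ASCII tag, e.g. A_SYNC is "SYNC" and
 * A_CNXN is "CNXN".
 */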
#define A_SYNC 0x434e5953
#define A_CNXN 0x4e584e43
#define A_OPEN 0x4e45504f
#define A_OKAY 0x59414b4f
#define A_CLSE 0x45534c43
#define A_WRTE 0x45545257
#define A_AUTH 0x48545541
#define A_DBUG 0x41424a42

#define DBGHEADTOKEN 0x13579bdf
#define DBGTAILTOKEN 0xdca86420
struct adbDbg_t {
	u32 cmdChkSum;
	u32 dataSize;
	u32 dataChkSum;
};
typedef struct adbDbg_t adbDbg_t;

adbDbg_t adbDbg[2] = { {0}, {0} };
adbDbg_t adbDbgTest;
spinlock_t debugLock;
struct kfifo fifo;
#define FIFO_SIZE 32
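/*
 * 'fifo' and 'debugLock' must be initialised before the first kfifo_in()
 * below; that setup is not part of this excerpt.  A minimal sketch,
 * assuming kfifo_alloc() is used (the function name is illustrative only):
 */
#if 0
static int adb_debug_init(void)
{
	spin_lock_init(&debugLock);
	/* room for FIFO_SIZE debuginfo records; kfifo rounds the size up
	 * to a power of two
	 */
	return kfifo_alloc(&fifo, FIFO_SIZE * sizeof(debuginfo), GFP_KERNEL);
}
#endif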
static u32 adbDoChkSum(u32 length, u8 *buf)
{
	u32 i;
	u32 chkSum = 0;

	for (i = 0; i < length; i++)
		chkSum ^= buf[i];
	return chkSum;
}
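/*
 * The checksum is a plain byte-wise XOR over the buffer; e.g.
 * adbDoChkSum(3, "\x01\x02\x03") returns 0x01 ^ 0x02 ^ 0x03 = 0x00.
 */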
static void adbCmdLog(u8 *buf, u32 length, u8 is_in, char *func)
{
	amessage *msg = (amessage *) buf;
	int status = -1;

	if (bitdebug_enabled == 1) {
		if (msg != NULL) {
			if (sizeof(amessage) == length) {
				switch (msg->command) {
				case A_SYNC:
				case A_CNXN:
				case A_OPEN:
				case A_OKAY:
				case A_CLSE:
				case A_WRTE:
				case A_AUTH:
					status = 0;
					break;
				case A_DBUG:
					pr_info("adb: %s ERROR A_DBUG should not be transferred\n",
						func);
					break;
				default:
					status = 1;
					break;
				}
			} else {
				status = 1;
				/* pr_info("adb: %s A_DATA, data length = 0x%x\n", func, length); */
			}
		} else
			pr_info("adb: %s ERROR: amessage = NULL\n", func);

		if (0 == status) {
			adbDbg[is_in].dataSize = msg->data_length;
			adbDbg[is_in].cmdChkSum = adbDoChkSum(length, buf);
			if (0 == adbDbg[is_in].dataSize)
				adbDbg[is_in].dataChkSum = 0;
			pr_info("adb: %s cmd = 0x%x, pack length = 0x%x, checksum = 0x%x\n",
				func, msg->command, msg->data_length, adbDbg[is_in].cmdChkSum);
		}
		if (1 == status) {
			if (adbDbg[is_in].dataSize != length) {
				if (0 != length)
					pr_info("adb: %s ERROR: data size mismatch, adbDbg.dataSize = 0x%x, actual = 0x%x\n",
						func, adbDbg[is_in].dataSize, length);
			} else {
				adbDbg[is_in].dataChkSum = adbDoChkSum(length, buf);
				pr_info("adb: %s data length = 0x%x, checksum = 0x%x\n",
					func, length, adbDbg[is_in].dataChkSum);
			}
		}
	}
}
static int adbDbgInfoCheck(u8 *buf, u32 length, u8 is_in, char *func)
{
	debuginfo *dbg = (debuginfo *) buf;
	int status = -1;

	if (dbg != NULL) {
		if (sizeof(debuginfo) == length) {
			switch (dbg->command) {
			case A_DBUG:
				/* pr_info("adb: %s dbg->headtoken = 0x%x, dbg->tailtoken = 0x%x, is_in = %d\n",
					   func, dbg->headtoken, dbg->tailtoken, is_in); */
				/* pr_info("adb: %s dbg->msg_check = 0x%x, dbg->data_check = 0x%x, is_in = %d\n",
					   func, dbg->msg_check, dbg->data_check, is_in); */
				if (dbg->command == A_DBUG && dbg->headtoken == DBGHEADTOKEN
				    && dbg->tailtoken == DBGTAILTOKEN) {
					status = 0;
					if (adbDbg[is_in].cmdChkSum != dbg->msg_check)
						pr_info("adb: %s ERROR: cmdChkSum = 0x%x, msg_check = 0x%x, is_in = %d\n",
							func, adbDbg[is_in].cmdChkSum,
							dbg->msg_check, is_in);
					/* else */
					/*	pr_info("adb: %s cmdChkSum match, count = %d\n",
						func, bitdebug_writeCnt); */
					if (adbDbg[is_in].dataChkSum != dbg->data_check)
						pr_info("adb: %s ERROR: dataChkSum = 0x%x, data_check = 0x%x, is_in = %d\n",
							func, adbDbg[is_in].dataChkSum,
							dbg->data_check, is_in);
					/* else */
					/*	pr_info("adb: %s dataChkSum match, count = %d\n",
						func, bitdebug_writeCnt); */
					adbDbg[is_in].cmdChkSum = 0;
					adbDbg[is_in].dataChkSum = 0;
					if (bitdebug_writeCnt != dbg->count)
						pr_info("adb: %s ERROR: miss count = %d, dbg->count = %d\n",
							func, bitdebug_writeCnt, dbg->count);
					bitdebug_writeCnt++;
				} else
					pr_info("adb: %s ERROR: not A_DBUG, data length = 0x%x, is_in = %d\n",
						func, length, is_in);
				break;
			}
		}
	} else
		pr_info("adb: %s ERROR: debuginfo = NULL, is_in = %d\n", func, is_in);
	return status;
}
static int adbDebugInfoWrite(struct musb_ep *ep, struct usb_request *req)
{
	static u32 isDebugCmd;
	static bool isDataCmd;
	static bool isNormalCmd;
	static u32 packLength;	/* payload length of the pending normal command */
	unsigned long flags;

	if (!((1 == ep->is_in) && (ep_in == &(ep->end_point))))
		return -1;

	spin_lock_irqsave(&debugLock, flags);
	if (req->length == sizeof(amessage)) {
		amessage *msg = (amessage *) req->buf;

		if (msg != NULL) {
			switch (msg->command) {
			case A_SYNC:
			case A_CNXN:
			case A_OPEN:
			case A_OKAY:
			case A_CLSE:
			case A_WRTE:
			case A_AUTH:
				isNormalCmd = true;
				packLength = msg->data_length;
				pr_info("adb: adb_complete_in msg (0x%x) (0x%x) (0x%x), isNormalCmd = %d\n",
					msg->command, msg->data_length, msg->data_check,
					isNormalCmd);
				break;
			default:
				isDataCmd = true;
				pr_info("adb: adb_complete_in msg A_DATA, isDataCmd = %d\n",
					isDataCmd);
				break;
			}
		}
	} else if (req->length == sizeof(debuginfo)) {
		debuginfo *dbg = (debuginfo *) req->buf;

		if (dbg != NULL && dbg->command == A_DBUG && dbg->headtoken == DBGHEADTOKEN
		    && dbg->tailtoken == DBGTAILTOKEN) {
			isDebugCmd++;
			pr_info("adb: adb_complete_in A_DBUG (0x%x) (0x%x) (0x%x), isDebugCmd = %d\n",
				dbg->command, dbg->msg_check, dbg->data_check, isDebugCmd);
			/* if (false == isDataCmd) */
			/*	pr_info("adb_complete_in dbg WARNING, Data is not ready\n"); */
			kfifo_in(&fifo, dbg, sizeof(debuginfo));
			/* pr_info("adb_complete_in A_DBUG a\n"); */
		} else {
			isDataCmd = true;
			pr_info("adb: adb_complete_in msg A_DATA, isDataCmd = %d\n",
				isDataCmd);
		}
	} else {
		isDataCmd = true;
		pr_info("adb: adb_complete_in msg A_DATA, isDataCmd = %d\n", isDataCmd);
	}

	if ((isNormalCmd && isDataCmd && isDebugCmd)
	    || (isNormalCmd && (0 == packLength) && isDebugCmd)) {
		debuginfo tmp;
		unsigned int ret;

		ret = kfifo_out(&fifo, &tmp, sizeof(debuginfo));
		/* pr_info("adb_complete_in kfifo_out = %d\n", isDebugCmd); */
		if (-1 < adbDbgInfoCheck((u8 *) &tmp, sizeof(debuginfo), 1, "adb_complete_in")) {
			isDebugCmd--;
			isDataCmd = false;
			isNormalCmd = false;
			packLength = 0;
			/* pr_info("adb_complete_in Clear isDebugCmd = 0x%x, isDataCmd = %d, isNormalCmd = %d\n",
				   isDebugCmd, isDataCmd, isNormalCmd); */
		} else
			pr_info("adb: adb_complete_in ERROR adbDbgInfoCheck = %d, headtoken = 0x%x, command = 0x%x, msg_check = 0x%x, data_check = 0x%x\n",
				isDebugCmd, tmp.headtoken, tmp.command, tmp.msg_check,
				tmp.data_check);
	}
	spin_unlock_irqrestore(&debugLock, flags);
	return 0;
}
static int adbDegInfoHandle(struct usb_ep *ep, struct usb_request *req, char *func)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;
	int status = -1;

	if (bitdebug_enabled == 1) {
		musb_ep = to_musb_ep(ep);
		request = to_musb_request(req);
		if ((musb_ep->is_in == 0) && (ep_out == &(musb_ep->end_point))) {
			if (req->length == sizeof(debuginfo)) {
				debuginfo *dbg = (debuginfo *) req->buf;

				if (dbg != NULL && dbg->command == A_DBUG) {
					dbg->msg_check = adbDbg[musb_ep->is_in].cmdChkSum;
					dbg->data_check = adbDbg[musb_ep->is_in].dataChkSum;
					dbg->count = bitdebug_readCnt++;
					/* pr_info("adb: %s dbg (0x%x) (0x%x) (0x%x)\n",
						   func, dbg->command, dbg->msg_check, dbg->data_check); */
					request->request.complete(&request->ep->end_point,
								  &request->request);
					return -EINPROGRESS;
				}
			}
		}
		if ((musb_ep->is_in == 1) && (ep_in == &(musb_ep->end_point))) {
#if 0
			if (req->length == sizeof(amessage)) {
				amessage *msg = (amessage *) req->buf;

				if (msg != NULL) {
					switch (msg->command) {
					case A_SYNC:
					case A_CNXN:
					case A_OPEN:
					case A_OKAY:
					case A_CLSE:
					case A_WRTE:
					case A_AUTH:
						pr_info("adb: %s msg (0x%x) (0x%x) (0x%x) (0x%x) (0x%x) (0x%x)\n",
							func, msg->command, msg->arg0, msg->arg1,
							msg->data_length, msg->data_check,
							msg->magic);
						break;
					default:
						pr_info("adb: %s msg A_DATA or A_DBUG\n",
							func);
						break;
					}
				}
			}
#endif
			if (req->length == sizeof(debuginfo)) {
				debuginfo *dbg = (debuginfo *) req->buf;

				if (dbg != NULL && dbg->command == A_DBUG) {
					/* pr_info("adb: %s dbg (0x%x) (0x%x) (0x%x)\n",
						   func, dbg->command, dbg->msg_check, dbg->data_check); */
					adbDebugInfoWrite(musb_ep, req);
					request->request.complete(&request->ep->end_point,
								  &request->request);
					return 0;
				}
			}
		}
	}
	return status;
}
/* __ADB_DEBUG__ end */
#define is_buffer_mapped(req) (is_dma_capable() && \
	(req->map_state != UN_MAPPED))
/* Map the buffer for DMA */
static inline void map_dma_buffer(struct musb_request *request,
				  struct musb *musb, struct musb_ep *musb_ep)
{
#ifndef MUSB_QMU_SUPPORT
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;
#endif
	unsigned length;

	length = ALIGN(request->request.length, dma_get_cache_alignment());
	request->map_state = UN_MAPPED;

#ifndef MUSB_QMU_SUPPORT
	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
						musb_ep->packet_sz, request->request.buf,
						request->request.length);
	if (!compatible)
		return;
#endif
	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(musb->controller,
						      request->request.buf,
						      length,
						      request->tx
						      ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
					   request->request.dma,
					   length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}
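/*
 * Resulting map_state values, as consumed by unmap_dma_buffer() below:
 *   UN_MAPPED   - buffer is not DMA-mapped (PIO path, or the DMA engine
 *                 rejected the request)
 *   MUSB_MAPPED - this driver mapped the buffer and must unmap it
 *   PRE_MAPPED  - the caller supplied an already-mapped buffer; only
 *                 cache syncs are performed here
 */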
/* Unmap the buffer from DMA and hand it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request, struct musb *musb)
{
	unsigned length;

	length = ALIGN(request->request.length, dma_get_cache_alignment());
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		DBG(1, "not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
				 request->request.dma,
				 length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else {	/* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
					request->request.dma,
					length, request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(struct musb_ep *ep,
		     struct usb_request *request,
		     int status) __releases(ep->musb->lock) __acquires(ep->musb->lock)
{
	struct musb_request *req;
	struct musb *musb;
	int busy = ep->busy;

	req = to_musb_request(request);
	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;
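	/*
	 * Mark the endpoint busy while musb->lock is dropped below, so that
	 * code running in that window (for instance a completion handler
	 * re-queueing a request) does not restart I/O on this endpoint
	 * mid-giveback; the previous value is restored before returning.
	 */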
	ep->busy = 1;
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		DBG(1, "%s done request %p, %d/%d\n",
		    ep->end_point.name, request, req->request.actual, req->request.length);
	else
		DBG(1, "%s request %p, %d/%d fault %d\n",
		    ep->end_point.name, request,
		    req->request.actual, req->request.length, request->status);
	/* __ADB_DEBUG__ start */
	if (bitdebug_enabled == 1)
		adbDebugInfoWrite(ep, request);
	/* __ADB_DEBUG__ end */
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing them with the given
 * status.  Synchronous: the caller has locked the controller, blocked
 * IRQs, and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	/* struct musb *musb = ep->musb; */
	struct musb_request *req = NULL;
#ifndef MUSB_QMU_SUPPORT
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
#endif

	ep->busy = 1;
#ifdef MUSB_QMU_SUPPORT
	musb_flush_qmu(ep->hw_ep->epnum, (ep->is_in ? TXQ : RXQ));
#else
	if (is_dma_capable() && ep->dma) {
		struct dma_controller *c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR, 0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR, 0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(0, "%s: %s: abort DMA --> %d\n", __func__, ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}
#endif
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
		DBG(0, "call musb_g_giveback on function %s ep is %s\n", __func__,
		    ep->end_point.name);
	}
}
/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}
/* Peripheral TX (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize;
	mode 1 is used for larger transfers.

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\ -> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
		  |	IN token(s) are received from the host.)
		  | -> DMA interrupt on completion
		  |    calls TxAvail.
		  |      -> stop DMA, ~DMAENAB,
		  |      -> set TxPktRdy for last short pkt or zlp
		  |      -> Complete Request
		  |      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */
/*
 * An endpoint is transmitting data.  This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8 epnum = req->epnum;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct usb_request *request;
	u16 fifo_count = 0, csr;
	int use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		DBG(0, "ep:%s disabled - ignore request\n", musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(0, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			 (int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		DBG(1, "%s old packet still ready, txcsr %03x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		DBG(0, "%s stalling, txcsr %03x\n", musb_ep->end_point.name, csr);
		return;
	}

	DBG(1, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
	    epnum, musb_ep->packet_sz, fifo_count, csr);
	USB_LOGGER(TXSTATE, TXSTATE, epnum, musb_ep->packet_sz, fifo_count, csr);

	if (is_buffer_mapped(req)) {
		struct dma_controller *c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
				     musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */
		if (request_size < musb_ep->packet_sz)
			musb_ep->dma->desired_mode = 0;
		else
			musb_ep->dma->desired_mode = 1;

		use_dma = use_dma && c->channel_program(musb_ep->dma, musb_ep->packet_sz,
							musb_ep->dma->desired_mode,
							request->dma + request->actual,
							request_size);
		if (use_dma) {
			if (musb_ep->dma->desired_mode == 0) {
				/*
				 * We must not clear the DMAMODE bit
				 * before the DMAENAB bit -- and the
				 * latter doesn't always get cleared
				 * before we get here...
				 */
				csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
				musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS);
				csr &= ~MUSB_TXCSR_DMAMODE;
				csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_MODE);
				/* against programming guide */
			} else {
				csr |= (MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
				if (!musb_ep->hb_mult)
					csr |= MUSB_TXCSR_AUTOSET;
			}
			csr &= ~MUSB_TXCSR_P_UNDERRUN;

			/* __ADB_DEBUG__ start */
			if (bitdebug_enabled == 1) {
				if (ep_in == &(musb_ep->end_point)) {
					adbCmdLog(request->buf, request->length, musb_ep->is_in,
						  "txstate");
					/* pr_info("adb: musb_g_tx length = 0x%x, actual = 0x%x, packet_sz = 0x%x\n",
						   request->length, request->actual, musb_ep->packet_sz); */
				}
			}
			/* __ADB_DEBUG__ end */

			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	if (!use_dma) {
		/*
		 * Unmap the DMA buffer back to the CPU if DMA channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	DBG(1, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
	    musb_ep->end_point.name, use_dma ? "dma" : "pio",
	    request->actual, request->length,
	    musb_readw(epio, MUSB_TXCSR), fifo_count, musb_readw(epio, MUSB_TXMAXP));
	USB_LOGGER(TXSTATE_END, TXSTATE, musb_ep->end_point.name, use_dma ? "dma" : "pio",
		   request->actual, request->length, musb_readw(epio, MUSB_TXCSR), fifo_count,
		   musb_readw(epio, MUSB_TXMAXP));
}
/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16 csr;
	struct musb_request *req;
	struct usb_request *request;
	u8 __iomem *mbase = musb->mregs;
	struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct dma_channel *dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	DBG(1, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
	USB_LOGGER(MUSB_G_TX, MUSB_G_TX, musb_ep->end_point.name, csr);

	/* __ADB_DEBUG__ start */
	if (bitdebug_enabled == 1) {
		/* if (ep_in == &(musb_ep->end_point)) { */
		/*	pr_info("adb: musb_g_tx length = 0x%x, csr = 0x%x, musb_ep->is_in = %d\n",
			request->length, csr, musb_ep->is_in); */
		/* } */
	}
	/* __ADB_DEBUG__ end */

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		DBG(1, "underrun on ep%d, req %p\n", epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		DBG(1, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8 is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			DBG(3, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
			    epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet.  Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
		     && (request->length % musb_ep->packet_sz == 0)
		     && (request->actual == request->length))
		    || (is_dma && (!dma->desired_mode
				   || (request->actual % musb_ep->packet_sz)))) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			DBG(4, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
				    | MUSB_TXCSR_TXPKTRDY | (csr & MUSB_TXCSR_P_ISO));
			request->zero = 0;
			/*
			 * Return from here with the expectation of the endpoint
			 * interrupt for further action.
			 */
			return;
		}

		if (request->actual == request->length) {
#if 0
			if (ep_in == &(musb_ep->end_point)) {
				adbCmdLog(request->buf, request->actual, musb_ep->is_in,
					  "musb_g_tx");
				/* pr_info("adb: musb_g_tx length = 0x%x, actual = 0x%x, packet_sz = 0x%x\n",
					   request->length, request->actual, musb_ep->packet_sz); */
			}
#endif
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after some time.  During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function, especially
			 * on SMP systems.  Reselect the INDEX to be sure
			 * we are reading/modifying the right registers.
			 */
			musb_ep_select(mbase, epnum);
			/*
			 * Kickstart next transfer if appropriate;
			 * the packet that just completed might not
			 * be transmitted for hours or days.
			 * REVISIT for double buffering...
			 * FIXME revisit for stalls too...
			 */
			/* If configured as DB, FIFONOTEMPTY doesn't mean there is
			 * no space for a new packet.
			 */
			if (!(musb_read_txfifosz(mbase) & MUSB_FIFOSZ_DPB)) {
				csr = musb_readw(epio, MUSB_TXCSR);
				if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
					if ((csr & MUSB_TXCSR_TXPKTRDY) == 0) {
						musb_writew(epio, MUSB_TXCSR,
							    /* MUSB_TXCSR_MODE | */ MUSB_TXCSR_TXPKTRDY);
					}
					return;
				}
			}

			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				DBG(1, "%s idle now\n", musb_ep->end_point.name);
				return;
			}
		}
		txstate(musb, req);
	}
}
/* ------------------------------------------------------------ */

/* Peripheral RX (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\    -> RxReady
	  |	    -> if request queued, call rxstate
	  |	/\	-> setup DMA
	  |	|	    -> DMA interrupt on completion
	  |	|	        -> RxReady
	  |	|	            -> stop DMA
	  |	|	            -> ack the read
	  |	|	            -> if data received = max expected
	  |	|	               by the request, or host
	  |	|	               sent a short packet,
	  |	|	               complete the request,
	  |	|	               and start the next one.
	  |	|_____________________________________|
	  |	                else just wait for the host
	  |	                   to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */
/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8 epnum = req->epnum;
	struct usb_request *request = &req->request;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	unsigned len = 0;
	u16 fifo_count;
	u16 csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
	u8 use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		DBG(0, "ep:%s disabled - ignore request\n", musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(0, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		DBG(0, "%s stalling, RXCSR %04x\n", musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);
		DBG(1, "rxstate epnum %d fifo_count %d\n", epnum, fifo_count);
		/*
		 * Enable Mode 1 on RX transfers only when the short_not_ok flag
		 * is set.  Currently the short_not_ok flag is set only by the
		 * file_storage and f_mass_storage drivers.
		 */
#ifdef RX_DMA_MODE1
		if (fifo_count == musb_ep->packet_sz)
#else
		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
#endif
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef RX_DMA_MODE1
			if (is_buffer_mapped(req) && use_mode_1) {
				struct dma_controller *c;
				struct dma_channel *channel;
				int use_dma = 0;
				int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* Experimental: Mode1 works with mass storage use cases */
				csr |= MUSB_RXCSR_AUTOCLEAR;
				musb_writew(epio, MUSB_RXCSR, csr);
				csr |= MUSB_RXCSR_DMAENAB;
				musb_writew(epio, MUSB_RXCSR, csr);
				musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_DMAMODE);

				transfer_size = min(request->length - request->actual,
						    channel->max_len);
				/* Program the transfer length to be
				 * a multiple of the packet size, because
				 * short packets can't be transferred
				 * over mode 1.
				 */
				transfer_size = transfer_size -
						(transfer_size % musb_ep->packet_sz);
				musb_ep->dma->prog_len = transfer_size;
				musb_ep->dma->desired_mode = 1;

				use_dma = c->channel_program(channel,
							     musb_ep->packet_sz,
							     channel->desired_mode,
							     request->dma + request->actual,
							     transfer_size);
				if (use_dma)
					return;
			}
#else
			if (is_buffer_mapped(req)) {
				struct dma_controller *c;
				struct dma_channel *channel;
				int use_dma = 0;
				int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* We use DMA Req mode 0 in rx_csr, and the DMA controller
				 * operates in mode 0 only.  So we do not get endpoint
				 * interrupts due to DMA completion; we only get interrupts
				 * from the DMA controller.
				 *
				 * We could operate in DMA mode 1 if we knew the size of the
				 * transfer in advance.  For mass storage class,
				 * request->length = what the host sends, so that'd work.
				 * But for pretty much everything else, request->length is
				 * routinely more than what the host sends.  For most of
				 * these gadgets, end of transfer is signified either by a
				 * short packet or by filling the last byte of the buffer.
				 * (Sending extra data in that last packet should trigger an
				 * overflow fault.)  But in mode 1 we don't get a DMA
				 * completion interrupt for short packets.
				 *
				 * Theoretically, we could enable the DMAReq IRQ
				 * (MUSB_RXCSR_DMAMODE = 1) to get an endpoint interrupt on
				 * every DMA req, but that didn't seem to work reliably.
				 *
				 * REVISIT an updated g_file_storage can set req->short_not_ok,
				 * which then becomes usable as a runtime "use mode 1" hint...
				 */
				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned, request->length - request->actual,
							      channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					/*
					 * Commented out here because we don't have "hb_mult"
					 * and follow the original setting; we don't want to
					 * change it.
					 * if (!musb_ep->hb_mult &&
					 *     musb_ep->hw_ep->rx_double_buffered)
					 *	csr |= MUSB_RXCSR_AUTOCLEAR;
					 */
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned, request->length - request->actual,
							      fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(channel,
							     musb_ep->packet_sz,
							     channel->desired_mode,
							     request->dma + request->actual,
							     transfer_size);
				if (use_dma)
					return;
			}
#endif
			len = request->length - request->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
			    musb_ep->end_point.name, len, fifo_count, musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

			/*
			 * Unmap the DMA buffer back to the CPU if DMA channel
			 * programming fails.  This buffer is mapped if the
			 * channel allocation was successful.
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
				       (request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length || fifo_count < musb_ep->packet_sz) {
		/* __ADB_DEBUG__ start */
		if (bitdebug_enabled == 1) {
			if (ep_out == &(musb_ep->end_point)) {
				adbCmdLog(request->buf, request->actual, musb_ep->is_in, "rxstate");
				/* pr_info("adb: rxstate length = 0x%x, actual = 0x%x, len = 0x%x, packet_sz = 0x%x\n",
					   request->length, request->actual, len, musb_ep->packet_sz); */
			}
		}
		/* __ADB_DEBUG__ end */
		musb_g_giveback(musb_ep, request, 0);
	}
}
/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16 csr;
	struct musb_request *req;
	struct usb_request *request;
	void __iomem *mbase = musb->mregs;
	struct musb_ep *musb_ep;
	void __iomem *epio = musb->endpoints[epnum].regs;
	struct dma_channel *dma;
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
#ifdef RX_DMA_MODE1
	u16 len;
	u32 residue;
	struct dma_controller *c = musb->dma_controller;
	int status;
#endif

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req) {
#ifdef RX_DMA_MODE1
		musb_ep->rx_pending = 1;
		DBG(2, "Packet received on %s but no request queued\n",
		    musb_ep->end_point.name);
#endif
		return;
	}

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/* __ADB_DEBUG__ start */
	if (bitdebug_enabled == 1) {
		/* if (ep_out == &(musb_ep->end_point)) { */
		/*	pr_info("adb: musb_g_rx length = 0x%x, csr = 0x%x, musb_ep->is_in = %d\n",
			request->length, csr, musb_ep->is_in); */
		/* } */
	}
	/* __ADB_DEBUG__ end */

	DBG(1, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
	    csr, dma ? " (dma)" : "", request);
	USB_LOGGER(MUSB_G_RX, MUSB_G_RX, musb_ep->end_point.name, csr,
		   (dma != NULL) ? "DMA" : "PIO", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		DBG(0, "%s sendstall on %p\n", musb_ep->name, request);
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		DBG(0, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}

	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(1, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (csr & MUSB_RXCSR_FIFOFULL)
		DBG(1, "%s, FIFO full\n", musb_ep->end_point.name);

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
#ifdef RX_DMA_MODE1
		/* For short_not_ok type transfers and mode0 transfers */
		if (dma->desired_mode == 0 || request->short_not_ok)
			return;

		if (!(csr & MUSB_RXCSR_RXPKTRDY)) {
			DBG(1, "%s, DMA busy and packet not ready\n",
			    musb_ep->end_point.name);
			return;
		}

		/* For mode 1 we get here for the last short packet */
		len = musb_readw(epio, MUSB_RXCOUNT);

		/* We should get here only for a short packet. */
		if (len == musb_ep->packet_sz) {
			DBG(2, "%s, packet not short, RXCOUNT=%d\n",
			    musb_ep->end_point.name, len);
			return;
		}

		/* Pause the channel to get the correct transfer residue. */
		status = c->channel_pause(musb_ep->dma);
		residue = c->tx_status(musb_ep->dma);
		status = c->check_residue(musb_ep->dma, residue);
		DBG(2, "len=%d, residue=%d\n", len, residue);
		if (status) {
			/* Something's wrong */
			status = c->channel_resume(musb_ep->dma);
			return;
		}

		/* In cases when we don't know the transfer length, the short
		 * packet indicates the end of the current transfer.
		 */
		status = c->channel_abort(musb_ep->dma);

		/* Update with the actual number of bytes transferred */
		request->actual = musb_ep->dma->prog_len - residue;

		/* Clear DMA bits in the CSR */
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB | MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* Proceed to read the short packet */
		rxstate(musb, req);

		/* Don't program the next transfer; it would tamper with the
		 * DMA busy condition.  Wait for the next OUT.
		 */
#else
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG(40, "%s busy, csr %04x\n", musb_ep->end_point.name, csr);
#endif
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB | MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		DBG(1, "RXCSR%d %04x, dma off, %04x, len %zu, req %p ep %d\n",
		    epnum, csr,
		    musb_readw(epio, MUSB_RXCSR), musb_ep->dma->actual_len, request, epnum);

		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
		    || (dma->actual_len & (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

#ifdef RX_DMA_MODE1
		/* We get here after DMA completion */
		if ((dma->desired_mode == 1) && (!request->short_not_ok)) {
			/* Incomplete? wait for next OUT packet */
			if (request->actual < request->length) {
				DBG(2, "Wait for next OUT\n");
			} else if (request->actual == request->length) {
				DBG(2, "Transfer over mode1 done\n");
				musb_g_giveback(musb_ep, request, 0);
			} else {
				DBG(2, "Transfer length exceeded!!\n");
			}
			return;
		}
#endif
		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
		    && (musb_ep->dma->actual_len == musb_ep->packet_sz)) {
			/* In the double-buffer case, continue to unload the fifo if
			 * there is an RX packet in the FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) && hw_ep->rx_double_buffered)
				goto exit;
			return;
		}

		/* __ADB_DEBUG__ start */
		if (bitdebug_enabled == 1) {
			if (ep_out == &(musb_ep->end_point)) {
				adbCmdLog(request->buf, request->actual, musb_ep->is_in,
					  "musb_g_rx");
				/* pr_info("adb: musb_g_rx length = 0x%x, actual = 0x%x, packet_sz = 0x%x\n",
					   request->length, request->actual, musb_ep->packet_sz); */
			}
		}
		/* __ADB_DEBUG__ end */
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after some time.  During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function, especially
		 * on SMP systems.  Reselect the INDEX to be sure
		 * we are reading/modifying the right registers.
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
exit:
	/* Analyze request */
	rxstate(musb, req);
}
/*
 * In saving mode: ACM IN-BULK -> double buffer, OUT-BULK -> single buffer,
 * IN-INT -> single buffer; ADB IN-BULK -> single buffer, OUT-BULK -> single
 * buffer.
 */
static int is_db_ok(struct musb *musb, struct musb_ep *musb_ep)
{
	struct usb_composite_dev *cdev = (musb->g).ep0->driver_data;
	struct usb_configuration *c = cdev->config;
	struct usb_gadget *gadget = &(musb->g);
	int tmp;
	int ret = 1;

	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
		struct usb_function *f = c->interface[tmp];
		struct usb_descriptor_header **descriptors;

		if (!f)
			break;
		DBG(0, "Ifc name=%s\n", f->name);
		switch (gadget->speed) {
		case USB_SPEED_SUPER:
			descriptors = f->ss_descriptors;
			break;
		case USB_SPEED_HIGH:
			descriptors = f->hs_descriptors;
			break;
		default:
			descriptors = f->fs_descriptors;
		}
		for (; *descriptors; ++descriptors) {
			struct usb_endpoint_descriptor *ep;
			int is_in;
			int epnum;

			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
				continue;
			ep = (struct usb_endpoint_descriptor *)*descriptors;
			is_in = (ep->bEndpointAddress & 0x80) >> 7;
			epnum = (ep->bEndpointAddress & 0x0f);
			/*
			 * Under saving mode, some kinds of EPs have to be set
			 * as single buffer:
			 * ACM OUT-BULK - single
			 * ACM IN-BULK - double
			 * ADB OUT-BULK - single
			 * ADB IN-BULK - single
			 */
			/* ep must be matched */
			if (ep->bEndpointAddress == (musb_ep->end_point).address) {
				DBG(0, "%s %s desc-addr=%x, addr=%x\n", f->name,
				    is_in ? "IN" : "OUT", ep->bEndpointAddress,
				    (musb_ep->end_point).address);
				if (!strcmp(f->name, "acm") && !is_in)
					ret = 0;
				else if (!strcmp(f->name, "adb"))
					ret = 0;
				if (ret == 0)
					DBG(0, "[%s] EP%d-%s as single buffer\n",
					    f->name, epnum, (is_in ? "IN" : "OUT"));
				else
					DBG(0, "[%s] EP%d-%s as double buffer\n",
					    f->name, epnum, (is_in ? "IN" : "OUT"));
				goto end;
			}
		}
	}
end:
	return ret;
}
#ifdef CONFIG_MTK_C2K_SUPPORT
static char *musb_dbuffer_avail_function_list[] = {
	"adb",
	"mtp",
	"Mass Storage Function",
	"rndis",
	"acm",
	"rawbulk-modem",
	NULL
};
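/*
 * Sketch of the policy implemented below: an endpoint is granted double
 * buffering only if it belongs to one of the functions named above, so e.g.
 * an "adb" or "mtp" bulk endpoint qualifies, while an endpoint of a function
 * absent from the list stays single buffered.
 */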
static int check_musb_dbuffer_avail(struct musb *musb, struct musb_ep *musb_ep)
{
	int tmp;
	struct usb_composite_dev *cdev = (musb->g).ep0->driver_data;
	struct usb_configuration *c = cdev->config;
	struct usb_gadget *gadget = &(musb->g);
/* #define TIME_SPENT_CHECK_MUSB_DBUFFER_AVAIL */
#ifdef TIME_SPENT_CHECK_MUSB_DBUFFER_AVAIL
	struct timeval tv_before, tv_after;

	do_gettimeofday(&tv_before);
#endif

	if (c == NULL)
		return 0;
	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
		struct usb_function *f = c->interface[tmp];
		struct usb_descriptor_header **descriptors;

		if (!f)
			break;
		pr_warn("<%s, %d>, name: %s\n", __func__, __LINE__, f->name);
		switch (gadget->speed) {
		case USB_SPEED_SUPER:
			descriptors = f->ss_descriptors;
			break;
		case USB_SPEED_HIGH:
			descriptors = f->hs_descriptors;
			break;
		default:
			descriptors = f->fs_descriptors;
		}
		for (; *descriptors; ++descriptors) {
			struct usb_endpoint_descriptor *ep;
			int is_in;
			int epnum;

			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
				continue;
			ep = (struct usb_endpoint_descriptor *)*descriptors;
			is_in = (ep->bEndpointAddress & 0x80) >> 7;
			epnum = (ep->bEndpointAddress & 0x0f);
			pr_warn("<%s, %d>, ep->bEndpointAddress(%x), address(%x)\n",
				__func__, __LINE__, ep->bEndpointAddress,
				(musb_ep->end_point).address);
			/* ep must be matched */
			if (ep->bEndpointAddress == (musb_ep->end_point).address) {
				int i;

				for (i = 0;; i++) {
					if (musb_dbuffer_avail_function_list[i] == NULL)
						break;
					pr_warn("<%s, %d>, comparing:%s\n", __func__,
						__LINE__, musb_dbuffer_avail_function_list[i]);
					if (!strcmp(f->name, musb_dbuffer_avail_function_list[i])) {
						pr_warn("<%s, %d>, got bulk ep:%x in function :%s\n",
							__func__, __LINE__,
							ep->bEndpointAddress, f->name);
#ifdef TIME_SPENT_CHECK_MUSB_DBUFFER_AVAIL
						do_gettimeofday(&tv_after);
						pr_warn("<%s, %d>, sec:%d, usec:%d\n",
							__func__, __LINE__,
							(tv_after.tv_sec - tv_before.tv_sec),
							(tv_after.tv_usec - tv_before.tv_usec));
#endif
						return 1;
					}
				}
#ifdef TIME_SPENT_CHECK_MUSB_DBUFFER_AVAIL
				do_gettimeofday(&tv_after);
				pr_warn("<%s, %d>, sec:%d, usec:%d\n", __func__,
					__LINE__, (tv_after.tv_sec - tv_before.tv_sec),
					(tv_after.tv_usec - tv_before.tv_usec));
#endif
				return 0;
			}
		}
	}
	pr_warn("<%s, %d>, should not be here\n", __func__, __LINE__);
	return 0;
}
#endif
static void fifo_setup(struct musb *musb, struct musb_ep *musb_ep)
{
	void __iomem *mbase = musb->mregs;
	int size = 0;
	u16 maxpacket = musb_ep->fifo_size;
	u16 c_off = musb->fifo_addr >> 3;
	u8 c_size;
	int dbuffer_needed = 0;

	/* expect hw_ep has already been zero-initialized */
	size = ffs(max_t(u16, maxpacket, 8)) - 1;
	maxpacket = 1 << size;
	DBG(0, "musb type=%s\n",
	    (musb_ep->type == USB_ENDPOINT_XFER_BULK ? "BULK" :
	     (musb_ep->type == USB_ENDPOINT_XFER_INT ? "INT" :
	      (musb_ep->type == USB_ENDPOINT_XFER_ISOC ? "ISO" : "CONTROL"))));
	c_size = size - 3;
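	/*
	 * Sizing example: for a 512-byte bulk endpoint, size = ffs(512) - 1
	 * = 9, maxpacket is rounded to 1 << 9 = 512, and c_size = 9 - 3 = 6,
	 * matching the hardware's 2^(c_size + 3) FIFO size encoding.
	 */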
	/* Use double buffering if the transfer type is bulk or isoc, so the
	 * user must make sure the FIFO space is sufficient.
	 */
	if (musb_ep->fifo_mode == MUSB_BUF_DOUBLE
	    && (musb_ep->type == USB_ENDPOINT_XFER_BULK
		|| musb_ep->type == USB_ENDPOINT_XFER_ISOC)) {
#ifdef CONFIG_MTK_C2K_SUPPORT
		if (check_musb_dbuffer_avail(musb, musb_ep))
			dbuffer_needed = 1;
#else
		dbuffer_needed = 1;
#endif
	}
	if (dbuffer_needed) {
		if ((musb->fifo_addr + (maxpacket << 1)) > (musb->fifo_size)) {
			DBG(0, "MUSB_BUF_DOUBLE USB FIFO is not enough!!! (%d>%d), fifo_addr=%d\n",
			    (musb->fifo_addr + (maxpacket << 1)), (musb->fifo_size),
			    musb->fifo_addr);
			return;
		}
		if (is_saving_mode()) {
			if (is_db_ok(musb, musb_ep)) {
				DBG(0, "Saving mode, but EP%d supports DBBUF\n",
				    musb_ep->current_epnum);
				c_size |= MUSB_FIFOSZ_DPB;
			}
		} else {
			DBG(0, "EP%d supports DBBUF\n", musb_ep->current_epnum);
			c_size |= MUSB_FIFOSZ_DPB;
		}
	} else if ((musb->fifo_addr + maxpacket) > (musb->fifo_size)) {
		DBG(0, "MUSB_BUF_SINGLE USB FIFO is not enough!!! (%d>%d)\n",
		    (musb->fifo_addr + maxpacket), (musb->fifo_size));
		return;
	}
	/* configure the FIFO */
	/* musb_writeb(mbase, MUSB_INDEX, musb_ep->hw_ep->epnum); */
	DBG(0, "fifo size is %d after %d, fifo address is %d, epnum %d, hwepnum %d\n",
	    c_size, maxpacket, musb->fifo_addr, musb_ep->current_epnum,
	    musb_ep->hw_ep->epnum);
	if (musb_ep->is_in) {
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
	} else {
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
	}
	musb->fifo_addr += (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}

/* ------------------------------------------------------------ */
static int musb_gadget_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	unsigned long flags;
	struct musb_ep *musb_ep;
	struct musb_hw_ep *hw_ep;
	void __iomem *regs;
	struct musb *musb;
	void __iomem *mbase;
	u8 epnum;
	u16 csr;
	unsigned tmp;
	int status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;
	if (bitdebug_enabled == 1) {
		if (kfifo_alloc(&fifo, PAGE_SIZE, GFP_KERNEL))
			DBG(0, "kfifo_alloc failed\n");
		spin_lock_init(&debugLock);
	}
	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);
	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);
	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;
	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;
		if (!ok) {
			DBG(2, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}
	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
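	/*
	 * Example: a high-bandwidth ISO descriptor with wMaxPacketSize 0x1400
	 * yields packet_sz = 1024 and hb_mult = 2, i.e. up to three 1024-byte
	 * transactions (3072 bytes) per microframe; that total is what "tmp"
	 * is checked against the hardware FIFO size below.
	 */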
	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx) {
			DBG(0, "packet size beyond hardware FIFO size\n");
			goto fail;
		}
#ifndef MUSB_QMU_SUPPORT
		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
#endif
		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
				    | (musb_ep->hb_mult << 11));
		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
		    & MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;
		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);
	} else {
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx) {
			DBG(0, "packet size beyond hardware FIFO size\n");
			goto fail;
		}
#ifndef MUSB_QMU_SUPPORT
		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);
#endif
		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
				    | (musb_ep->hb_mult << 11));
		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}
		/* Don't flush the FIFO on enable: sometimes USB receives
		 * packets before the endpoint is enabled, and flushing here
		 * would lose them. The FIFO is flushed when the endpoint is
		 * disabled instead.
		 */
#if 0
		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;
		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
#endif
	}
	fifo_setup(musb, musb_ep);
#ifndef MUSB_QMU_SUPPORT
	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	/* interrupt-mode endpoints don't use DMA */
	if (is_dma_capable() && musb->dma_controller &&
	    musb_ep->type != USB_ENDPOINT_XFER_INT) {
		struct dma_controller *c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
						(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;
#endif
	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;
#ifdef MUSB_QMU_SUPPORT
	mtk_qmu_enable(musb, epnum, !(musb_ep->is_in));
#endif
	DBG(0, "%s periph: enabled %s for %s %s, %smaxpacket %d\n",
	    musb_driver_name, musb_ep->end_point.name,
	    ({
		char *s;

		switch (musb_ep->type) {
		case USB_ENDPOINT_XFER_BULK:
			s = "bulk";
			break;
		case USB_ENDPOINT_XFER_INT:
			s = "int";
			break;
		default:
			s = "iso";
			break;
		}
		s;
	    }),
	    musb_ep->is_in ? "IN" : "OUT",
	    musb_ep->dma ? "dma, " : "", musb_ep->packet_sz);
	schedule_work(&musb->irq_work);
fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long flags;
	struct musb *musb;
	u8 epnum;
	struct musb_ep *musb_ep;
	void __iomem *epio;
	int status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);
	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
#ifndef MUSB_QMU_SUPPORT
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
#endif
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 csr;

#ifndef MUSB_QMU_SUPPORT
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
#endif
		/* flush fifo here */
		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		/* set twice in case of double buffering */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}
	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;
	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);
	schedule_work(&musb->irq_work);
	spin_unlock_irqrestore(&(musb->lock), flags);
	DBG(2, "%s\n", musb_ep->end_point.name);
	return status;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	/* struct musb *musb = musb_ep->musb; */
	struct musb_request *request = NULL;

	request = kzalloc(sizeof(*request), gfp_flags);
	if (!request) {
		DBG(0, "not enough memory\n");
		return NULL;
	}
	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;
	return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}
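/*
 * Typical caller flow (sketch; function drivers normally go through the
 * usb_ep_* wrappers rather than calling these directly):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = done_cb;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(ep, req);
 */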
static LIST_HEAD(buffers);

struct free_record {
	struct list_head list;
	struct device *dev;
	unsigned bytes;
	dma_addr_t dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
#ifdef MUSB_QMU_SUPPORT
	/* rate-limit the debug output to avoid printing too much */
	static DEFINE_RATELIMIT_STATE(ratelimit, 1 * HZ, 10);

	if (!(__ratelimit(&ratelimit)))
		return;
	QMU_WARN("<== %s request %p len %u on hw_ep%d\n",
		 req->tx ? "TX/IN" : "RX/OUT", &req->request,
		 req->request.length, req->epnum);
#else
	DBG(2, "<== %s request %p len %u on hw_ep%d\n",
	    req->tx ? "TX/IN" : "RX/OUT", &req->request,
	    req->request.length, req->epnum);
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
#endif
}
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags)
{
	struct musb_ep *musb_ep;
	struct musb_request *request;
	struct musb *musb;
	int status = 0;
	unsigned long lockflags;
	/* __ADB_DEBUG__ start */
	int adbStatus = 0;
	/* __ADB_DEBUG__ end */

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	request = to_musb_request(req);
	request->musb = musb;
	if (request->ep != musb_ep)
		return -EINVAL;
	/* __ADB_DEBUG__ start */
	if (bitdebug_enabled == 1) {
		adbStatus = adbDegInfoHandle(ep, req, "musb_gadget_queue");
		if (adbStatus != -1)
			return adbStatus;
	}
	/* __ADB_DEBUG__ end */
	DBG(2, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;
	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);
	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		DBG(2, "req %p queued to %s while ep %s\n", req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}
	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);
#ifdef MUSB_QMU_SUPPORT
	if (request->request.dma != DMA_ADDR_INVALID) {
		/* TX case */
		if (request->tx) {
			/* TX QMU has no info on the length sent; set this field in advance */
			request->request.actual = request->request.length;
			/* Only enqueue packets with length > 0. Don't send a ZLP here for the MSC protocol. */
			if (request->request.length > 0) {
				musb_kick_D_CmdQ(musb, request);
			} else if (request->request.length == 0) { /* for UMS special case */
				unsigned long timeout = jiffies + HZ;
				int is_timeout = 1;

				QMU_WARN("TX ZLP sent case\n");
				/* Wait for QMU TX done; one second should be enough in the UMS case due to the protocol. */
				while (time_before_eq(jiffies, timeout)) {
					if (musb_is_qmu_stop(request->epnum, request->tx ? 0 : 1)) {
						is_timeout = 0;
						break;
					}
				}
				if (!is_timeout) {
					musb_tx_zlp_qmu(musb, request->epnum);
					musb_g_giveback(musb_ep, &(request->request), 0);
				} else {
					/* let qmu_done_tx handle this */
					QMU_WARN("TX ZLP sent in qmu_done_tx\n");
					goto cleanup;
				}
			} else {
				QMU_ERR("ERR, TX, request->request.length(%d)\n",
					request->request.length);
			}
		} else { /* RX case */
			musb_kick_D_CmdQ(musb, request);
		}
	}
	goto cleanup;
#else
#ifdef RX_DMA_MODE1
	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
		/* In the RX case, if there is no packet pending to be read
		 * from the FIFO, wait for the next interrupt.
		 */
		if (!request->tx) {
			if (!musb_ep->rx_pending) {
				DBG(2, "No packet pending for %s\n", ep->name);
				goto cleanup;
			} else {
				musb_ep->rx_pending = 0;
				DBG(2, "Read packet from fifo %s\n", ep->name);
			}
		}
		musb_ep_restart(musb, request);
	}
#else
	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);
#endif
#endif

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
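/*
 * Note on the QMU TX zero-length corner above (assumed to be hit only by
 * mass-storage style protocols): a ZLP request busy-waits up to one second
 * for the queue to drain, then sends the ZLP synchronously; on timeout the
 * giveback is deferred to qmu_done_tx instead.
 */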
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb_request *req = to_musb_request(request);
	struct musb_request *r;
	unsigned long flags;
	int status = 0;
	struct musb *musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		DBG(2, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}
	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);
#ifdef MUSB_QMU_SUPPORT
	else {
		QMU_DBG("dequeue req(%p), ep(%d), swep(%d)\n", request,
			musb_ep->hw_ep->epnum, ep->address);
		musb_flush_qmu(musb_ep->hw_ep->epnum, (musb_ep->is_in ? TXQ : RXQ));
		musb_g_giveback(musb_ep, request, -ECONNRESET);
		musb_restart_qmu(musb, musb_ep->hw_ep->epnum, (musb_ep->is_in ? TXQ : RXQ));
	}
#else
	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller *c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}
#endif
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	u8 epnum = musb_ep->current_epnum;
	struct musb *musb = musb_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	void __iomem *mbase;
	unsigned long flags;
	u16 csr;
	struct musb_request *request;
	int status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}
	musb_ep_select(mbase, epnum);
	request = next_request(musb_ep);
	if (value) {
		if (request) {
			DBG(0, "request in progress, cannot halt %s\n", ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				DBG(0, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;
	/* set/clear the stall and toggle bits */
	DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS | MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL | MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS | MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL | MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}
	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		DBG(0, "restarting the request\n");
		musb_ep_restart(musb, request);
	}
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
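/*
 * Usage sketch: a function driver stalls its endpoint with
 * usb_ep_set_halt(ep); the host then sees STALL handshakes until it issues
 * CLEAR_FEATURE(ENDPOINT_HALT), which lands here with value == 0 (unless
 * the endpoint was wedged via musb_gadget_set_wedge() below).
 */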
/*
 * Sets the halt feature with the clear requests ignored.
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;
	musb_ep->wedged = 1;
	return usb_ep_set_halt(ep);
}

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	void __iomem *epio = musb_ep->hw_ep->regs;
	int retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb *musb = musb_ep->musb;
		int epnum = musb_ep->current_epnum;
		void __iomem *mbase = musb->mregs;
		unsigned long flags;

		spin_lock_irqsave(&musb->lock, flags);
		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);
		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep *musb_ep = to_musb_ep(ep);
	struct musb *musb = musb_ep->musb;
	u8 epnum = musb_ep->current_epnum;
	void __iomem *epio = musb->endpoints[epnum].regs;
	void __iomem *mbase;
	unsigned long flags;
	u16 csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);
#ifndef MUSB_QMU_SUPPORT
	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));
#endif
	if (musb_ep->is_in) {
#ifdef MUSB_QMU_SUPPORT
		QMU_WARN("fifo flush(%d), sw(%d)\n", epnum, ep->address);
		musb_flush_qmu(epnum, TXQ);
		musb_restart_qmu(musb, epnum, TXQ);
#endif
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
#ifdef MUSB_QMU_SUPPORT
		QMU_WARN("fifo flush(%d), sw(%d)\n", epnum, ep->address);
		musb_flush_qmu(epnum, RXQ);
		musb_restart_qmu(musb, epnum, RXQ);
#endif
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}
#ifndef MUSB_QMU_SUPPORT
	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
#endif
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable = musb_gadget_enable,
	.disable = musb_gadget_disable,
	.alloc_request = musb_alloc_request,
	.free_request = musb_free_request,
	.queue = musb_gadget_queue,
	.dequeue = musb_gadget_dequeue,
	.set_halt = musb_gadget_set_halt,
	.set_wedge = musb_gadget_set_wedge,
	.fifo_status = musb_gadget_fifo_status,
	.fifo_flush = musb_gadget_fifo_flush
};
/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb *musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb *musb = gadget_to_musb(gadget);
	void __iomem *mregs = musb->mregs;
	unsigned long flags;
	int status = -EINVAL;
	u8 power, devctl;
	int retries;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);
		/* Block idling for at least 1s */
		musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(1 * HZ));
		status = 0;
		goto done;
	default:
		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb->xceiv->state));
		goto done;
	}
	status = 0;
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	DBG(2, "issue wakeup\n");
	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);
	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
static int musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb *musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on, bool usb_in)
{
	u8 power;

	DBG(0, "MUSB: gadget pull up %d start\n", is_on);
	if (musb->power) {
		power = musb_readb(musb->mregs, MUSB_POWER);
		if (is_on)
			power |= MUSB_POWER_SOFTCONN;
		else
			power &= ~MUSB_POWER_SOFTCONN;
		musb_writeb(musb->mregs, MUSB_POWER, power);
	} else {
		if (!usb_in && is_on)
			DBG(0, "no USB cable, don't need to turn on USB\n");
		else if (musb->is_host)
			DBG(0, "USB is host, don't need to control USB\n");
		else if (musb->in_ipo_off)
			DBG(0, "USB is in charging mode, don't need to control USB\n");
		else if (is_on)
			musb_start(musb);
		else
			musb_stop(musb);
	}
	DBG(0, "MUSB: gadget pull up %d end\n", is_on);
}
#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	DBG(2, "<= %s =>\n", __func__);
	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */
	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb *musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
}

int first_connect = 1;
int check_delay_done = 1;
static unsigned long target_jffy;
#define ENUM_GAP_DELAY 50
#define ENUM_GAP_SEC 2
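/*
 * Timing example (assuming HZ = 100): the first pullup after the gadget
 * becomes ready records target_jffy = jiffies + 200 ticks; a reconnect
 * within those 2 seconds then sleeps in ENUM_GAP_DELAY (50 ms) steps until
 * the gap has elapsed or the first connection completes.
 */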
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb *musb = gadget_to_musb(gadget);
	/* unsigned long flags; */
	bool usb_in;

	DBG(0, "is_on=%d, softconnect=%d ++\n", is_on, musb->softconnect);
	is_on = !!is_on;
	/* perform the delay at most once, to avoid PMIC concurrency with init */
	if (!check_delay_done && musb->is_ready && !first_connect) {
		/* perform delay */
		DBG(0, "check perform delay needed\n");
		while (time_before_eq(jiffies, target_jffy)) {
			DBG(0, "sleep %d ms\n", ENUM_GAP_DELAY);
			msleep(ENUM_GAP_DELAY);
			if (first_connect) {
				DBG(0, "got first_conn in loop\n");
				break;
			}
		}
		DBG(0, "delay done, check_delay_done to 1\n");
		check_delay_done = 1;
	}
	/* only set once, when the user-space function is ready */
	if (is_on && !musb->is_ready) {
		musb->is_ready = true;
		target_jffy = jiffies + ENUM_GAP_SEC * HZ;
	}
	pm_runtime_get_sync(musb->controller);
	/* NOTE: pmic would enable irq internally */
	usb_in = usb_cable_connected();
	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	/* Removed spin_lock to prevent deadlock */
	/* spin_lock_irqsave(&musb->lock, flags); */
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on, usb_in);
	}
	/* spin_unlock_irqrestore(&musb->lock, flags); */
	pm_runtime_put(musb->controller);
	return 0;
}
static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver);

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame = musb_gadget_get_frame,
	.wakeup = musb_gadget_wakeup,
	.set_selfpowered = musb_gadget_set_self_powered,
	/* .vbus_session = musb_gadget_vbus_session, */
	.vbus_draw = musb_gadget_vbus_draw,
	.pullup = musb_gadget_pullup,
	.udc_start = musb_gadget_start,
	.udc_stop = musb_gadget_stop,
};

/* ----------------------------------------------------------------------- */
/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port. It assumes
 * all peripheral ports are external...
 */

/* #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) */
#if 0
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
}
#endif

static void init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;

	/* memset(ep, 0, sizeof *ep); */
	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
		(!epnum || hw_ep->is_shared_fifo) ? "" : (is_in ? "in" : "out"));
	DBG(0, "EP %d name is %s\n", epnum, ep->name);
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}
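/*
 * Naming example, following the sprintf() above: epnum 0 always yields
 * "ep0"; a split hw_ep 1 yields "ep1in"/"ep1out", while a shared-FIFO
 * endpoint is plain "ep1".
 */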
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8 epnum;
	struct musb_hw_ep *hw_ep;
	unsigned count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints; epnum < musb->nr_endpoints; epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out, epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */
	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	/* #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) */
#if 0
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
#endif
	musb->g.name = musb_driver_name;
	musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	/* #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) */
#if 0
	status = device_register(&musb->g.dev);
	if (status != 0) {
		put_device(&musb->g.dev);
		return status;
	}
#endif
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;
	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	usb_del_gadget_udc(&musb->g);
	/* #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0) */
#if 0
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
#endif
}
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g, struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	struct usb_otg *otg = musb->xceiv->otg;
	struct usb_hcd *hcd = musb_to_hcd(musb);
	unsigned long flags;
	int retval = 0;

	DBG(0, "musb_gadget_start\n");
	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}
	pm_runtime_get_sync(musb->controller);
	DBG(2, "registering driver %s\n", driver->function);
	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;
	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* REVISIT: funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	retval = usb_add_hcd(hcd, 0, 0);
	if (retval < 0) {
		DBG(2, "add_hcd failed, %d\n", retval);
		goto err;
	}
	if ((musb->xceiv->last_event == USB_EVENT_ID)
	    && otg->set_vbus)
		otg_set_vbus(otg, 1);
	hcd->self.uses_pio_for_control = 1;
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);
	return 0;
err:
	return retval;
}
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int i;
	struct musb_hw_ep *hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0, false);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints; i < musb->nr_endpoints; i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g, struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;

	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	musb_hnp_stop(musb);
	(void)musb_gadget_vbus_draw(&musb->g, 0);
	musb->xceiv->state = OTG_STATE_UNDEFINED;
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);
	DBG(2, "unregistering driver %s\n", driver->function);
	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	usb_remove_hcd(musb_to_hcd(musb));
	/*
	 * FIXME we need to be able to register another
	 * gadget driver here and have everything work;
	 * that currently misbehaves.
	 */
	pm_runtime_put(musb->controller);
	return 0;
}
/* ----------------------------------------------------------------------- */
/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
			otg_state_string(musb->xceiv->state));
	}
}

/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8 devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	DBG(0, "devctl %02x\n", devctl);
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		musb_sync_with_bat(musb, USB_SUSPEND);
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
			otg_state_string(musb->xceiv->state));
	}
}

/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
#if defined(CONFIG_USBIF_COMPLIANCE)
static unsigned long vbus_polling_timeout;

int polling_vbus_value(void *data)
{
	unsigned int vbus_value;
	bool timeout_flag = false;
	u8 devctl;
	u8 power;
	u8 opstate;

	while (!kthread_should_stop()) {
		timeout_flag = false;
#if defined(CONFIG_USBIF_COMPLIANCE_PMIC)
		polling_vbus = true;
		vbus_value = PMIC_IMM_GetOneChannelValue(AUX_VCDT_AP, 1, 1);
		vbus_value = (((R_CHARGER_1 + R_CHARGER_2) * 100 * vbus_value)
			      / R_CHARGER_2) / 100;
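		/*
		 * The math above rescales the raw ADC reading through the
		 * external divider: with illustrative (assumed) values
		 * R_CHARGER_1 = 330, R_CHARGER_2 = 39 and a 500 mV reading,
		 * VBUS ~= (330 + 39) * 500 / 39 ~= 4730 mV.
		 */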
#else
		vbus_value = battery_meter_get_charger_voltage();
#endif
		DBG(0, "musb::Vbus (%d)\n", vbus_value);
		DBG(0, "OTG_State: (%s)\n", otg_state_string(mtk_musb->xceiv->state));
		switch (mtk_musb->xceiv->state) {
		case OTG_STATE_B_IDLE:
		case OTG_STATE_B_PERIPHERAL:
			vbus_polling_timeout = jiffies + 5 * HZ;
			while (vbus_value < 3800) {
				DBG(0, "%s: not above B-device operating voltage! (%d)\n",
				    __func__, vbus_value);
				if (time_after(jiffies, vbus_polling_timeout)) {
					timeout_flag = true;
					break;
				}
				mdelay(10);
#if defined(CONFIG_USBIF_COMPLIANCE_PMIC)
				vbus_value = PMIC_IMM_GetOneChannelValue(AUX_VCDT_AP, 1, 1);
				vbus_value = (((R_CHARGER_1 + R_CHARGER_2) * 100 * vbus_value)
					      / R_CHARGER_2) / 100;
#else
				vbus_value = battery_meter_get_charger_voltage();
#endif
			}
			DBG(0, "%s: Vbus (%d)\n", __func__, vbus_value);
			if (!timeout_flag) {
				DBG(0, "CONNECT USB (B-device Operating Voltage! (%d)\n",
				    vbus_value);
				mt_usb_connect();
			}
			break;
		case OTG_STATE_B_SRP_INIT:
			while (vbus_value > 700) {
				if (time_after(jiffies, vbus_polling_timeout)) {
					timeout_flag = true;
					break;
				}
				mdelay(10);
#if defined(CONFIG_USBIF_COMPLIANCE_PMIC)
				vbus_value = PMIC_IMM_GetOneChannelValue(AUX_VCDT_AP, 1, 1);
				vbus_value = (((R_CHARGER_1 + R_CHARGER_2) * 100 * vbus_value)
					      / R_CHARGER_2) / 100;
#else
				vbus_value = battery_meter_get_charger_voltage();
#endif
			}
			DBG(0, "%s: Vbus (%d)\n", __func__, vbus_value);
			USBPHY_WRITE8(0x6c, 0x13);
			USBPHY_WRITE8(0x6d, 0x3f);
			/* Set VBUS pulsing length */
			musb_writeb(mtk_musb->mregs, 0x7B, 1);
			mdelay(1800);
			devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
			DBG(0, "Sending SRP: devctl: %02x\n", devctl);
			devctl |= MUSB_DEVCTL_SESSION;
			musb_writeb(mtk_musb->mregs, MUSB_DEVCTL, devctl);
			devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
			DBG(0, "Sending SRP Done: devctl: %02x\n", devctl);
			DBG(0, "polling_vbus_value - before OTG_STATE_B_IDLE\n");
			mtk_musb->xceiv->state = OTG_STATE_B_IDLE;
			DBG(0, "polling_vbus_value - after OTG_STATE_B_IDLE\n");
			vbus_polling_timeout = jiffies + 5 * HZ;
			while (vbus_value < 4000) {
				DBG(0, "musb::not above Session-Valid! (%d)\n", vbus_value);
				if (time_after(jiffies, vbus_polling_timeout)) {
					timeout_flag = true;
					break;
				}
				mdelay(20);
#if defined(CONFIG_USBIF_COMPLIANCE_PMIC)
				vbus_value = PMIC_IMM_GetOneChannelValue(AUX_VCDT_AP, 1, 1);
				vbus_value = (((R_CHARGER_1 + R_CHARGER_2) * 100 * vbus_value)
					      / R_CHARGER_2) / 100;
#else
				vbus_value = battery_meter_get_charger_voltage();
#endif
			}
			DBG(0, "musb::Vbus (%d)\n", vbus_value);
			if (!timeout_flag) {
				USBPHY_WRITE8(0x6c, 0x2f);
				power = musb_readb(mtk_musb->mregs, MUSB_POWER);
				DBG(0, "Setting SOFT CONNECT: power: %02x\n", power);
				power |= MUSB_POWER_SOFTCONN;
				musb_writeb(mtk_musb->mregs, MUSB_POWER, power);
				power = musb_readb(mtk_musb->mregs, MUSB_POWER);
				DBG(0, "Setting SOFT CONNECT Done: power: %02x\n", power);
			} else {
				devctl = musb_readb(mtk_musb->mregs, MUSB_DEVCTL);
				opstate = musb_readb(mtk_musb->mregs, MUSB_OPSTATE);
				DBG(0, "SRP: Polling VBUS TimeOut, DEVCTL: 0x%x, OPSTATE: 0x%x\n",
				    devctl, opstate);
				send_otg_event(OTG_EVENT_NO_RESP_FOR_SRP);
				polling_vbus = false;
				mt_usb_disconnect();
			}
			DBG(0, "polling_vbus_value - Done - %s\n",
			    otg_state_string(mtk_musb->xceiv->state));
			break;
#if 0
		case OTG_STATE_A_IDLE:
		case OTG_STATE_A_WAIT_VRISE:
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
		case OTG_STATE_A_WAIT_VFALL:
		case OTG_STATE_A_VBUS_ERR:
		case OTG_STATE_A_PERIPHERAL:
			pmic_bvalid_det_int_en(0);
			break;
#endif
		}
		DBG(0, "musb::enable mt_usb_disconnect!\n");
		polling_vbus = false;
		DBG(0, "Re-schedule vbus_polling_tsk (TASK_INTERRUPTIBLE)!\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	DBG(0, "SET Current State - vbus_polling_tsk (TASK_RUNNING)!\n");
	__set_current_state(TASK_RUNNING);
	return 1;
}
#endif
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem *mregs = musb->mregs;
	u8 devctl = musb_readb(mregs, MUSB_DEVCTL);

	DBG(2, "devctl %02x\n", devctl);
#if defined(CONFIG_USBIF_COMPLIANCE)
	pr_info("%s: %02x, otg_srp_reqd: 0x%x (%s)\n", __func__, devctl,
		musb->g.otg_srp_reqd, otg_state_string(musb->xceiv->state));
	pr_info("devctl %02x\n", devctl);
	if (musb->g.otg_srp_reqd)
		musb->xceiv->state = OTG_STATE_B_SRP_INIT;
#endif
	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void)musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->state) {
	default:
		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
		    otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
#if defined(CONFIG_USBIF_COMPLIANCE)
		pr_info("%s: %x\n", __func__, musb->g.host_request);
		musb_set_host_request_flag(musb, 0);
#endif
		break;
	case OTG_STATE_B_SRP_INIT:
#if defined(CONFIG_USBIF_COMPLIANCE)
		pr_info("%s: %s\n", __func__, otg_state_string(musb->xceiv->state));
		if (musb->g.otg_srp_reqd) {
			pr_info("disconnect, check otg_srp_reqd: 0x%x, devctl %02x\n",
				musb->g.otg_srp_reqd, devctl);
			musb->g.otg_srp_reqd = 0;
			/* Add 0.5 seconds to fix TD 5.1-5s. */
			vbus_polling_timeout = jiffies + 5 * HZ;
			wake_up_process(vbus_polling_tsk);
		}
#endif
		break;
	}
	musb->is_active = 0;
}
void musb_g_reset(struct musb *musb) __releases(musb->lock) __acquires(musb->lock)
{
	void __iomem *mbase = musb->mregs;
	u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8 power;

	DBG(2, "<== %s driver '%s'\n", (devctl & MUSB_DEVCTL_BDEVICE)
	    ? "B-Device" : "A-Device",
	    musb->gadget_driver ? musb->gadget_driver->driver.name : NULL);

	if (musb->test_mode == 0)
		musb_sync_with_bat(musb, USB_UNCONFIGURED);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);
	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* acquire the wake lock */
	if (!wake_lock_active(&musb->usb_lock))
		wake_lock(&musb->usb_lock);

	musb_platform_reset(musb);
	musb_generic_disable(musb);
	musb->intrtxe = 0x1;
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); /* enable ep0 interrupt */
	musb_writeb(mbase, MUSB_INTRUSBE, MUSB_INTR_SUSPEND | MUSB_INTR_RESUME | MUSB_INTR_RESET
#if defined(CONFIG_USBIF_COMPLIANCE)
		    | MUSB_INTR_CONNECT /* trying to fix the missing CONNECT in B_WAIT_ACON */
#endif
		    | MUSB_INTR_DISCONNECT);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
		? USB_SPEED_HIGH : USB_SPEED_FULL;
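	/*
	 * e.g. after a successful high-speed chirp POWER.HSMODE is set and
	 * the gadget reports USB_SPEED_HIGH; otherwise it stays at full
	 * speed.
	 */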
	/* clear address */
	musb_writeb(musb->mregs, MUSB_FADDR, 0);
	/* reset fifo size */
	musb->fifo_addr = FIFO_START_ADDR;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void)musb_gadget_vbus_draw(&musb->g, 8);
}