f_mtp.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303
  1. /*
  2. * Gadget Function Driver for MTP
  3. *
  4. * Copyright (C) 2010 Google, Inc.
  5. * Author: Mike Lockwood <lockwood@android.com>
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. /* #define DEBUG */
  18. /* #define VERBOSE_DEBUG */
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/poll.h>
  22. #include <linux/delay.h>
  23. #include <linux/wait.h>
  24. #include <linux/err.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/types.h>
  27. #include <linux/file.h>
  28. #include <linux/device.h>
  29. #include <linux/miscdevice.h>
  30. #include <linux/usb.h>
  31. #include <linux/usb_usual.h>
  32. #include <linux/usb/ch9.h>
  33. #include <linux/usb/f_mtp.h>
  34. #include <linux/delay.h>
  35. #include <linux/time.h>
  36. #define MTP_BULK_BUFFER_SIZE 16384
  37. #define INTR_BUFFER_SIZE 28
  38. /* String IDs */
  39. #define INTERFACE_STRING_INDEX 0
  40. /* values for mtp_dev.state */
  41. #define STATE_OFFLINE 0 /* initial state, disconnected */
  42. #define STATE_READY 1 /* ready for userspace calls */
  43. #define STATE_BUSY 2 /* processing userspace calls */
  44. #define STATE_CANCELED 3 /* transaction canceled by host */
  45. #define STATE_ERROR 4 /* error from completion routine */
  46. #define STATE_RESET 5 /* reset from device reset request */
  47. /* number of tx and rx requests to allocate */
  48. #define TX_REQ_MAX 4
  49. #define RX_REQ_MAX 2
  50. #define INTR_REQ_MAX 5
  51. /* ID for Microsoft MTP OS String */
  52. #define MTP_OS_STRING_ID 0xEE
  53. /* MTP class requests */
  54. #define MTP_REQ_CANCEL 0x64
  55. #define MTP_REQ_GET_EXT_EVENT_DATA 0x65
  56. #define MTP_REQ_RESET 0x66
  57. #define MTP_REQ_GET_DEVICE_STATUS 0x67
  58. /* constants for device status */
  59. #define MTP_RESPONSE_OK 0x2001
  60. #define MTP_RESPONSE_DEVICE_BUSY 0x2019
  61. #define MTP_RESPONSE_DEVICE_CANCEL 0x201F
  62. static const char mtp_shortname[] = "mtp_usb";
  63. /*#ifdef DBG
  64. #undef DBG
  65. #endif
  66. #define DBG(level, fmt, args...) \
  67. do { \
  68. printk( fmt, ##args); \
  69. } while (0)
  70. #ifdef VDBG
  71. #undef VDBG
  72. #endif
  73. #define VDBG(level, fmt, args...) \
  74. do { \
  75. printk( fmt, ##args); \
  76. } while (0)
  77. #ifdef pr_debug
  78. #undef pr_debug
  79. #endif
  80. #define pr_debug(fmt, args...) \
  81. do { \
  82. printk( fmt, ##args); \
  83. } while (0)
  84. #ifdef pr_info
  85. #undef pr_info
  86. #endif
  87. #define pr_info(fmt, args...) \
  88. do { \
  89. printk( fmt, ##args); \
  90. } while (0)
  91. */
/* Per-instance driver state for the MTP gadget function. */
struct mtp_dev {
	struct usb_function function;	/* composite-framework function instance */
	struct usb_composite_dev *cdev;
	spinlock_t lock;	/* guards the request lists (see mtp_req_put/mtp_req_get) */
	struct usb_ep *ep_in;	/* bulk IN endpoint (device -> host) */
	struct usb_ep *ep_out;	/* bulk OUT endpoint (host -> device) */
	struct usb_ep *ep_intr;	/* interrupt IN endpoint for MTP events */
	int state;	/* one of the STATE_* values defined above */
	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;
	struct list_head tx_idle;	/* pool of unused bulk-IN requests */
	struct list_head intr_idle;	/* pool of unused interrupt requests */
	wait_queue_head_t read_wq;	/* woken by mtp_complete_out */
	wait_queue_head_t write_wq;	/* woken by mtp_complete_in */
	wait_queue_head_t intr_wq;	/* woken by mtp_complete_intr */
	struct usb_request *rx_req[RX_REQ_MAX];
	int rx_done;	/* set to 1 by mtp_complete_out when an OUT request finishes */
	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;		/* file operated on by the work items */
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	unsigned xfer_send_header;
	uint16_t xfer_command;
	uint32_t xfer_transaction_id;
	int xfer_result;
	struct work_struct device_reset_work;
	int fileTransferSend;		/* NOTE(review): purpose not visible in this chunk — confirm */
	char usb_functions[32];		/* presumably current function-list string (see USB_MTP_FUNC) — confirm */
	int curr_mtp_func_index;
	int usb_functions_no;
	int epOut_halt;			/* NOTE(review): looks like an OUT-endpoint halt flag — confirm */
	int dev_disconnected;
};
  132. static struct usb_interface_descriptor mtp_interface_desc = {
  133. .bLength = USB_DT_INTERFACE_SIZE,
  134. .bDescriptorType = USB_DT_INTERFACE,
  135. .bInterfaceNumber = 0,
  136. .bNumEndpoints = 3,
  137. .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
  138. .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
  139. .bInterfaceProtocol = 0,
  140. };
  141. static struct usb_interface_descriptor ptp_interface_desc = {
  142. .bLength = USB_DT_INTERFACE_SIZE,
  143. .bDescriptorType = USB_DT_INTERFACE,
  144. .bInterfaceNumber = 0,
  145. .bNumEndpoints = 3,
  146. .bInterfaceClass = USB_CLASS_STILL_IMAGE,
  147. .bInterfaceSubClass = 1,
  148. .bInterfaceProtocol = 1,
  149. };
  150. static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
  151. .bLength = USB_DT_ENDPOINT_SIZE,
  152. .bDescriptorType = USB_DT_ENDPOINT,
  153. .bEndpointAddress = USB_DIR_IN,
  154. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  155. .wMaxPacketSize = __constant_cpu_to_le16(1024),
  156. };
  157. static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
  158. .bLength = sizeof mtp_superspeed_in_comp_desc,
  159. .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
  160. /* the following 2 values can be tweaked if necessary */
  161. /* .bMaxBurst = 0, */
  162. /* .bmAttributes = 0, */
  163. };
  164. static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
  165. .bLength = USB_DT_ENDPOINT_SIZE,
  166. .bDescriptorType = USB_DT_ENDPOINT,
  167. .bEndpointAddress = USB_DIR_OUT,
  168. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  169. .wMaxPacketSize = __constant_cpu_to_le16(1024),
  170. };
  171. static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
  172. .bLength = sizeof mtp_superspeed_out_comp_desc,
  173. .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
  174. /* the following 2 values can be tweaked if necessary */
  175. /* .bMaxBurst = 0, */
  176. /* .bmAttributes = 0, */
  177. };
  178. static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
  179. .bLength = USB_DT_ENDPOINT_SIZE,
  180. .bDescriptorType = USB_DT_ENDPOINT,
  181. .bEndpointAddress = USB_DIR_IN,
  182. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  183. .wMaxPacketSize = __constant_cpu_to_le16(512),
  184. };
  185. static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
  186. .bLength = USB_DT_ENDPOINT_SIZE,
  187. .bDescriptorType = USB_DT_ENDPOINT,
  188. .bEndpointAddress = USB_DIR_OUT,
  189. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  190. .wMaxPacketSize = __constant_cpu_to_le16(512),
  191. };
  192. static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
  193. .bLength = USB_DT_ENDPOINT_SIZE,
  194. .bDescriptorType = USB_DT_ENDPOINT,
  195. .bEndpointAddress = USB_DIR_IN,
  196. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  197. };
  198. static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
  199. .bLength = USB_DT_ENDPOINT_SIZE,
  200. .bDescriptorType = USB_DT_ENDPOINT,
  201. .bEndpointAddress = USB_DIR_OUT,
  202. .bmAttributes = USB_ENDPOINT_XFER_BULK,
  203. };
  204. static struct usb_endpoint_descriptor mtp_intr_desc = {
  205. .bLength = USB_DT_ENDPOINT_SIZE,
  206. .bDescriptorType = USB_DT_ENDPOINT,
  207. .bEndpointAddress = USB_DIR_IN,
  208. .bmAttributes = USB_ENDPOINT_XFER_INT,
  209. .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
  210. .bInterval = 6,
  211. };
  212. static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
  213. .bLength = sizeof mtp_superspeed_intr_comp_desc,
  214. .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
  215. /* the following 3 values can be tweaked if necessary */
  216. /* .bMaxBurst = 0, */
  217. /* .bmAttributes = 0, */
  218. .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
  219. };
  220. static struct usb_descriptor_header *fs_mtp_descs[] = {
  221. (struct usb_descriptor_header *) &mtp_interface_desc,
  222. (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
  223. (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
  224. (struct usb_descriptor_header *) &mtp_intr_desc,
  225. NULL,
  226. };
  227. static struct usb_descriptor_header *hs_mtp_descs[] = {
  228. (struct usb_descriptor_header *) &mtp_interface_desc,
  229. (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
  230. (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
  231. (struct usb_descriptor_header *) &mtp_intr_desc,
  232. NULL,
  233. };
  234. static struct usb_descriptor_header *ss_mtp_descs[] = {
  235. (struct usb_descriptor_header *) &mtp_interface_desc,
  236. (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
  237. (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
  238. (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
  239. (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
  240. (struct usb_descriptor_header *) &mtp_intr_desc,
  241. (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
  242. NULL,
  243. };
  244. static struct usb_descriptor_header *fs_ptp_descs[] = {
  245. (struct usb_descriptor_header *) &ptp_interface_desc,
  246. (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
  247. (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
  248. (struct usb_descriptor_header *) &mtp_intr_desc,
  249. NULL,
  250. };
  251. static struct usb_descriptor_header *hs_ptp_descs[] = {
  252. (struct usb_descriptor_header *) &ptp_interface_desc,
  253. (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
  254. (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
  255. (struct usb_descriptor_header *) &mtp_intr_desc,
  256. NULL,
  257. };
  258. static struct usb_descriptor_header *ss_ptp_descs[] = {
  259. (struct usb_descriptor_header *) &ptp_interface_desc,
  260. (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
  261. (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
  262. (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
  263. (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
  264. (struct usb_descriptor_header *) &mtp_intr_desc,
  265. (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
  266. NULL,
  267. };
  268. static struct usb_string mtp_string_defs[] = {
  269. /* Naming interface "MTP" so libmtp will recognize us */
  270. [INTERFACE_STRING_INDEX].s = "MTP",
  271. { }, /* end of list */
  272. };
  273. static struct usb_gadget_strings mtp_string_table = {
  274. .language = 0x0409, /* en-US */
  275. .strings = mtp_string_defs,
  276. };
  277. static struct usb_gadget_strings *mtp_strings[] = {
  278. &mtp_string_table,
  279. NULL,
  280. };
  281. /* Microsoft MTP OS String */
  282. static u8 mtp_os_string[] = {
  283. 18, /* sizeof(mtp_os_string) */
  284. USB_DT_STRING,
  285. /* Signature field: "MSFT100" */
  286. 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
  287. /* vendor code */
  288. 1,
  289. /* padding */
  290. 0
  291. };
  292. /* Microsoft Extended Property OS Feature Descriptor Header Section */
  293. struct mtp_ext_prop_desc_header {
  294. __le32 dwLength;
  295. __u16 bcdVersion;
  296. __le16 wIndex;
  297. __u16 wCount;
  298. };
  299. /* Microsoft Extended Property OS Feature Function Section */
  300. struct mtp_ext_prop_desc_property {
  301. __le32 dwSize;
  302. __le32 dwPropertyDataType;
  303. __le16 wPropertyNameLength;
  304. __u8 bPropertyName[8]; /* MTP */
  305. __le32 dwPropertyDataLength;
  306. __u8 bPropertyData[22]; /* MTP Device */
  307. }mtp_ext_prop_desc_property;
  308. /* MTP Extended Configuration Descriptor */
  309. struct {
  310. struct mtp_ext_prop_desc_header header;
  311. struct mtp_ext_prop_desc_property customProp;
  312. } mtp_ext_prop_desc = {
  313. .header = {
  314. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_prop_desc)),
  315. .bcdVersion = __constant_cpu_to_le16(0x0100),
  316. .wIndex = __constant_cpu_to_le16(5),
  317. .wCount = __constant_cpu_to_le16(1),
  318. },
  319. .customProp = {
  320. .dwSize = __constant_cpu_to_le32(sizeof(mtp_ext_prop_desc_property)),
  321. .dwPropertyDataType = __constant_cpu_to_le32(1),
  322. .wPropertyNameLength = __constant_cpu_to_le16(8),
  323. .bPropertyName = {'M', 0, 'T', 0, 'P', 0, 0, 0}, /* MTP */
  324. .dwPropertyDataLength = __constant_cpu_to_le32(22),
  325. .bPropertyData = {'M', 0, 'T', 0, 'P', 0, ' ', 0, 'D', 0, 'e', 0, 'v', 0, 'i', 0, 'c', 0, 'e', 0, 0, 0}, /* MTP Device */
  326. },
  327. };
  328. #define MSFT_bMS_VENDOR_CODE 1
  329. #ifdef CONFIG_MTK_TC1_FEATURE
  330. #define USB_MTP_FUNCTIONS 8
  331. #else
  332. #define USB_MTP_FUNCTIONS 6
  333. #endif
  334. #define USB_MTP "mtp\n"
  335. #define USB_MTP_ACM "mtp,acm\n"
  336. #define USB_MTP_ADB "mtp,adb\n"
  337. #define USB_MTP_ADB_ACM "mtp,adb,acm\n"
  338. #define USB_MTP_UMS "mtp,mass_storage\n"
  339. #define USB_MTP_UMS_ADB "mtp,mass_storage,adb\n"
  340. #ifdef CONFIG_MTK_TC1_FEATURE
  341. #define USB_TC1_MTP_ADB "acm,gser,mtp,adb\n"
  342. #define USB_TC1_MTP "acm,gser,mtp\n"
  343. #endif
  344. static char * USB_MTP_FUNC[USB_MTP_FUNCTIONS] =
  345. {
  346. USB_MTP,
  347. USB_MTP_ACM,
  348. USB_MTP_ADB,
  349. USB_MTP_ADB_ACM,
  350. USB_MTP_UMS,
  351. USB_MTP_UMS_ADB,
  352. #ifdef CONFIG_MTK_TC1_FEATURE
  353. USB_TC1_MTP_ADB,
  354. USB_TC1_MTP
  355. #endif
  356. };
  357. /* Microsoft Extended Configuration Descriptor Header Section */
  358. struct mtp_ext_config_desc_header {
  359. __le32 dwLength;
  360. __u16 bcdVersion;
  361. __le16 wIndex;
  362. __u8 bCount;
  363. __u8 reserved[7];
  364. };
  365. /* Microsoft Extended Configuration Descriptor Function Section */
  366. struct mtp_ext_config_desc_function {
  367. __u8 bFirstInterfaceNumber;
  368. __u8 bInterfaceCount;
  369. __u8 compatibleID[8];
  370. __u8 subCompatibleID[8];
  371. __u8 reserved[6];
  372. };
  373. /* MTP Extended Configuration Descriptor */
  374. struct {
  375. struct mtp_ext_config_desc_header header;
  376. struct mtp_ext_config_desc_function function;
  377. } mtp_ext_config_desc = {
  378. .header = {
  379. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
  380. .bcdVersion = __constant_cpu_to_le16(0x0100),
  381. .wIndex = __constant_cpu_to_le16(4),
  382. /* .bCount = __constant_cpu_to_le16(1), */
  383. .bCount = 0x01,
  384. },
  385. .function = {
  386. .bFirstInterfaceNumber = 0,
  387. .bInterfaceCount = 1,
  388. .compatibleID = { 'M', 'T', 'P' },
  389. },
  390. };
  391. struct {
  392. struct mtp_ext_config_desc_header header;
  393. struct mtp_ext_config_desc_function function1;
  394. struct mtp_ext_config_desc_function function2;
  395. } mtp_ext_config_desc_2 = {
  396. .header = {
  397. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_2)),
  398. .bcdVersion = __constant_cpu_to_le16(0x0100),
  399. .wIndex = __constant_cpu_to_le16(4),
  400. /* .bCount = __constant_cpu_to_le16(1), */
  401. .bCount = 0x02,
  402. .reserved = { 0 },
  403. },
  404. .function1 =
  405. {
  406. .bFirstInterfaceNumber = 0,
  407. .bInterfaceCount = 1,
  408. .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
  409. .subCompatibleID = { 0 },
  410. .reserved = { 0 },
  411. },
  412. .function2 =
  413. {
  414. .bFirstInterfaceNumber = 1,
  415. .bInterfaceCount = 1,
  416. .compatibleID = { 0 },
  417. .subCompatibleID = { 0 },
  418. .reserved = { 0 },
  419. },
  420. };
  421. struct {
  422. struct mtp_ext_config_desc_header header;
  423. struct mtp_ext_config_desc_function function1;
  424. struct mtp_ext_config_desc_function function2;
  425. struct mtp_ext_config_desc_function function3;
  426. } mtp_ext_config_desc_3 = {
  427. .header = {
  428. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_3)),
  429. .bcdVersion = __constant_cpu_to_le16(0x0100),
  430. .wIndex = __constant_cpu_to_le16(4),
  431. /* .bCount = __constant_cpu_to_le16(1), */
  432. .bCount = 0x03,
  433. .reserved = { 0 },
  434. },
  435. .function1 =
  436. {
  437. .bFirstInterfaceNumber = 0,
  438. .bInterfaceCount = 1,
  439. .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
  440. .subCompatibleID = { 0 },
  441. .reserved = { 0 },
  442. },
  443. .function2 =
  444. {
  445. .bFirstInterfaceNumber = 1,
  446. .bInterfaceCount = 1,
  447. .compatibleID = { 0 },
  448. .subCompatibleID = { 0 },
  449. .reserved = { 0 },
  450. },
  451. .function3 =
  452. {
  453. .bFirstInterfaceNumber = 2,
  454. .bInterfaceCount = 1,
  455. .compatibleID = { 0 },
  456. .subCompatibleID = { 0 },
  457. .reserved = { 0 },
  458. },
  459. };
  460. #ifdef CONFIG_MTK_TC1_FEATURE
  461. struct {
  462. struct mtp_ext_config_desc_header header;
  463. struct mtp_ext_config_desc_function function1;
  464. struct mtp_ext_config_desc_function function2;
  465. struct mtp_ext_config_desc_function function3;
  466. struct mtp_ext_config_desc_function function4;
  467. } mtp_ext_config_desc_4 = {
  468. .header = {
  469. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_4)),
  470. .bcdVersion = __constant_cpu_to_le16(0x0100),
  471. .wIndex = __constant_cpu_to_le16(4),
  472. /* .bCount = __constant_cpu_to_le16(1), */
  473. .bCount = 0x04,
  474. .reserved = { 0 },
  475. },
  476. .function1 =
  477. {
  478. .bFirstInterfaceNumber = 0,
  479. .bInterfaceCount = 2,
  480. .compatibleID = { 0 },
  481. .subCompatibleID = { 0 },
  482. .reserved = { 0 },
  483. },
  484. .function2 =
  485. {
  486. .bFirstInterfaceNumber = 2,
  487. .bInterfaceCount = 1,
  488. .compatibleID = { 0 },
  489. .subCompatibleID = { 0 },
  490. .reserved = { 0 },
  491. },
  492. .function3 =
  493. {
  494. .bFirstInterfaceNumber = 3,
  495. .bInterfaceCount = 1,
  496. .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
  497. .subCompatibleID = { 0 },
  498. .reserved = { 0 },
  499. },
  500. .function4 =
  501. {
  502. .bFirstInterfaceNumber = 4,
  503. .bInterfaceCount = 1,
  504. .compatibleID = { 0 },
  505. .subCompatibleID = { 0 },
  506. .reserved = { 0 },
  507. },
  508. };
  509. struct {
  510. struct mtp_ext_config_desc_header header;
  511. struct mtp_ext_config_desc_function function1;
  512. struct mtp_ext_config_desc_function function2;
  513. struct mtp_ext_config_desc_function function3;
  514. } mtp_ext_config_desc_5 = {
  515. .header = {
  516. .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_5)),
  517. .bcdVersion = __constant_cpu_to_le16(0x0100),
  518. .wIndex = __constant_cpu_to_le16(4),
  519. /* .bCount = __constant_cpu_to_le16(1), */
  520. .bCount = 0x03,
  521. .reserved = { 0 },
  522. },
  523. .function1 =
  524. {
  525. .bFirstInterfaceNumber = 0,
  526. .bInterfaceCount = 2,
  527. .compatibleID = { 0 },
  528. .subCompatibleID = { 0 },
  529. .reserved = { 0 },
  530. },
  531. .function2 =
  532. {
  533. .bFirstInterfaceNumber = 2,
  534. .bInterfaceCount = 1,
  535. .compatibleID = { 0 },
  536. .subCompatibleID = { 0 },
  537. .reserved = { 0 },
  538. },
  539. .function3 =
  540. {
  541. .bFirstInterfaceNumber = 3,
  542. .bInterfaceCount = 1,
  543. .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
  544. .subCompatibleID = { 0 },
  545. .reserved = { 0 },
  546. },
  547. };
  548. #endif
  549. struct mtp_device_status {
  550. __le16 wLength;
  551. __le16 wCode;
  552. };
  553. struct mtp_data_header {
  554. /* length of packet, including this header */
  555. __le32 length;
  556. /* container type (2 for data packet) */
  557. __le16 type;
  558. /* MTP command code */
  559. __le16 command;
  560. /* MTP transaction ID */
  561. __le32 transaction_id;
  562. };
  563. static void mtp_ueventToDisconnect(struct mtp_dev *dev);
  564. /* temporary variable used between mtp_open() and mtp_gadget_bind() */
  565. static struct mtp_dev *_mtp_dev;
/* Map a generic usb_function pointer back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}
  570. static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
  571. {
  572. struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
  573. if (!req)
  574. return NULL;
  575. /* now allocate buffers for the requests */
  576. #if defined(CONFIG_64BIT) && defined(CONFIG_MTK_LM_MODE)
  577. req->buf = kmalloc(buffer_size, GFP_KERNEL | GFP_DMA);
  578. #else
  579. req->buf = kmalloc(buffer_size, GFP_KERNEL);
  580. #endif
  581. if (!req->buf) {
  582. usb_ep_free_request(ep, req);
  583. return NULL;
  584. }
  585. return req;
  586. }
  587. static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
  588. {
  589. if (req) {
  590. kfree(req->buf);
  591. usb_ep_free_request(ep, req);
  592. }
  593. }
  594. static inline int mtp_lock(atomic_t *excl)
  595. {
  596. if (atomic_inc_return(excl) == 1) {
  597. return 0;
  598. } else {
  599. atomic_dec(excl);
  600. return -1;
  601. }
  602. }
/* Release an exclusion counter taken with mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
	atomic_dec(excl);
}
  607. /* add a request to the tail of a list */
  608. static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
  609. struct usb_request *req)
  610. {
  611. unsigned long flags;
  612. spin_lock_irqsave(&dev->lock, flags);
  613. list_add_tail(&req->list, head);
  614. spin_unlock_irqrestore(&dev->lock, flags);
  615. }
  616. /* remove a request from the head of a list */
  617. static struct usb_request
  618. *mtp_req_get(struct mtp_dev *dev, struct list_head *head)
  619. {
  620. unsigned long flags;
  621. struct usb_request *req;
  622. spin_lock_irqsave(&dev->lock, flags);
  623. if (list_empty(head)) {
  624. req = 0;
  625. } else {
  626. req = list_first_entry(head, struct usb_request, list);
  627. list_del(&req->list);
  628. }
  629. spin_unlock_irqrestore(&dev->lock, flags);
  630. return req;
  631. }
  632. static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
  633. {
  634. struct mtp_dev *dev = _mtp_dev;
  635. if (req->status != 0)
  636. dev->state = STATE_ERROR;
  637. mtp_req_put(dev, &dev->tx_idle, req);
  638. wake_up(&dev->write_wq);
  639. }
  640. static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
  641. {
  642. struct mtp_dev *dev = _mtp_dev;
  643. dev->rx_done = 1;
  644. if (req->status != 0)
  645. dev->state = STATE_ERROR;
  646. wake_up(&dev->read_wq);
  647. }
  648. static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
  649. {
  650. struct mtp_dev *dev = _mtp_dev;
  651. if (req->status != 0)
  652. dev->state = STATE_ERROR;
  653. mtp_req_put(dev, &dev->intr_idle, req);
  654. wake_up(&dev->intr_wq);
  655. }
/*
 * mtp_create_bulk_endpoints() - claim the three MTP endpoints and
 * pre-allocate their usb_requests.
 * @dev: MTP device state
 * @in_desc: descriptor for the bulk-IN endpoint (device -> host)
 * @out_desc: descriptor for the bulk-OUT endpoint (host -> device)
 * @intr_desc: descriptor for the interrupt-IN endpoint (events)
 *
 * Returns 0 on success, -ENODEV if endpoint autoconfig fails, or -1 if
 * request allocation fails.
 *
 * NOTE(review): on the `fail:` path, requests that were already
 * allocated in earlier loop iterations are not freed here — presumably
 * released by the unbind path; confirm to rule out a leak on partial
 * allocation failure.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
	struct usb_endpoint_descriptor *in_desc,
	struct usb_endpoint_descriptor *out_desc,
	struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;
	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
	/* bulk IN: device -> host data */
	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev; /* claim the endpoint */
	dev->ep_in = ep;
	/* bulk OUT: host -> device data */
	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev; /* claim the endpoint */
	dev->ep_out = ep;
	/* interrupt IN: asynchronous MTP events */
	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev; /* claim the endpoint */
	dev->ep_intr = ep;
	/* now allocate requests for our endpoints */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_in;
		mtp_req_put(dev, &dev->tx_idle, req);
	}
	/* RX requests are kept in a fixed array, not an idle list */
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	for (i = 0; i < INTR_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_intr;
		mtp_req_put(dev, &dev->intr_idle, req);
	}
	return 0;
fail:
	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
	return -1;
}
/*
 * mtp_send_devicereset_event() - push a canned MTP DeviceReset (0x400b)
 * event to the host over the interrupt endpoint.
 * @dev: MTP device state
 *
 * Called from mtp_release() so the host learns the session ended.
 * Returns 0 when the function is already unbound (nothing to do), a
 * negative errno on failure, or the usb_ep_queue() result.
 */
static int mtp_send_devicereset_event(struct mtp_dev *dev)
{
	struct usb_request *req = NULL;
	int ret;
	int length = 12; /* fixed size of the canned event below */
	unsigned long flags;
	char buffer[12]={0x0C, 0x0, 0x0, 0x0, 0x4, 0x0, 0xb, 0x40, 0x0, 0x0, 0x0, 0x0}; /* length 12, 0x00000010, type EVENT: 0x0004, event code 0x400b */
	DBG(dev->cdev, "%s, line %d: dev->dev_disconnected = %d\n", __func__, __LINE__, dev->dev_disconnected);
	/* defensive range check; length is a constant so this never fails */
	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;
	/* sample dev_disconnected/state atomically before deciding to send */
	spin_lock_irqsave(&dev->lock, flags);
	DBG(dev->cdev, "%s, line %d: _mtp_dev->dev_disconnected = %d, dev->state = %d \n", __func__, __LINE__, dev->dev_disconnected, dev->state);
	if(!dev->dev_disconnected || dev->state != STATE_OFFLINE)
	{
		/* drop the lock before sleeping for an idle request */
		spin_unlock_irqrestore(&dev->lock, flags);
		/* wait up to 1s for an idle interrupt request */
		ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
		if (!req)
			return -ETIME;
		memcpy(req->buf, buffer, length);
		req->length = length;
		ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
		DBG(dev->cdev, "%s, line %d: ret = %d\n", __func__, __LINE__, ret);
		/* on queue failure, return the request to the idle pool */
		if (ret)
			mtp_req_put(dev, &dev->intr_idle, req);
	}
	else
	{
		/* function already unbound and offline: nothing to send */
		spin_unlock_irqrestore(&dev->lock, flags);
		DBG(dev->cdev, "%s, line %d: usb function has been unbind!! do nothing!!\n", __func__, __LINE__);
		ret = 0;
	}
	DBG(dev->cdev, "%s, line %d: _mtp_dev->dev_disconnected = %d, dev->state = %d, return!! \n", __func__, __LINE__, dev->dev_disconnected, dev->state);
	return ret;
}
  755. static ssize_t mtp_read(struct file *fp, char __user *buf,
  756. size_t count, loff_t *pos)
  757. {
  758. struct mtp_dev *dev = fp->private_data;
  759. struct usb_composite_dev *cdev = dev->cdev;
  760. struct usb_request *req;
  761. ssize_t r = count;
  762. unsigned xfer;
  763. int ret = 0;
  764. DBG(cdev, "mtp_read(%zu)\n", count);
  765. if (count > MTP_BULK_BUFFER_SIZE)
  766. return -EINVAL;
  767. if (dev->epOut_halt) {
  768. printk("%s, line %d: ret %d!! <dev->epOut_halt = %d> reset the out ep \n", __func__, __LINE__, ret, dev->epOut_halt);
  769. mdelay(2000);
  770. usb_ep_fifo_flush(dev->ep_out);
  771. dev->epOut_halt=0;
  772. usb_ep_clear_halt(dev->ep_out);
  773. printk("%s, line %d: ret %d!! <dev->epOut_halt = %d> finish the reset \n", __func__, __LINE__, ret, dev->epOut_halt);
  774. }
  775. spin_lock_irq(&dev->lock);
  776. if (dev->state == STATE_RESET) {
  777. DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
  778. dev->state = STATE_READY;
  779. DBG(dev->cdev, "%s: dev->state = %d, change back to Ready state;!! \n", __func__, dev->state);
  780. spin_unlock_irq(&dev->lock);
  781. return -ECANCELED;
  782. }
  783. spin_unlock_irq(&dev->lock);
  784. /* we will block until we're online */
  785. DBG(cdev, "mtp_read: waiting for online state\n");
  786. ret = wait_event_interruptible(dev->read_wq,
  787. dev->state != STATE_OFFLINE);
  788. if (ret < 0) {
  789. r = ret;
  790. goto done;
  791. }
  792. spin_lock_irq(&dev->lock);
  793. if(dev->state == STATE_RESET)
  794. {
  795. DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
  796. dev->state = STATE_READY;
  797. DBG(dev->cdev, "%s: dev->state = %d, change back to Ready state;!! \n", __func__, dev->state);
  798. spin_unlock_irq(&dev->lock);
  799. return -ECANCELED;
  800. }
  801. spin_unlock_irq(&dev->lock);
  802. spin_lock_irq(&dev->lock);
  803. if (dev->state == STATE_CANCELED) {
  804. /* report cancelation to userspace */
  805. dev->state = STATE_READY;
  806. spin_unlock_irq(&dev->lock);
  807. return -ECANCELED;
  808. }
  809. dev->state = STATE_BUSY;
  810. spin_unlock_irq(&dev->lock);
  811. requeue_req:
  812. /* queue a request */
  813. req = dev->rx_req[0];
  814. req->length = count;
  815. dev->rx_done = 0;
  816. ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
  817. if (ret < 0) {
  818. r = -EIO;
  819. goto done;
  820. } else {
  821. DBG(cdev, "rx %p queue\n", req);
  822. }
  823. /* wait for a request to complete */
  824. ret = wait_event_interruptible(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY);
  825. if (ret < 0) {
  826. r = ret;
  827. usb_ep_dequeue(dev->ep_out, req);
  828. goto done;
  829. }
  830. if (!dev->rx_done) {
  831. DBG(cdev, "%s, line %d: ret %d!! <!dev->rx_done> dev->state = %d, dev->rx_done = %d \n", __func__, __LINE__, ret, dev->state, dev->rx_done);
  832. printk("%s, line %d: ret %d!! <!dev->rx_done> dev->state = %d, dev->rx_done = %d \n", __func__, __LINE__, ret, dev->state, dev->rx_done);
  833. r = -ECANCELED;
  834. dev->state = STATE_ERROR;
  835. usb_ep_dequeue(dev->ep_out, req);
  836. goto done;
  837. }
  838. if (dev->state == STATE_BUSY) {
  839. /* If we got a 0-len packet, throw it back and try again. */
  840. if (req->actual == 0)
  841. goto requeue_req;
  842. DBG(cdev, "rx %p %d\n", req, req->actual);
  843. xfer = (req->actual < count) ? req->actual : count;
  844. r = xfer;
  845. if (copy_to_user(buf, req->buf, xfer))
  846. r = -EFAULT;
  847. } else if(dev->state == STATE_RESET) {
  848. /* If we got a 0-len packet, throw it back and try again. */
  849. if (req->actual == 0)
  850. goto requeue_req;
  851. DBG(dev->cdev, "rx %p %d\n", req, req->actual);
  852. xfer = (req->actual < count) ? req->actual : count;
  853. r = xfer;
  854. if (copy_to_user(buf, req->buf, xfer))
  855. r = -EFAULT;
  856. } else
  857. r = -EIO;
  858. done:
  859. spin_lock_irq(&dev->lock);
  860. if (dev->state == STATE_CANCELED)
  861. r = -ECANCELED;
  862. else if (dev->state != STATE_OFFLINE)
  863. dev->state = STATE_READY;
  864. spin_unlock_irq(&dev->lock);
  865. DBG(cdev, "mtp_read returning %zd\n", r);
  866. return r;
  867. }
/*
 * mtp_write() - userspace write to /dev/mtp_usb; streams the buffer to
 * the host over bulk-IN in MTP_BULK_BUFFER_SIZE chunks.
 * @fp: opened file, private_data is the mtp_dev
 * @buf: userspace source buffer
 * @count: total bytes to send
 * @pos: unused
 *
 * Returns the byte count on success or a negative errno
 * (-ECANCELED on cancel/reset, -ENODEV when offline, -EIO, -EFAULT).
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned xfer;
	int sendZLP = 0;
	int ret;
	DBG(cdev, "mtp_write(%zu)\n", count);
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_RESET) {
		/* a pending reset is reported like a cancel */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		DBG(cdev, "%s, line %d: mtp_write return ENODEV = %d\n", __func__, __LINE__, ENODEV);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);
	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;
	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;
		/* a cancel/reset/disconnect aborts the remaining chunks */
		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}
		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken without a request: interrupted or state change */
			r = ret;
			break;
		}
		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}
		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}
		buf += xfer;
		count -= xfer;
		/* zero this so we don't try to free it on error exit */
		req = 0;
	}
	/* on early break, return the unqueued request to the idle pool */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state == STATE_RESET) {
		DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
		dev->state = STATE_READY;
		r = -ECANCELED;
	} else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
	DBG(cdev, "mtp_write returning %zd\n", r);
	return r;
}
/* read from a local file and write to USB */
/*
 * send_file_work() - workqueue handler for MTP_SEND_FILE[_WITH_HEADER];
 * reads dev->xfer_file in chunks and queues each chunk on bulk-IN.
 * The result is published in dev->xfer_result for mtp_ioctl().
 * Also records the five slowest vfs_read() calls (msec) for debugging.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
		send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;
	/* track the IOMAXNUM worst vfs_read() latencies, sorted ascending */
#define IOMAXNUM 5
	int iotimeMax[IOMAXNUM] = {0};
	struct timeval tv_begin, tv_end;
	int i = 0;
	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;
	if (count < 0) {
		dev->xfer_result = -EINVAL;
		return;
	}
	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
	/* optionally prepend an MTP data header to the first chunk */
	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}
	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;
	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;
		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		else if (dev->state == STATE_RESET) {
			DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
			r = -ECANCELED;
			break;
		}
		if (!req) {
			/* interrupted before a request became available */
			r = ret;
			break;
		}
		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			/* lengths >= 4GB are clamped to the 0xffffffff marker */
			if (count >= 0xffffffff)
				header->length = __cpu_to_le32(0xffffffff);
			else
				header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
				__cpu_to_le32(dev->xfer_transaction_id);
		}
		do_gettimeofday(&tv_begin);
		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
			&offset);
		do_gettimeofday(&tv_end);
		{
			/* ignore the difference under msec */
			/* insertion-sort this latency into the top-N table */
			int pos = -1;
			int time_msec = (tv_end.tv_sec * 1000 + tv_end.tv_usec / 1000)
				- (tv_begin.tv_sec * 1000 + tv_begin.tv_usec / 1000);
			for (i = 0; i < IOMAXNUM; ++i){
				if (time_msec > iotimeMax[i])
					pos = i;
				else
					break;
			}
			if (pos > 0){
				for (i = 1; i <= pos; ++i){
					iotimeMax[i-1] = iotimeMax[i];
				}
			}
			if (pos != -1)
				iotimeMax[pos] = time_msec;
		}
		if (ret < 0) {
			r = ret;
			DBG(cdev, "send_file_work: vfs_read error %d\n", ret);
			if (dev->dev_disconnected) {
				/* USB SW disconnected */
				dev->state = STATE_OFFLINE;
			} else {
				/* ex: Might be SD card plug-out with USB connected */
				dev->state = STATE_ERROR;
				mtp_ueventToDisconnect(dev);
			}
			break;
		}
		xfer = ret + hdr_size;
		/* header is only sent once, on the first chunk */
		hdr_size = 0;
		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->dev_disconnected) {
				/* USB SW disconnected */
				dev->state = STATE_OFFLINE;
			} else {
				/* ex: Might be SD card plug-out with USB connected */
				dev->state = STATE_ERROR;
				mtp_ueventToDisconnect(dev);
			}
			r = -EIO;
			break;
		}
		count -= xfer;
		/* zero this so we don't try to free it on error exit */
		req = 0;
	}
	DBG(dev->cdev, "%s, line = %d: req = 0x%p \n", __func__, __LINE__, req);
	/* on early break, return the unqueued request to the idle pool */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);
	DBG(dev->cdev, "[mtp]top time of vfs_read() in %s:\n", __func__);
	for (i = 0; i < IOMAXNUM; ++i){
		DBG(dev->cdev, "[mtp] %d msec\n", iotimeMax[i]);
	}
	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
/* read from USB and write to a local file */
/*
 * receive_file_work() - workqueue handler for MTP_RECEIVE_FILE.
 * Double-buffers: while one request (write_req) is being written to the
 * file, the next (read_req) is already queued on bulk-OUT. A count of
 * 0xFFFFFFFF means "read until short packet". The result is published
 * in dev->xfer_result; the five slowest vfs_write() calls are logged.
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
		receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
	/* running byte total, used to relax short_not_ok past 4GB */
	int64_t total_size=0;
#endif
	/* track the IOMAXNUM worst vfs_write() latencies, sorted ascending */
#define IOMAXNUM 5
	int iotimeMax[IOMAXNUM] = {0};
	struct timeval tv_begin, tv_end;
	int i = 0;
	/* read our parameters */
	smp_rmb();
	/* recover a previously stalled OUT endpoint before queueing */
	if (dev->epOut_halt) {
		printk("%s, line %d: <dev->epOut_halt = %d> reset the out ep \n", __func__, __LINE__, dev->epOut_halt);
		mdelay(2000);
		usb_ep_fifo_flush(dev->ep_out);
		dev->epOut_halt=0;
		usb_ep_clear_halt(dev->ep_out);
	}
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;
	if (count < 0) {
		dev->xfer_result = -EINVAL;
		return;
	}
	DBG(cdev, "receive_file_work(%lld)\n", count);
	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
			read_req->length = (count > MTP_BULK_BUFFER_SIZE
				? MTP_BULK_BUFFER_SIZE : count);
			/* This might be modified TBD,
			so far, there is only sharedSD with EXT4 FFS could transfer Object with size oevr 4GBs*/
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
			if(total_size >= 0xFFFFFFFF)
				read_req->short_not_ok = 0;
			else {
				if (0 == (read_req->length % dev->ep_out->maxpacket ))
					read_req->short_not_ok = 1;
				else
					read_req->short_not_ok = 0;
			}
#else
			/* Add for RX mode 1 */
			if (0 == (read_req->length % dev->ep_out->maxpacket ))
				read_req->short_not_ok = 1;
			else
				read_req->short_not_ok = 0;
			DBG(cdev, "read_req->short_not_ok(%d), ep_out->maxpacket (%d)\n",
				read_req->short_not_ok, dev->ep_out->maxpacket);
#endif
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				pr_debug("%s, line %d: EIO, dev->dev_disconnected = %d, usb queue error \n", __func__, __LINE__, dev->dev_disconnected);
				if (dev->dev_disconnected) {
					dev->state = STATE_OFFLINE;
				} else {
					dev->state = STATE_ERROR;
					mtp_ueventToDisconnect(dev);
				}
				break;
			}
		}
		if (write_req) {
			/* write out the chunk received on the previous pass */
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			do_gettimeofday(&tv_begin);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			do_gettimeofday(&tv_end);
			DBG(cdev, "vfs_write %d\n", ret);
			{
				/* ignore the difference under msec */
				/* insertion-sort this latency into the top-N table */
				int pos = -1;
				int time_msec = (tv_end.tv_sec * 1000 + tv_end.tv_usec / 1000)
					- (tv_begin.tv_sec * 1000 + tv_begin.tv_usec / 1000);
				for (i = 0; i < IOMAXNUM; ++i){
					if (time_msec > iotimeMax[i])
						pos = i;
					else
						break;
				}
				if (pos > 0){
					for (i = 1; i <= pos; ++i){
						iotimeMax[i-1] = iotimeMax[i];
					}
				}
				if (pos != -1)
					iotimeMax[pos] = time_msec;
			}
			if (ret != write_req->actual) {
				/* short write means the backing store failed */
				r = -EIO;
				pr_debug("%s, line %d: EIO, dev->dev_disconnected = %d, file write error \n", __func__, __LINE__, dev->dev_disconnected);
				if (dev->dev_disconnected)
					dev->state = STATE_OFFLINE;
				else {
					dev->state = STATE_ERROR;
					mtp_ueventToDisconnect(dev);
				}
				break;
			}
			write_req = NULL;
		}
		DBG(dev->cdev, "%s, line %d: Wait for read_req = %p!! \n", __func__, __LINE__, read_req);
		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				pr_debug("%s, line %d: dev->state = %d, get cancel command !! Cancel it!! rx_done = %d\n", __func__, __LINE__, dev->state, dev->rx_done);
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			if (dev->state == STATE_RESET) {
				DBG(dev->cdev, "%s: dev->state = %d, get reset command !! Cancel it!! rx_done = %d\n", __func__, dev->state, dev->rx_done);
				r = -ECANCELED;
				DBG(dev->cdev, "%s, %d: request to usb_ep_dequeue!! \n", __func__, __LINE__);
				usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
			total_size += read_req->actual;
			DBG(cdev, "%s, line %d: count = %lld, total_size = %lld, read_req->actual = %d, read_req->length= %d\n", __func__, __LINE__, count, total_size, read_req->actual, read_req->length);
#endif
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}
			/* Add for RX mode 1 */
			read_req->short_not_ok = 0;
			DBG(dev->cdev, "%s, line %d: dev->state = %d, NEXT!!\n", __func__, __LINE__, dev->state);
			/* hand the filled request to the writer side */
			write_req = read_req;
			read_req = NULL;
		}
	}
	/* clear short_not_ok on an abandoned request after error/offline */
	if (dev->state == STATE_ERROR || dev->state == STATE_OFFLINE) {
		DBG(dev->cdev, "%s, line %d: read_req = %p \n", __func__, __LINE__, read_req);
		if (read_req) {
			read_req->short_not_ok = 0;
		}
	}
	DBG(dev->cdev, "[mtp]top time of vfs_write() in %s:\n", __func__);
	for (i = 0; i < IOMAXNUM; ++i){
		DBG(dev->cdev, "[mtp] %d msec\n", iotimeMax[i]);
	}
	pr_debug("%s, line %d: receive_file_work returning %d \n", __func__, __LINE__, r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
  1277. static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
  1278. {
  1279. struct usb_request *req = NULL;
  1280. int ret;
  1281. int length = event->length;
  1282. int eventIndex = 6;
  1283. DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
  1284. if (length < 0 || length > INTR_BUFFER_SIZE)
  1285. return -EINVAL;
  1286. if (dev->state == STATE_OFFLINE)
  1287. return -ENODEV;
  1288. ret = wait_event_interruptible_timeout(dev->intr_wq,
  1289. (req = mtp_req_get(dev, &dev->intr_idle)),
  1290. msecs_to_jiffies(1000));
  1291. if (!req)
  1292. return -ETIME;
  1293. if (copy_from_user(req->buf, (void __user *)event->data, length)) {
  1294. mtp_req_put(dev, &dev->intr_idle, req);
  1295. return -EFAULT;
  1296. }
  1297. req->length = length;
  1298. DBG(dev->cdev, "mtp_send_event: EventCode: req->buf[7] = 0x%x, req->buf[6] = 0x%x\n", ((char*)req->buf)[eventIndex+1], ((char*)req->buf)[eventIndex]);
  1299. ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
  1300. if (ret)
  1301. mtp_req_put(dev, &dev->intr_idle, req);
  1302. return ret;
  1303. }
/*
 * mtp_ioctl() - control interface for /dev/mtp_usb.
 * Handles file-transfer commands (dispatched to the workqueue) and
 * MTP_SEND_EVENT. Serialized via dev->ioctl_excl.
 * Returns the transfer/event result or a negative errno.
 *
 * NOTE(review): the second switch has no default case — an unknown code
 * falls through to the `fail:` label and returns -EINVAL after the
 * state fixup, which appears intentional; confirm.
 */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;
	/* trace which command arrived (debug only) */
	switch (code)
	{
	case MTP_SEND_FILE:
		pr_debug("%s: MTP_SEND_FILE, code = 0x%x\n", __func__, code);
		break;
	case MTP_RECEIVE_FILE:
		pr_debug("%s: MTP_RECEIVE_FILE, code = 0x%x\n", __func__, code);
		break;
	case MTP_SEND_FILE_WITH_HEADER:
		pr_debug("%s: MTP_SEND_FILE_WITH_HEADER, code = 0x%x\n", __func__, code);
		break;
	case MTP_SEND_EVENT:
		pr_debug("%s: MTP_SEND_EVENT, code = 0x%x\n", __func__, code);
		break;
	}
	/* only one ioctl in flight at a time */
	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;
	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range mfr;
		struct work_struct *work;
		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			DBG(dev->cdev, "%s: cancel!!! \n", __func__);
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_RESET) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);
		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}
		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();
		/* pick the worker and header mode for this command */
		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}
		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);
		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}
fail:
	/* restore a sane state and fold in any cancel/reset that arrived */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state == STATE_RESET)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
  1422. static int mtp_open(struct inode *ip, struct file *fp)
  1423. {
  1424. printk(KERN_INFO "mtp_open\n");
  1425. if (mtp_lock(&_mtp_dev->open_excl))
  1426. return -EBUSY;
  1427. /* clear any error condition */
  1428. if (_mtp_dev->state != STATE_OFFLINE)
  1429. _mtp_dev->state = STATE_READY;
  1430. fp->private_data = _mtp_dev;
  1431. return 0;
  1432. }
  1433. static int mtp_release(struct inode *ip, struct file *fp)
  1434. {
  1435. unsigned long flags;
  1436. printk(KERN_INFO "mtp_release\n");
  1437. spin_lock_irqsave(&_mtp_dev->lock, flags);
  1438. if (!_mtp_dev->dev_disconnected) {
  1439. spin_unlock_irqrestore(&_mtp_dev->lock, flags);
  1440. mtp_send_devicereset_event(_mtp_dev);
  1441. } else
  1442. spin_unlock_irqrestore(&_mtp_dev->lock, flags);
  1443. mtp_unlock(&_mtp_dev->open_excl);
  1444. return 0;
  1445. }
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	/* NOTE(review): compat_ioctl reuses mtp_ioctl unchanged; 32-bit
	 * userspace structs containing pointers (e.g. mtp_event.data)
	 * normally need compat translation — verify the ioctl ABI is
	 * layout-identical for 32/64-bit callers. */
	.compat_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};
/* misc character device backing /dev/<mtp_shortname> */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
  1461. static void mtp_work(struct work_struct *data)
  1462. {
  1463. char *envp_sessionend[2] = { "MTP=SESSIONEND", NULL };
  1464. pr_debug("%s: __begin__ \n", __func__);
  1465. kobject_uevent_env(&mtp_device.this_device->kobj, KOBJ_CHANGE, envp_sessionend);
  1466. }
  1467. static void mtp_ueventToDisconnect(struct mtp_dev *dev)
  1468. {
  1469. char *envp_mtpAskDisconnect[2] = { "USB_STATE=MTPASKDISCONNECT", NULL };
  1470. pr_debug("%s: __begin__ \n", __func__);
  1471. kobject_uevent_env(&mtp_device.this_device->kobj, KOBJ_CHANGE, envp_mtpAskDisconnect);
  1472. }
  1473. static void mtp_read_usb_functions(int functions_no, char * buff)
  1474. {
  1475. struct mtp_dev *dev = _mtp_dev;
  1476. int i;
  1477. DBG(dev->cdev, "%s: dev->curr_mtp_func_index = 0x%x\n",__func__, dev->curr_mtp_func_index);
  1478. dev->usb_functions_no = functions_no;
  1479. dev->curr_mtp_func_index = 0xff;
  1480. memcpy(dev->usb_functions, buff, sizeof(dev->usb_functions));
  1481. DBG(dev->cdev, "%s:usb_functions_no = %d, usb_functions=%s\n",__func__, dev->usb_functions_no, dev->usb_functions);
  1482. for(i=0;i<USB_MTP_FUNCTIONS;i++)
  1483. {
  1484. if(!strcmp(dev->usb_functions, USB_MTP_FUNC[i]))
  1485. {
  1486. DBG(dev->cdev, "%s: usb functions = %s, i = %d \n",__func__, dev->usb_functions, i);
  1487. dev->curr_mtp_func_index = i;
  1488. break;
  1489. }
  1490. }
  1491. }
/*
 * Control codes for enabling/disabling each file-transfer direction.
 * NOTE(review): not referenced anywhere in the visible portion of this
 * file — presumably consumed elsewhere; confirm before removing.
 */
enum FILE_ACTION_ENABLED
{
	SEND_FILE_ENABLE = 0,
	SEND_FILE_DISABLE = 1,
	RECEIVE_FILE_ENABLE = 2,
	RECEIVE_FILE_DISABLE = 3
};
/*
 * mtp_ep_flush_all() - despite the name, this does not flush any
 * endpoint FIFO; it only moves the device into STATE_RESET so blocked
 * reads/writes and file workers bail out and report -ECANCELED.
 */
static void mtp_ep_flush_all(void)
{
	struct mtp_dev *dev = _mtp_dev;
	DBG(dev->cdev, "%s: __begin__ \n", __func__);
	/* NOTE(review): written without holding dev->lock — racy against
	 * the state transitions elsewhere; presumably tolerated, confirm. */
	dev->state = STATE_RESET;
	DBG(dev->cdev, "%s: __end__ \n", __func__);
}
  1506. static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
  1507. const struct usb_ctrlrequest *ctrl)
  1508. {
  1509. struct mtp_dev *dev = _mtp_dev;
  1510. int value = -EOPNOTSUPP;
  1511. u16 w_index = le16_to_cpu(ctrl->wIndex);
  1512. u16 w_value = le16_to_cpu(ctrl->wValue);
  1513. u16 w_length = le16_to_cpu(ctrl->wLength);
  1514. unsigned long flags;
  1515. VDBG(cdev, "mtp_ctrlrequest "
  1516. "%02x.%02x v%04x i%04x l%u\n",
  1517. ctrl->bRequestType, ctrl->bRequest,
  1518. w_value, w_index, w_length);
  1519. /* Handle MTP OS string */
  1520. if (ctrl->bRequestType ==
  1521. (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
  1522. && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
  1523. && (w_value >> 8) == USB_DT_STRING
  1524. && (w_value & 0xFF) == MTP_OS_STRING_ID) {
  1525. value = (w_length < sizeof(mtp_os_string)
  1526. ? w_length : sizeof(mtp_os_string));
  1527. memcpy(cdev->req->buf, mtp_os_string, value);
  1528. } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
  1529. /* Handle MTP OS descriptor */
  1530. DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
  1531. ctrl->bRequest, w_index, w_value, w_length);
  1532. if (ctrl->bRequest == 1
  1533. && (ctrl->bRequestType & USB_DIR_IN)
  1534. && (w_index == 5)) {
  1535. value = (w_length < sizeof(mtp_ext_prop_desc) ?
  1536. w_length : sizeof(mtp_ext_prop_desc));
  1537. DBG(cdev, "vendor request: Property OS Feature, w_length = %d, value = %d \n", w_length, value);
  1538. memcpy(cdev->req->buf, &mtp_ext_prop_desc, value);
  1539. } else if (ctrl->bRequest == 1
  1540. && (ctrl->bRequestType & USB_DIR_IN)
  1541. && (w_index == 4)) {
  1542. switch(dev->curr_mtp_func_index)
  1543. {
  1544. case 0: /* mtp */
  1545. value = (w_length < sizeof(mtp_ext_config_desc) ?
  1546. w_length : sizeof(mtp_ext_config_desc));
  1547. memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
  1548. break;
  1549. case 1: /* mtp,acm , with acm, failed so far */
  1550. case 2: /* mtp,adb */
  1551. case 4: /* mtp,mass_storage */
  1552. value = (w_length < sizeof(mtp_ext_config_desc_2) ?
  1553. w_length : sizeof(mtp_ext_config_desc_2));
  1554. memcpy(cdev->req->buf, &mtp_ext_config_desc_2, value);
  1555. break;
  1556. case 3: /* mtp,adb,acm , with acm, failed so far */
  1557. case 5: /* mtp,mass_storage,adb */
  1558. value = (w_length < sizeof(mtp_ext_config_desc_3) ?
  1559. w_length : sizeof(mtp_ext_config_desc_3));
  1560. memcpy(cdev->req->buf, &mtp_ext_config_desc_3, value);
  1561. break;
  1562. #ifdef CONFIG_MTK_TC1_FEATURE
  1563. case 6: /* acm,gser,mtp,adb, with acm, xp failed so far */
  1564. value = (w_length < sizeof(mtp_ext_config_desc_4) ?
  1565. w_length : sizeof(mtp_ext_config_desc_4));
  1566. memcpy(cdev->req->buf, &mtp_ext_config_desc_4, value);
  1567. break;
  1568. case 7: /* acm,gser,mtp,adb, with acm, xp failed so far */
  1569. value = (w_length < sizeof(mtp_ext_config_desc_5) ?
  1570. w_length : sizeof(mtp_ext_config_desc_5));
  1571. memcpy(cdev->req->buf, &mtp_ext_config_desc_5, value);
  1572. break;
  1573. #endif
  1574. default: /* unknown, 0xff */
  1575. value = (w_length < sizeof(mtp_ext_config_desc) ?
  1576. w_length : sizeof(mtp_ext_config_desc));
  1577. memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
  1578. break;
  1579. }
  1580. DBG(cdev, "vendor request: Compat ID OS Feature, dev->curr_mtp_func_index = %d, dev->usb_functions = %s \n", dev->curr_mtp_func_index, dev->usb_functions);
  1581. DBG(cdev, "vendor request: Extended OS Feature, w_length = %d, value = %d, dev->curr_mtp_func_index = %d\n", w_length, value, dev->curr_mtp_func_index);
  1582. }
  1583. } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
  1584. DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
  1585. ctrl->bRequest, w_index, w_value, w_length);
  1586. if (ctrl->bRequest == MTP_REQ_CANCEL
  1587. #ifndef CONFIG_MTK_TC1_FEATURE
  1588. && w_index == 0
  1589. #endif
  1590. && w_value == 0) {
  1591. DBG(cdev, "MTP_REQ_CANCEL\n");
  1592. DBG(cdev, "%s: MTP_REQ_CANCEL. dev->state = %d.\n", __func__, dev->state);
  1593. spin_lock_irqsave(&dev->lock, flags);
  1594. if (dev->state == STATE_BUSY) {
  1595. dev->state = STATE_CANCELED;
  1596. wake_up(&dev->read_wq);
  1597. wake_up(&dev->write_wq);
  1598. } else if(dev->state == STATE_READY) {
  1599. dev->state = STATE_CANCELED;
  1600. }
  1601. spin_unlock_irqrestore(&dev->lock, flags);
  1602. /* We need to queue a request to read the remaining
  1603. * bytes, but we don't actually need to look at
  1604. * the contents.
  1605. */
  1606. value = w_length;
  1607. } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
  1608. #ifndef CONFIG_MTK_TC1_FEATURE
  1609. && w_index == 0
  1610. #endif
  1611. && w_value == 0) {
  1612. struct mtp_device_status *status = cdev->req->buf;
  1613. status->wLength =
  1614. __constant_cpu_to_le16(sizeof(*status));
  1615. DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
  1616. spin_lock_irqsave(&dev->lock, flags);
  1617. /* device status is "busy" until we report
  1618. * the cancelation to userspace
  1619. */
  1620. if (dev->state == STATE_CANCELED){
  1621. status->wCode =
  1622. __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
  1623. dev->fileTransferSend ++;
  1624. DBG(cdev, "%s: dev->fileTransferSend = %d \n", __func__, dev->fileTransferSend);
  1625. if(dev->fileTransferSend > 5) {
  1626. dev->fileTransferSend = 0;
  1627. dev->state = STATE_BUSY;
  1628. status->wCode =
  1629. __cpu_to_le16(MTP_RESPONSE_OK);
  1630. }
  1631. } else if(dev->state == STATE_RESET) {
  1632. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1633. dev->fileTransferSend = 0;
  1634. status->wCode =
  1635. __cpu_to_le16(MTP_RESPONSE_OK);
  1636. } else if(dev->state == STATE_ERROR) {
  1637. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1638. dev->fileTransferSend = 0;
  1639. if(dev->epOut_halt){
  1640. status->wCode =
  1641. __cpu_to_le16(MTP_RESPONSE_DEVICE_CANCEL);
  1642. } else
  1643. status->wCode =
  1644. __cpu_to_le16(MTP_RESPONSE_OK);
  1645. } else {
  1646. dev->fileTransferSend = 0;
  1647. status->wCode =
  1648. __cpu_to_le16(MTP_RESPONSE_OK);
  1649. }
  1650. DBG(dev->cdev, "%s: status->wCode = 0x%x, under MTP_REQ_GET_DEVICE_STATUS\n", __func__, status->wCode);
  1651. spin_unlock_irqrestore(&dev->lock, flags);
  1652. value = sizeof(*status);
  1653. } else if (ctrl->bRequest == MTP_REQ_RESET
  1654. #ifndef CONFIG_MTK_TC1_FEATURE
  1655. && w_index == 0
  1656. #endif
  1657. && w_value == 0) {
  1658. struct work_struct *work;
  1659. DBG(dev->cdev, "%s: MTP_REQ_RESET. dev->state = %d. \n", __func__, dev->state);
  1660. spin_lock_irqsave(&dev->lock, flags);
  1661. work = &dev->device_reset_work;
  1662. schedule_work(work);
  1663. /* wait for operation to complete */
  1664. mtp_ep_flush_all();
  1665. DBG(dev->cdev, "%s: wake up the work queue to prevent that they are waiting!!\n", __func__);
  1666. spin_unlock_irqrestore(&dev->lock, flags);
  1667. value = w_length;
  1668. }
  1669. }
  1670. /* respond with data transfer or status phase? */
  1671. if (value >= 0) {
  1672. int rc;
  1673. cdev->req->zero = value < w_length;
  1674. cdev->req->length = value;
  1675. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1676. if (rc < 0)
  1677. ERROR(cdev, "%s: response queue error\n", __func__);
  1678. }
  1679. return value;
  1680. }
  1681. static int ptp_ctrlrequest(struct usb_composite_dev *cdev,
  1682. const struct usb_ctrlrequest *ctrl)
  1683. {
  1684. struct mtp_dev *dev = _mtp_dev;
  1685. int value = -EOPNOTSUPP;
  1686. u16 w_index = le16_to_cpu(ctrl->wIndex);
  1687. u16 w_value = le16_to_cpu(ctrl->wValue);
  1688. u16 w_length = le16_to_cpu(ctrl->wLength);
  1689. unsigned long flags;
  1690. VDBG(cdev, "mtp_ctrlrequest "
  1691. "%02x.%02x v%04x i%04x l%u\n",
  1692. ctrl->bRequestType, ctrl->bRequest,
  1693. w_value, w_index, w_length);
  1694. if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
  1695. DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
  1696. ctrl->bRequest, w_index, w_value, w_length);
  1697. if (ctrl->bRequest == MTP_REQ_CANCEL
  1698. #ifndef CONFIG_MTK_TC1_FEATURE
  1699. && w_index == 0
  1700. #endif
  1701. && w_value == 0) {
  1702. DBG(cdev, "MTP_REQ_CANCEL\n");
  1703. DBG(cdev, "%s: MTP_REQ_CANCEL. dev->state = %d.\n", __func__, dev->state);
  1704. spin_lock_irqsave(&dev->lock, flags);
  1705. if (dev->state == STATE_BUSY) {
  1706. dev->state = STATE_CANCELED;
  1707. wake_up(&dev->read_wq);
  1708. wake_up(&dev->write_wq);
  1709. } else if(dev->state == STATE_READY) {
  1710. dev->state = STATE_CANCELED;
  1711. }
  1712. spin_unlock_irqrestore(&dev->lock, flags);
  1713. /* We need to queue a request to read the remaining
  1714. * bytes, but we don't actually need to look at
  1715. * the contents.
  1716. */
  1717. value = w_length;
  1718. } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
  1719. #ifndef CONFIG_MTK_TC1_FEATURE
  1720. && w_index == 0
  1721. #endif
  1722. && w_value == 0) {
  1723. struct mtp_device_status *status = cdev->req->buf;
  1724. status->wLength =
  1725. __constant_cpu_to_le16(sizeof(*status));
  1726. DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
  1727. spin_lock_irqsave(&dev->lock, flags);
  1728. /* device status is "busy" until we report
  1729. * the cancelation to userspace
  1730. */
  1731. if (dev->state == STATE_CANCELED){
  1732. status->wCode =
  1733. __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
  1734. dev->fileTransferSend ++;
  1735. DBG(cdev, "%s: dev->fileTransferSend = %d \n", __func__, dev->fileTransferSend);
  1736. if(dev->fileTransferSend > 5) {
  1737. dev->fileTransferSend = 0;
  1738. dev->state = STATE_BUSY;
  1739. status->wCode =
  1740. __cpu_to_le16(MTP_RESPONSE_OK);
  1741. }
  1742. } else if(dev->state == STATE_RESET) {
  1743. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1744. dev->fileTransferSend = 0;
  1745. status->wCode =
  1746. __cpu_to_le16(MTP_RESPONSE_OK);
  1747. } else if(dev->state == STATE_ERROR) {
  1748. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1749. dev->fileTransferSend = 0;
  1750. if(dev->epOut_halt){
  1751. status->wCode =
  1752. __cpu_to_le16(MTP_RESPONSE_DEVICE_CANCEL);
  1753. } else
  1754. status->wCode =
  1755. __cpu_to_le16(MTP_RESPONSE_OK);
  1756. } else {
  1757. dev->fileTransferSend = 0;
  1758. status->wCode =
  1759. __cpu_to_le16(MTP_RESPONSE_OK);
  1760. }
  1761. DBG(dev->cdev, "%s: status->wCode = 0x%x, under MTP_REQ_GET_DEVICE_STATUS\n", __func__, status->wCode);
  1762. spin_unlock_irqrestore(&dev->lock, flags);
  1763. value = sizeof(*status);
  1764. } else if (ctrl->bRequest == MTP_REQ_RESET
  1765. #ifndef CONFIG_MTK_TC1_FEATURE
  1766. && w_index == 0
  1767. #endif
  1768. && w_value == 0) {
  1769. struct work_struct *work;
  1770. DBG(dev->cdev, "%s: MTP_REQ_RESET. dev->state = %d. \n", __func__, dev->state);
  1771. spin_lock_irqsave(&dev->lock, flags);
  1772. work = &dev->device_reset_work;
  1773. schedule_work(work);
  1774. /* wait for operation to complete */
  1775. mtp_ep_flush_all();
  1776. DBG(dev->cdev, "%s: wake up the work queue to prevent that they are waiting!!\n", __func__);
  1777. spin_unlock_irqrestore(&dev->lock, flags);
  1778. value = w_length;
  1779. }
  1780. }
  1781. /* respond with data transfer or status phase? */
  1782. if (value >= 0) {
  1783. int rc;
  1784. cdev->req->zero = value < w_length;
  1785. cdev->req->length = value;
  1786. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1787. if (rc < 0)
  1788. ERROR(cdev, "%s: response queue error\n", __func__);
  1789. }
  1790. return value;
  1791. }
  1792. static int
  1793. mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
  1794. {
  1795. struct usb_composite_dev *cdev = c->cdev;
  1796. struct mtp_dev *dev = func_to_mtp(f);
  1797. int id;
  1798. int ret;
  1799. dev->cdev = cdev;
  1800. DBG(cdev, "mtp_function_bind dev: %p\n", dev);
  1801. printk("mtp_function_bind dev: %p\n", dev);
  1802. /* allocate interface ID(s) */
  1803. id = usb_interface_id(c, f);
  1804. if (id < 0)
  1805. return id;
  1806. mtp_interface_desc.bInterfaceNumber = id;
  1807. ptp_interface_desc.bInterfaceNumber = id;
  1808. DBG(cdev, "mtp_function_bind bInterfaceNumber = id= %d\n", id);
  1809. DBG(cdev, "%s: reset dev->curr_mtp_func_index to 0xff \n", __func__);
  1810. dev->curr_mtp_func_index = 0xff;
  1811. /* allocate endpoints */
  1812. ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
  1813. &mtp_fullspeed_out_desc, &mtp_intr_desc);
  1814. if (ret)
  1815. return ret;
  1816. /* support high speed hardware */
  1817. if (gadget_is_dualspeed(c->cdev->gadget)) {
  1818. mtp_highspeed_in_desc.bEndpointAddress =
  1819. mtp_fullspeed_in_desc.bEndpointAddress;
  1820. mtp_highspeed_out_desc.bEndpointAddress =
  1821. mtp_fullspeed_out_desc.bEndpointAddress;
  1822. }
  1823. dev->dev_disconnected = 0;
  1824. /* support super speed hardware */
  1825. if (gadget_is_superspeed(c->cdev->gadget)) {
  1826. mtp_superspeed_in_desc.bEndpointAddress =
  1827. mtp_fullspeed_in_desc.bEndpointAddress;
  1828. mtp_superspeed_out_desc.bEndpointAddress =
  1829. mtp_fullspeed_out_desc.bEndpointAddress;
  1830. }
  1831. DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
  1832. gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
  1833. f->name, dev->ep_in->name, dev->ep_out->name);
  1834. return 0;
  1835. }
  1836. static void
  1837. mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
  1838. {
  1839. struct mtp_dev *dev = func_to_mtp(f);
  1840. struct usb_request *req;
  1841. int i;
  1842. printk("%s, line %d: \n", __func__, __LINE__);
  1843. while ((req = mtp_req_get(dev, &dev->tx_idle)))
  1844. mtp_request_free(req, dev->ep_in);
  1845. for (i = 0; i < RX_REQ_MAX; i++)
  1846. mtp_request_free(dev->rx_req[i], dev->ep_out);
  1847. while ((req = mtp_req_get(dev, &dev->intr_idle)))
  1848. mtp_request_free(req, dev->ep_intr);
  1849. dev->state = STATE_OFFLINE;
  1850. dev->dev_disconnected = 1;
  1851. }
  1852. static int mtp_function_set_alt(struct usb_function *f,
  1853. unsigned intf, unsigned alt)
  1854. {
  1855. struct mtp_dev *dev = func_to_mtp(f);
  1856. struct usb_composite_dev *cdev = f->config->cdev;
  1857. int ret;
  1858. printk("mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
  1859. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
  1860. if (ret)
  1861. return ret;
  1862. ret = usb_ep_enable(dev->ep_in);
  1863. if (ret)
  1864. return ret;
  1865. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
  1866. if (ret)
  1867. return ret;
  1868. ret = usb_ep_enable(dev->ep_out);
  1869. if (ret) {
  1870. usb_ep_disable(dev->ep_in);
  1871. return ret;
  1872. }
  1873. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
  1874. if (ret)
  1875. return ret;
  1876. ret = usb_ep_enable(dev->ep_intr);
  1877. if (ret) {
  1878. usb_ep_disable(dev->ep_out);
  1879. usb_ep_disable(dev->ep_in);
  1880. return ret;
  1881. }
  1882. dev->state = STATE_READY;
  1883. dev->dev_disconnected = 0;
  1884. /* readers may be blocked waiting for us to go online */
  1885. wake_up(&dev->read_wq);
  1886. return 0;
  1887. }
  1888. static void mtp_function_disable(struct usb_function *f)
  1889. {
  1890. struct mtp_dev *dev = func_to_mtp(f);
  1891. struct usb_composite_dev *cdev = dev->cdev;
  1892. printk("mtp_function_disable\n");
  1893. dev->state = STATE_OFFLINE;
  1894. usb_ep_disable(dev->ep_in);
  1895. usb_ep_disable(dev->ep_out);
  1896. usb_ep_disable(dev->ep_intr);
  1897. dev->dev_disconnected = 1;
  1898. /* readers may be blocked waiting for us to go online */
  1899. wake_up(&dev->read_wq);
  1900. VDBG(cdev, "%s disabled\n", dev->function.name);
  1901. }
  1902. static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
  1903. {
  1904. struct mtp_dev *dev = _mtp_dev;
  1905. int ret = 0;
  1906. printk(KERN_INFO "mtp_bind_config\n");
  1907. /* allocate a string ID for our interface */
  1908. if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
  1909. ret = usb_string_id(c->cdev);
  1910. if (ret < 0)
  1911. return ret;
  1912. mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
  1913. mtp_interface_desc.iInterface = ret;
  1914. }
  1915. dev->cdev = c->cdev;
  1916. dev->function.name = "mtp";
  1917. dev->function.strings = mtp_strings;
  1918. if (ptp_config) {
  1919. dev->function.fs_descriptors = fs_ptp_descs;
  1920. dev->function.hs_descriptors = hs_ptp_descs;
  1921. if (gadget_is_superspeed(c->cdev->gadget))
  1922. dev->function.ss_descriptors = ss_ptp_descs;
  1923. } else {
  1924. dev->function.fs_descriptors = fs_mtp_descs;
  1925. dev->function.hs_descriptors = hs_mtp_descs;
  1926. if (gadget_is_superspeed(c->cdev->gadget))
  1927. dev->function.ss_descriptors = ss_mtp_descs;
  1928. }
  1929. dev->function.bind = mtp_function_bind;
  1930. dev->function.unbind = mtp_function_unbind;
  1931. dev->function.set_alt = mtp_function_set_alt;
  1932. dev->function.disable = mtp_function_disable;
  1933. return usb_add_function(c, &dev->function);
  1934. }
  1935. static int mtp_setup(void)
  1936. {
  1937. struct mtp_dev *dev;
  1938. int ret;
  1939. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  1940. if (!dev)
  1941. return -ENOMEM;
  1942. spin_lock_init(&dev->lock);
  1943. init_waitqueue_head(&dev->read_wq);
  1944. init_waitqueue_head(&dev->write_wq);
  1945. init_waitqueue_head(&dev->intr_wq);
  1946. atomic_set(&dev->open_excl, 0);
  1947. atomic_set(&dev->ioctl_excl, 0);
  1948. INIT_LIST_HEAD(&dev->tx_idle);
  1949. INIT_LIST_HEAD(&dev->intr_idle);
  1950. dev->wq = create_singlethread_workqueue("f_mtp");
  1951. if (!dev->wq) {
  1952. ret = -ENOMEM;
  1953. goto err1;
  1954. }
  1955. INIT_WORK(&dev->send_file_work, send_file_work);
  1956. INIT_WORK(&dev->receive_file_work, receive_file_work);
  1957. INIT_WORK(&dev->device_reset_work, mtp_work);
  1958. dev->fileTransferSend = 0;
  1959. dev->epOut_halt = 0;
  1960. dev->dev_disconnected = 0;
  1961. _mtp_dev = dev;
  1962. ret = misc_register(&mtp_device);
  1963. if (ret)
  1964. goto err2;
  1965. return 0;
  1966. err2:
  1967. destroy_workqueue(dev->wq);
  1968. err1:
  1969. _mtp_dev = NULL;
  1970. kfree(dev);
  1971. printk(KERN_ERR "mtp gadget driver failed to initialize\n");
  1972. return ret;
  1973. }
  1974. static void mtp_cleanup(void)
  1975. {
  1976. struct mtp_dev *dev = _mtp_dev;
  1977. if (!dev)
  1978. return;
  1979. misc_deregister(&mtp_device);
  1980. destroy_workqueue(dev->wq);
  1981. _mtp_dev = NULL;
  1982. kfree(dev);
  1983. }