f_mtp.c 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293
  1. /*
  2. * Gadget Function Driver for MTP
  3. *
  4. * Copyright (C) 2010 Google, Inc.
  5. * Author: Mike Lockwood <lockwood@android.com>
  6. *
  7. * This software is licensed under the terms of the GNU General Public
  8. * License version 2, as published by the Free Software Foundation, and
  9. * may be copied, distributed, and modified under those terms.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. /* #define DEBUG */
  18. /* #define VERBOSE_DEBUG */
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/poll.h>
  22. #include <linux/delay.h>
  23. #include <linux/wait.h>
  24. #include <linux/err.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/types.h>
  27. #include <linux/file.h>
  28. #include <linux/device.h>
  29. #include <linux/miscdevice.h>
  30. #include <linux/usb.h>
  31. #include <linux/usb_usual.h>
  32. #include <linux/usb/ch9.h>
  33. #include <linux/usb/f_mtp.h>
  34. #include <linux/delay.h>
  35. #include <linux/time.h>
  36. #define MTP_BULK_BUFFER_SIZE 16384
  37. #define INTR_BUFFER_SIZE 28
  38. /* String IDs */
  39. #define INTERFACE_STRING_INDEX 0
  40. /* values for mtp_dev.state */
  41. #define STATE_OFFLINE 0 /* initial state, disconnected */
  42. #define STATE_READY 1 /* ready for userspace calls */
  43. #define STATE_BUSY 2 /* processing userspace calls */
  44. #define STATE_CANCELED 3 /* transaction canceled by host */
  45. #define STATE_ERROR 4 /* error from completion routine */
  46. #define STATE_RESET 5 /* reset from device reset request */
  47. /* number of tx and rx requests to allocate */
  48. #define TX_REQ_MAX 4
  49. #define RX_REQ_MAX 2
  50. #define INTR_REQ_MAX 5
  51. /* ID for Microsoft MTP OS String */
  52. #define MTP_OS_STRING_ID 0xEE
/* MTP class requests */
  54. #define MTP_REQ_CANCEL 0x64
  55. #define MTP_REQ_GET_EXT_EVENT_DATA 0x65
  56. #define MTP_REQ_RESET 0x66
  57. #define MTP_REQ_GET_DEVICE_STATUS 0x67
  58. /* constants for device status */
  59. #define MTP_RESPONSE_OK 0x2001
  60. #define MTP_RESPONSE_DEVICE_BUSY 0x2019
  61. #define MTP_RESPONSE_DEVICE_CANCEL 0x201F
/* Name used for the char device (registered via miscdevice — presumably
 * appears as /dev/mtp_usb; verify against the misc_register call). */
static const char mtp_shortname[] = "mtp_usb";
  63. /*#ifdef DBG
  64. #undef DBG
  65. #endif
  66. #define DBG(level, fmt, args...) \
  67. do { \
  68. printk( fmt, ##args); \
  69. } while (0)
  70. #ifdef VDBG
  71. #undef VDBG
  72. #endif
  73. #define VDBG(level, fmt, args...) \
  74. do { \
  75. printk( fmt, ##args); \
  76. } while (0)
  77. #ifdef pr_debug
  78. #undef pr_debug
  79. #endif
  80. #define pr_debug(fmt, args...) \
  81. do { \
  82. printk( fmt, ##args); \
  83. } while (0)
  84. #ifdef pr_info
  85. #undef pr_info
  86. #endif
  87. #define pr_info(fmt, args...) \
  88. do { \
  89. printk( fmt, ##args); \
  90. } while (0)
  91. */
/* Per-device state for the MTP gadget function.  A single instance is
 * shared through the global _mtp_dev pointer between the gadget bind
 * path and the userspace-facing file operations. */
struct mtp_dev {
        struct usb_function function;
        struct usb_composite_dev *cdev;
        spinlock_t lock;                /* protects state and the request lists */
        struct usb_ep *ep_in;           /* bulk IN: device-to-host data */
        struct usb_ep *ep_out;          /* bulk OUT: host-to-device data */
        struct usb_ep *ep_intr;         /* interrupt IN: MTP events */
        int state;                      /* one of the STATE_* values above */
        /* synchronize access to our device file */
        atomic_t open_excl;
        /* to enforce only one ioctl at a time */
        atomic_t ioctl_excl;
        struct list_head tx_idle;       /* unused bulk-IN requests (see mtp_complete_in) */
        struct list_head intr_idle;     /* unused interrupt requests (see mtp_complete_intr) */
        wait_queue_head_t read_wq;
        wait_queue_head_t write_wq;
        wait_queue_head_t intr_wq;
        struct usb_request *rx_req[RX_REQ_MAX];
        int rx_done;                    /* set by mtp_complete_out when an OUT request finishes */
        /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
         * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
         */
        struct workqueue_struct *wq;
        struct work_struct send_file_work;
        struct work_struct receive_file_work;
        struct file *xfer_file;         /* file being sent/received by the work items */
        loff_t xfer_file_offset;
        int64_t xfer_file_length;
        unsigned xfer_send_header;      /* nonzero: prepend an mtp_data_header to the transfer */
        uint16_t xfer_command;          /* MTP command code placed in the header */
        uint32_t xfer_transaction_id;   /* MTP transaction id placed in the header */
        int xfer_result;
        struct work_struct device_reset_work;
        int fileTransferSend;
        char usb_functions[32];         /* current function-list string, e.g. "mtp,adb\n" — presumably matched against USB_MTP_FUNC; verify */
        int curr_mtp_func_index;
        int usb_functions_no;
        int epOut_halt;                 /* nonzero while the OUT endpoint is halted */
        int dev_disconnected;
};
/* MTP interface descriptor: vendor-specific class with three endpoints
 * (bulk IN, bulk OUT, interrupt IN). */
static struct usb_interface_descriptor mtp_interface_desc = {
        .bLength = USB_DT_INTERFACE_SIZE,
        .bDescriptorType = USB_DT_INTERFACE,
        .bInterfaceNumber = 0,
        .bNumEndpoints = 3,
        .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
        .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
        .bInterfaceProtocol = 0,
};

/* PTP interface descriptor: still-image class, subclass 1, protocol 1. */
static struct usb_interface_descriptor ptp_interface_desc = {
        .bLength = USB_DT_INTERFACE_SIZE,
        .bDescriptorType = USB_DT_INTERFACE,
        .bInterfaceNumber = 0,
        .bNumEndpoints = 3,
        .bInterfaceClass = USB_CLASS_STILL_IMAGE,
        .bInterfaceSubClass = 1,
        .bInterfaceProtocol = 1,
};

/* SuperSpeed bulk endpoints: 1024-byte max packet. */
static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
        .bLength = sizeof mtp_superspeed_in_comp_desc,
        .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
        /* the following 2 values can be tweaked if necessary */
        /* .bMaxBurst = 0, */
        /* .bmAttributes = 0, */
};

static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_OUT,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize = __constant_cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
        .bLength = sizeof mtp_superspeed_out_comp_desc,
        .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
        /* the following 2 values can be tweaked if necessary */
        /* .bMaxBurst = 0, */
        /* .bmAttributes = 0, */
};

/* High-speed bulk endpoints: 512-byte max packet. */
static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_OUT,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
        .wMaxPacketSize = __constant_cpu_to_le16(512),
};

/* Full-speed bulk endpoints: wMaxPacketSize intentionally left 0 here —
 * NOTE(review): presumably filled in by endpoint autoconfig; verify. */
static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_OUT,
        .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

/* Interrupt IN endpoint used for MTP events (shared by all speeds). */
static struct usb_endpoint_descriptor mtp_intr_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
        .bDescriptorType = USB_DT_ENDPOINT,
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_INT,
        .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
        .bInterval = 6,
};

static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
        .bLength = sizeof mtp_superspeed_intr_comp_desc,
        .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
        /* the following 3 values can be tweaked if necessary */
        /* .bMaxBurst = 0, */
        /* .bmAttributes = 0, */
        .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
};
/* NULL-terminated descriptor lists, one per speed for each of the MTP
 * and PTP personalities. */
static struct usb_descriptor_header *fs_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

/* SuperSpeed lists interleave each endpoint with its companion descriptor. */
static struct usb_descriptor_header *ss_mtp_descs[] = {
        (struct usb_descriptor_header *) &mtp_interface_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
        NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        NULL,
};

static struct usb_descriptor_header *ss_ptp_descs[] = {
        (struct usb_descriptor_header *) &ptp_interface_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_in_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_out_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
        (struct usb_descriptor_header *) &mtp_intr_desc,
        (struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
        NULL,
};
static struct usb_string mtp_string_defs[] = {
        /* Naming interface "MTP" so libmtp will recognize us */
        [INTERFACE_STRING_INDEX].s = "MTP",
        { }, /* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
        .language = 0x0409, /* en-US */
        .strings = mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
        &mtp_string_table,
        NULL,
};

/* Microsoft MTP OS String: returned for GET_DESCRIPTOR at string index
 * MTP_OS_STRING_ID (0xEE); signals MS-OS-descriptor support to Windows. */
static u8 mtp_os_string[] = {
        18, /* sizeof(mtp_os_string) */
        USB_DT_STRING,
        /* Signature field: "MSFT100" */
        'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
        /* vendor code */
        1,
        /* padding */
        0
};
/* Microsoft Extended Property OS Feature Descriptor Header Section */
struct mtp_ext_prop_desc_header {
        __le32 dwLength;
        __u16 bcdVersion;       /* NOTE(review): holds a le16 value; presumably should be __le16 — verify with sparse */
        __le16 wIndex;
        __u16 wCount;
};

/* Microsoft Extended Property OS Feature Function Section */
struct mtp_ext_prop_desc_property {
        __le32 dwSize;
        __le32 dwPropertyDataType;
        __le16 wPropertyNameLength;
        __u8 bPropertyName[8]; /* MTP */
        __le32 dwPropertyDataLength;
        __u8 bPropertyData[22]; /* MTP Device */
}mtp_ext_prop_desc_property;    /* NOTE(review): this also defines a global object
                                 * sharing the struct tag's name; the sizeof in
                                 * .dwSize below resolves to this object (same size) */

/* MTP Extended Property descriptor (wIndex 5), combining header and the
 * single custom property above. */
struct {
        struct mtp_ext_prop_desc_header header;
        struct mtp_ext_prop_desc_property customProp;
} mtp_ext_prop_desc = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_prop_desc)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(5),
                .wCount = __constant_cpu_to_le16(1),
        },
        .customProp = {
                .dwSize = __constant_cpu_to_le32(sizeof(mtp_ext_prop_desc_property)),
                .dwPropertyDataType = __constant_cpu_to_le32(1),
                .wPropertyNameLength = __constant_cpu_to_le16(8),
                .bPropertyName = {'M', 0, 'T', 0, 'P', 0, 0, 0}, /* "MTP" in UTF-16LE */
                .dwPropertyDataLength = __constant_cpu_to_le32(22),
                .bPropertyData = {'M', 0, 'T', 0, 'P', 0, ' ', 0, 'D', 0, 'e', 0, 'v', 0, 'i', 0, 'c', 0, 'e', 0, 0, 0}, /* "MTP Device" in UTF-16LE */
        },
};
/* Vendor code echoed in mtp_os_string above. */
#define MSFT_bMS_VENDOR_CODE 1

/* Number of recognized USB function-list combinations that include MTP. */
#ifdef CONFIG_MTK_TC1_FEATURE
#define USB_MTP_FUNCTIONS 8
#else
#define USB_MTP_FUNCTIONS 6
#endif

/* Function-list strings (trailing '\n' included in each literal). */
#define USB_MTP "mtp\n"
#define USB_MTP_ACM "mtp,acm\n"
#define USB_MTP_ADB "mtp,adb\n"
#define USB_MTP_ADB_ACM "mtp,adb,acm\n"
#define USB_MTP_UMS "mtp,mass_storage\n"
#define USB_MTP_UMS_ADB "mtp,mass_storage,adb\n"
#ifdef CONFIG_MTK_TC1_FEATURE
#define USB_TC1_MTP_ADB "acm,gser,mtp,adb\n"
#define USB_TC1_MTP "acm,gser,mtp\n"
#endif

/* Table of the combinations above, indexed 0..USB_MTP_FUNCTIONS-1. */
static char * USB_MTP_FUNC[USB_MTP_FUNCTIONS] =
{
        USB_MTP,
        USB_MTP_ACM,
        USB_MTP_ADB,
        USB_MTP_ADB_ACM,
        USB_MTP_UMS,
        USB_MTP_UMS_ADB,
#ifdef CONFIG_MTK_TC1_FEATURE
        USB_TC1_MTP_ADB,
        USB_TC1_MTP
#endif
};
/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
        __le32 dwLength;
        __u16 bcdVersion;
        __le16 wIndex;
        __u8 bCount;            /* number of function sections that follow */
        __u8 reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
        __u8 bFirstInterfaceNumber;
        __u8 bInterfaceCount;
        __u8 compatibleID[8];
        __u8 subCompatibleID[8];
        __u8 reserved[6];
};

/* MTP Extended Configuration Descriptor: single-function variant, the
 * one interface advertising compatible ID "MTP". */
struct {
        struct mtp_ext_config_desc_header header;
        struct mtp_ext_config_desc_function function;
} mtp_ext_config_desc = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(4),
                /* .bCount = __constant_cpu_to_le16(1), */
                .bCount = 0x01,
        },
        .function = {
                .bFirstInterfaceNumber = 0,
                .bInterfaceCount = 1,
                .compatibleID = { 'M', 'T', 'P' },
        },
};
/* Two-function variant: MTP on interface 0, one additional interface
 * with empty compatible IDs. */
struct {
        struct mtp_ext_config_desc_header header;
        struct mtp_ext_config_desc_function function1;
        struct mtp_ext_config_desc_function function2;
} mtp_ext_config_desc_2 = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_2)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(4),
                /* .bCount = __constant_cpu_to_le16(1), */
                .bCount = 0x02,
                .reserved = { 0 },
        },
        .function1 =
        {
                .bFirstInterfaceNumber = 0,
                .bInterfaceCount = 1,
                .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function2 =
        {
                .bFirstInterfaceNumber = 1,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
};
/* Three-function variant: MTP on interface 0, two further interfaces
 * with empty compatible IDs. */
struct {
        struct mtp_ext_config_desc_header header;
        struct mtp_ext_config_desc_function function1;
        struct mtp_ext_config_desc_function function2;
        struct mtp_ext_config_desc_function function3;
} mtp_ext_config_desc_3 = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_3)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(4),
                /* .bCount = __constant_cpu_to_le16(1), */
                .bCount = 0x03,
                .reserved = { 0 },
        },
        .function1 =
        {
                .bFirstInterfaceNumber = 0,
                .bInterfaceCount = 1,
                .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function2 =
        {
                .bFirstInterfaceNumber = 1,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function3 =
        {
                .bFirstInterfaceNumber = 2,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
};
#ifdef CONFIG_MTK_TC1_FEATURE
/* TC1 four-function variant: MTP sits on interface 3 (after a two-
 * interface function starting at 0 and one at 2), plus one more at 4. */
struct {
        struct mtp_ext_config_desc_header header;
        struct mtp_ext_config_desc_function function1;
        struct mtp_ext_config_desc_function function2;
        struct mtp_ext_config_desc_function function3;
        struct mtp_ext_config_desc_function function4;
} mtp_ext_config_desc_4 = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_4)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(4),
                /* .bCount = __constant_cpu_to_le16(1), */
                .bCount = 0x04,
                .reserved = { 0 },
        },
        .function1 =
        {
                .bFirstInterfaceNumber = 0,
                .bInterfaceCount = 2,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function2 =
        {
                .bFirstInterfaceNumber = 2,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function3 =
        {
                .bFirstInterfaceNumber = 3,
                .bInterfaceCount = 1,
                .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function4 =
        {
                .bFirstInterfaceNumber = 4,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
};

/* TC1 three-function variant: like the above without the final function. */
struct {
        struct mtp_ext_config_desc_header header;
        struct mtp_ext_config_desc_function function1;
        struct mtp_ext_config_desc_function function2;
        struct mtp_ext_config_desc_function function3;
} mtp_ext_config_desc_5 = {
        .header = {
                .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc_5)),
                .bcdVersion = __constant_cpu_to_le16(0x0100),
                .wIndex = __constant_cpu_to_le16(4),
                /* .bCount = __constant_cpu_to_le16(1), */
                .bCount = 0x03,
                .reserved = { 0 },
        },
        .function1 =
        {
                .bFirstInterfaceNumber = 0,
                .bInterfaceCount = 2,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function2 =
        {
                .bFirstInterfaceNumber = 2,
                .bInterfaceCount = 1,
                .compatibleID = { 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
        .function3 =
        {
                .bFirstInterfaceNumber = 3,
                .bInterfaceCount = 1,
                .compatibleID = { 'M', 'T', 'P', 0, 0, 0, 0, 0 },
                .subCompatibleID = { 0 },
                .reserved = { 0 },
        },
};
#endif
/* Payload returned for the MTP_REQ_GET_DEVICE_STATUS class request. */
struct mtp_device_status {
        __le16 wLength;
        __le16 wCode;           /* one of the MTP_RESPONSE_* codes */
};

/* Header prepended to an MTP data packet when xfer_send_header is set. */
struct mtp_data_header {
        /* length of packet, including this header */
        __le32 length;
        /* container type (2 for data packet) */
        __le16 type;
        /* MTP command code */
        __le16 command;
        /* MTP transaction ID */
        __le32 transaction_id;
};
static void mtp_ueventToDisconnect(struct mtp_dev *dev);

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

/* Map an embedded usb_function back to its containing mtp_dev. */
static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
        return container_of(f, struct mtp_dev, function);
}
  570. static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
  571. {
  572. struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
  573. if (!req)
  574. return NULL;
  575. /* now allocate buffers for the requests */
  576. #if defined(CONFIG_64BIT) && defined(CONFIG_MTK_LM_MODE)
  577. req->buf = kmalloc(buffer_size, GFP_KERNEL | GFP_DMA);
  578. #else
  579. req->buf = kmalloc(buffer_size, GFP_KERNEL);
  580. #endif
  581. if (!req->buf) {
  582. usb_ep_free_request(ep, req);
  583. return NULL;
  584. }
  585. return req;
  586. }
  587. static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
  588. {
  589. if (req) {
  590. kfree(req->buf);
  591. usb_ep_free_request(ep, req);
  592. }
  593. }
  594. static inline int mtp_lock(atomic_t *excl)
  595. {
  596. if (atomic_inc_return(excl) == 1) {
  597. return 0;
  598. } else {
  599. atomic_dec(excl);
  600. return -1;
  601. }
  602. }
/* Release a counter previously taken with mtp_lock(). */
static inline void mtp_unlock(atomic_t *excl)
{
        atomic_dec(excl);
}
/* add a request to the tail of a list */
static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
        struct usb_request *req)
{
        unsigned long flags;

        /* dev->lock serializes list access with the completion callbacks */
        spin_lock_irqsave(&dev->lock, flags);
        list_add_tail(&req->list, head);
        spin_unlock_irqrestore(&dev->lock, flags);
}
  616. /* remove a request from the head of a list */
  617. static struct usb_request
  618. *mtp_req_get(struct mtp_dev *dev, struct list_head *head)
  619. {
  620. unsigned long flags;
  621. struct usb_request *req;
  622. spin_lock_irqsave(&dev->lock, flags);
  623. if (list_empty(head)) {
  624. req = 0;
  625. } else {
  626. req = list_first_entry(head, struct usb_request, list);
  627. list_del(&req->list);
  628. }
  629. spin_unlock_irqrestore(&dev->lock, flags);
  630. return req;
  631. }
/* Completion callback for bulk-IN transfers: recycle the request onto
 * tx_idle and wake any waiter on write_wq.  A non-zero status moves the
 * device into STATE_ERROR. */
static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        if (req->status != 0)
                dev->state = STATE_ERROR;
        mtp_req_put(dev, &dev->tx_idle, req);
        wake_up(&dev->write_wq);
}
/* Completion callback for bulk-OUT transfers: flag rx_done and wake any
 * waiter on read_wq.  A non-zero status moves the device into
 * STATE_ERROR. */
static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        dev->rx_done = 1;
        if (req->status != 0)
                dev->state = STATE_ERROR;
        wake_up(&dev->read_wq);
}
/* Completion callback for interrupt-IN (event) transfers: recycle the
 * request onto intr_idle and wake any waiter on intr_wq.  A non-zero
 * status moves the device into STATE_ERROR. */
static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
        struct mtp_dev *dev = _mtp_dev;

        if (req->status != 0)
                dev->state = STATE_ERROR;
        mtp_req_put(dev, &dev->intr_idle, req);
        wake_up(&dev->intr_wq);
}
/* Claim the three endpoints from the gadget (bulk IN/OUT, interrupt IN)
 * and pre-allocate the usb_requests for each.
 *
 * Returns 0 on success, -ENODEV if an endpoint cannot be autoconfigured,
 * or -1 if a request allocation fails.
 * NOTE(review): on the allocation-failure path, requests already placed
 * on tx_idle/intr_idle and in rx_req[] are not freed here — presumably
 * released later in the unbind path; verify.  Also note the bare -1
 * rather than a -E* code on that path.
 */
static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
        struct usb_endpoint_descriptor *in_desc,
        struct usb_endpoint_descriptor *out_desc,
        struct usb_endpoint_descriptor *intr_desc)
{
        struct usb_composite_dev *cdev = dev->cdev;
        struct usb_request *req;
        struct usb_ep *ep;
        int i;

        DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

        ep = usb_ep_autoconfig(cdev->gadget, in_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
        ep->driver_data = dev; /* claim the endpoint */
        dev->ep_in = ep;

        ep = usb_ep_autoconfig(cdev->gadget, out_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
        ep->driver_data = dev; /* claim the endpoint */
        dev->ep_out = ep;

        ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
        if (!ep) {
                DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
                return -ENODEV;
        }
        DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
        ep->driver_data = dev; /* claim the endpoint */
        dev->ep_intr = ep;

        /* now allocate requests for our endpoints */
        /* bulk-IN requests start life on the tx_idle free list */
        for (i = 0; i < TX_REQ_MAX; i++) {
                req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
                if (!req)
                        goto fail;
                req->complete = mtp_complete_in;
                mtp_req_put(dev, &dev->tx_idle, req);
        }
        /* bulk-OUT requests are tracked in the fixed rx_req[] array */
        for (i = 0; i < RX_REQ_MAX; i++) {
                req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
                if (!req)
                        goto fail;
                req->complete = mtp_complete_out;
                dev->rx_req[i] = req;
        }
        /* interrupt requests start life on the intr_idle free list */
        for (i = 0; i < INTR_REQ_MAX; i++) {
                req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
                if (!req)
                        goto fail;
                req->complete = mtp_complete_intr;
                mtp_req_put(dev, &dev->intr_idle, req);
        }

        return 0;

fail:
        printk(KERN_ERR "mtp_bind() could not allocate requests\n");
        return -1;
}
/*
 * Queue a PTP DeviceReset event (code 0x400b) on the interrupt endpoint,
 * unless the function has been disconnected AND is offline (in which case
 * nothing is sent and 0 is returned).
 *
 * Returns 0 on success or skip, -EINVAL/-ENODEV on bad length/state,
 * -ETIME if no idle interrupt request became free within 1s, or the
 * usb_ep_queue() result.
 */
static int mtp_send_devicereset_event(struct mtp_dev *dev)
{
	struct usb_request *req = NULL;
	int ret;
	int length = 12;
	unsigned long flags;
	/* PTP event container: length 12 (0x0000000C), type EVENT (0x0004),
	 * event code 0x400b (DeviceReset), little-endian on the wire */
	char buffer[12]={0x0C, 0x0, 0x0, 0x0, 0x4, 0x0, 0xb, 0x40, 0x0, 0x0, 0x0, 0x0};

	DBG(dev->cdev, "%s, line %d: dev->dev_disconnected = %d\n", __func__, __LINE__, dev->dev_disconnected);

	/* defensive range check; length is the fixed 12-byte event size */
	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	spin_lock_irqsave(&dev->lock, flags);
	DBG(dev->cdev, "%s, line %d: _mtp_dev->dev_disconnected = %d, dev->state = %d \n", __func__, __LINE__, dev->dev_disconnected, dev->state);
	/* send only if still connected or not fully offline; the lock is
	 * dropped before sleeping on the wait queue below */
	if(!dev->dev_disconnected || dev->state != STATE_OFFLINE)
	{
		spin_unlock_irqrestore(&dev->lock, flags);
		/* wait up to 1s for an idle interrupt request */
		ret = wait_event_interruptible_timeout(dev->intr_wq,
			(req = mtp_req_get(dev, &dev->intr_idle)),
			msecs_to_jiffies(1000));
		if (!req)
			return -ETIME;

		memcpy(req->buf, buffer, length);
		req->length = length;
		ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
		DBG(dev->cdev, "%s, line %d: ret = %d\n", __func__, __LINE__, ret);
		/* queue failed: put the request back into the idle pool */
		if (ret)
			mtp_req_put(dev, &dev->intr_idle, req);
	}
	else
	{
		spin_unlock_irqrestore(&dev->lock, flags);
		DBG(dev->cdev, "%s, line %d: usb function has been unbind!! do nothing!!\n", __func__, __LINE__);
		ret = 0;
	}
	DBG(dev->cdev, "%s, line %d: _mtp_dev->dev_disconnected = %d, dev->state = %d, return!! \n", __func__, __LINE__, dev->dev_disconnected, dev->state);
	return ret;
}
/*
 * Read one bulk-OUT transfer from the host into a userspace buffer.
 * Blocks until the device is online and a request completes; reports
 * -ECANCELED on host cancel/reset, -EIO on queue or state errors.
 * At most MTP_BULK_BUFFER_SIZE bytes are read per call.
 */
static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	ssize_t r = count;
	unsigned xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%zu)\n", count);

	/* a single rx request buffer is used, so larger reads are refused */
	if (count > MTP_BULK_BUFFER_SIZE)
		return -EINVAL;

	/* recover a previously-halted OUT endpoint before queueing again */
	if (dev->epOut_halt) {
		printk("%s, line %d: ret %d!! <dev->epOut_halt = %d> reset the out ep \n", __func__, __LINE__, ret, dev->epOut_halt);
		mdelay(2000);
		usb_ep_fifo_flush(dev->ep_out);
		dev->epOut_halt=0;
		usb_ep_clear_halt(dev->ep_out);
		printk("%s, line %d: ret %d!! <dev->epOut_halt = %d> finish the reset \n", __func__, __LINE__, ret, dev->epOut_halt);
	}

	/* a host-initiated reset cancels this read; ack it and go READY */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_RESET) {
		DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
		dev->state = STATE_READY;
		DBG(dev->cdev, "%s: dev->state = %d, change back to Ready state;!! \n", __func__, dev->state);
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	spin_unlock_irq(&dev->lock);

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}

	/* re-check for a reset that arrived while we slept */
	spin_lock_irq(&dev->lock);
	if(dev->state == STATE_RESET)
	{
		DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
		dev->state = STATE_READY;
		DBG(dev->cdev, "%s: dev->state = %d, change back to Ready state;!! \n", __func__, dev->state);
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	spin_unlock_irq(&dev->lock);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY);
	if (ret < 0) {
		/* interrupted by a signal: take the request back */
		r = ret;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (!dev->rx_done) {
		/* woken by a state change, not by completion: cancel */
		DBG(cdev, "%s, line %d: ret %d!! <!dev->rx_done> dev->state = %d, dev->rx_done = %d \n", __func__, __LINE__, ret, dev->state, dev->rx_done);
		printk("%s, line %d: ret %d!! <!dev->rx_done> dev->state = %d, dev->rx_done = %d \n", __func__, __LINE__, ret, dev->state, dev->rx_done);
		r = -ECANCELED;
		dev->state = STATE_ERROR;
		usb_ep_dequeue(dev->ep_out, req);
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else if(dev->state == STATE_RESET) {
		/* reset arrived after completion: still deliver the data */
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(dev->cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	/* restore READY unless the device went offline or was canceled */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
	DBG(cdev, "mtp_read returning %zd\n", r);
	return r;
}
/*
 * Write userspace data to the host over the bulk-IN endpoint, chunked at
 * MTP_BULK_BUFFER_SIZE.  A zero-length packet is appended when the total
 * length is an exact multiple of the endpoint's max packet size, so the
 * host can detect end-of-transfer.  Returns bytes written, -ECANCELED on
 * cancel/reset, -ENODEV when offline, -EIO/-EFAULT on errors.
 */
static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	ssize_t r = count;
	unsigned xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_RESET) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		DBG(cdev, "%s, line %d: mtp_write return ENODEV = %d\n", __func__, __LINE__, ENODEV);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			((req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY));
		if (!req) {
			/* woken by signal or state change without a request */
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;
		/* xfer == 0 only for the trailing ZLP; skip the copy then */
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	/* on error exit, return the unqueued request to the idle pool */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state == STATE_RESET) {
		DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
		dev->state = STATE_READY;
		r = -ECANCELED;
	} else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
	DBG(cdev, "mtp_write returning %zd\n", r);
	return r;
}
/* read from a local file and write to USB */
/*
 * Work-queue handler for MTP_SEND_FILE / MTP_SEND_FILE_WITH_HEADER.
 * Streams dev->xfer_file over the bulk-IN endpoint in MTP_BULK_BUFFER_SIZE
 * chunks, optionally prepending one MTP data header to the first chunk.
 * Tracks the IOMAXNUM slowest vfs_read() calls (milliseconds, ascending)
 * for debugging, and stores the outcome in dev->xfer_result.
 */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
		send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = 0;
	struct mtp_data_header *header;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret, hdr_size;
	int r = 0;
	int sendZLP = 0;
#define IOMAXNUM 5
	int iotimeMax[IOMAXNUM] = {0};
	struct timeval tv_begin, tv_end;
	int i = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	/* the MTP data header, if requested, counts toward the total length */
	if (dev->xfer_send_header) {
		hdr_size = sizeof(struct mtp_data_header);
		count += hdr_size;
	} else {
		hdr_size = 0;
	}

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = 0;
		ret = wait_event_interruptible(dev->write_wq,
			(req = mtp_req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		else if (dev->state == STATE_RESET) {
			DBG(dev->cdev, "%s: dev->state = %d, device is under reset state!! \n", __func__, dev->state);
			r = -ECANCELED;
			break;
		}
		if (!req) {
			/* woken by a signal without a request */
			r = ret;
			break;
		}

		if (count > MTP_BULK_BUFFER_SIZE)
			xfer = MTP_BULK_BUFFER_SIZE;
		else
			xfer = count;

		if (hdr_size) {
			/* prepend MTP data header */
			header = (struct mtp_data_header *)req->buf;
			/* lengths >= 4GB are encoded as 0xffffffff per MTP */
			if (count >= 0xffffffff)
				header->length = __cpu_to_le32(0xffffffff);
			else
				header->length = __cpu_to_le32(count);
			header->type = __cpu_to_le16(2); /* data packet */
			header->command = __cpu_to_le16(dev->xfer_command);
			header->transaction_id =
				__cpu_to_le32(dev->xfer_transaction_id);
		}

		do_gettimeofday(&tv_begin);
		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
			&offset);
		do_gettimeofday(&tv_end);
		{
			/* insert this read's duration into the sorted
			 * (ascending) top-IOMAXNUM table, shifting the
			 * smaller entries down */
			/* ignore the difference under msec */
			int pos = -1;
			int time_msec = (tv_end.tv_sec * 1000 + tv_end.tv_usec / 1000)
				- (tv_begin.tv_sec * 1000 + tv_begin.tv_usec / 1000);
			for (i = 0; i < IOMAXNUM; ++i){
				if (time_msec > iotimeMax[i])
					pos = i;
				else
					break;
			}
			if (pos > 0){
				for (i = 1; i <= pos; ++i){
					iotimeMax[i-1] = iotimeMax[i];
				}
			}
			if (pos != -1)
				iotimeMax[pos] = time_msec;
		}
		if (ret < 0) {
			r = ret;
			DBG(cdev, "send_file_work: vfs_read error %d\n", ret);
			if (dev->dev_disconnected) {
				/* USB SW disconnected */
				dev->state = STATE_OFFLINE;
			} else {
				/* ex: Might be SD card plug-out with USB connected */
				dev->state = STATE_ERROR;
				mtp_ueventToDisconnect(dev);
			}
			break;
		}
		xfer = ret + hdr_size;
		/* the header is only sent with the first chunk */
		hdr_size = 0;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			if (dev->dev_disconnected) {
				/* USB SW disconnected */
				dev->state = STATE_OFFLINE;
			} else {
				/* ex: Might be SD card plug-out with USB connected */
				dev->state = STATE_ERROR;
				mtp_ueventToDisconnect(dev);
			}
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = 0;
	}

	DBG(dev->cdev, "%s, line = %d: req = 0x%p \n", __func__, __LINE__, req);
	/* on error exit, return the unqueued request to the idle pool */
	if (req)
		mtp_req_put(dev, &dev->tx_idle, req);
	DBG(dev->cdev, "[mtp]top time of vfs_read() in %s:\n", __func__);
	for (i = 0; i < IOMAXNUM; ++i){
		DBG(dev->cdev, "[mtp] %d msec\n", iotimeMax[i]);
	}

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
/* read from USB and write to a local file */
/*
 * Work-queue handler for MTP_RECEIVE_FILE.  Double-buffers across
 * dev->rx_req[]: while one request is queued on the bulk-OUT endpoint,
 * the previously completed one is written to dev->xfer_file.  A count of
 * 0xFFFFFFFF means "read until a short/zero-length packet".  Tracks the
 * IOMAXNUM slowest vfs_write() calls for debugging and stores the outcome
 * in dev->xfer_result.
 */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
		receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
	int64_t total_size=0;
#endif
#define IOMAXNUM 5
	int iotimeMax[IOMAXNUM] = {0};
	struct timeval tv_begin, tv_end;
	int i = 0;

	/* read our parameters */
	smp_rmb();

	/* recover a previously-halted OUT endpoint before queueing */
	if (dev->epOut_halt) {
		printk("%s, line %d: <dev->epOut_halt = %d> reset the out ep \n", __func__, __LINE__, dev->epOut_halt);
		mdelay(2000);
		usb_ep_fifo_flush(dev->ep_out);
		dev->epOut_halt=0;
		usb_ep_clear_halt(dev->ep_out);
	}

	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);

	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > MTP_BULK_BUFFER_SIZE
				? MTP_BULK_BUFFER_SIZE : count);
/* This might be modified TBD,
so far, there is only sharedSD with EXT4 FFS could transfer Object with size oevr 4GBs*/
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
			/* past 4GB a short packet must be allowed, since it
			 * signals EOF for oversized objects */
			if(total_size >= 0xFFFFFFFF)
				read_req->short_not_ok = 0;
			else {
				if (0 == (read_req->length % dev->ep_out->maxpacket ))
					read_req->short_not_ok = 1;
				else
					read_req->short_not_ok = 0;
			}
#else
			/* Add for RX mode 1 */
			if (0 == (read_req->length % dev->ep_out->maxpacket ))
				read_req->short_not_ok = 1;
			else
				read_req->short_not_ok = 0;
			DBG(cdev, "read_req->short_not_ok(%d), ep_out->maxpacket (%d)\n",
				read_req->short_not_ok, dev->ep_out->maxpacket);
#endif
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				pr_debug("%s, line %d: EIO, dev->dev_disconnected = %d, usb queue error \n", __func__, __LINE__, dev->dev_disconnected);
				if (dev->dev_disconnected) {
					dev->state = STATE_OFFLINE;
				} else {
					dev->state = STATE_ERROR;
					mtp_ueventToDisconnect(dev);
				}
				break;
			}
		}

		/* write out the chunk completed on the previous iteration */
		if (write_req) {
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			do_gettimeofday(&tv_begin);
			ret = vfs_write(filp, write_req->buf, write_req->actual,
				&offset);
			do_gettimeofday(&tv_end);
			DBG(cdev, "vfs_write %d\n", ret);
			{
				/* insert this write's duration into the sorted
				 * (ascending) top-IOMAXNUM table */
				/* ignore the difference under msec */
				int pos = -1;
				int time_msec = (tv_end.tv_sec * 1000 + tv_end.tv_usec / 1000)
					- (tv_begin.tv_sec * 1000 + tv_begin.tv_usec / 1000);
				for (i = 0; i < IOMAXNUM; ++i){
					if (time_msec > iotimeMax[i])
						pos = i;
					else
						break;
				}
				if (pos > 0){
					for (i = 1; i <= pos; ++i){
						iotimeMax[i-1] = iotimeMax[i];
					}
				}
				if (pos != -1)
					iotimeMax[pos] = time_msec;
			}
			if (ret != write_req->actual) {
				r = -EIO;
				pr_debug("%s, line %d: EIO, dev->dev_disconnected = %d, file write error \n", __func__, __LINE__, dev->dev_disconnected);
				if (dev->dev_disconnected)
					dev->state = STATE_OFFLINE;
				else {
					dev->state = STATE_ERROR;
					mtp_ueventToDisconnect(dev);
				}
				break;
			}
			write_req = NULL;
		}

		DBG(dev->cdev, "%s, line %d: Wait for read_req = %p!! \n", __func__, __LINE__, read_req);
		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				pr_debug("%s, line %d: dev->state = %d, get cancel command !! Cancel it!! rx_done = %d\n", __func__, __LINE__, dev->state, dev->rx_done);
				r = -ECANCELED;
				/* only dequeue if the request is still pending */
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			if (dev->state == STATE_RESET) {
				DBG(dev->cdev, "%s: dev->state = %d, get reset command !! Cancel it!! rx_done = %d\n", __func__, dev->state, dev->rx_done);
				r = -ECANCELED;
				DBG(dev->cdev, "%s, %d: request to usb_ep_dequeue!! \n", __func__, __LINE__);
				usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read until
			 * we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
#if 1 /* #ifdef CONFIG_MTK_SHARED_SDCARD */
			total_size += read_req->actual;
			DBG(cdev, "%s, line %d: count = %lld, total_size = %lld, read_req->actual = %d, read_req->length= %d\n", __func__, __LINE__, count, total_size, read_req->actual, read_req->length);
#endif
			if (read_req->actual < read_req->length) {
				/*
				 * short packet is used to signal EOF for
				 * sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}
			/* Add for RX mode 1 */
			read_req->short_not_ok = 0;
			DBG(dev->cdev, "%s, line %d: dev->state = %d, NEXT!!\n", __func__, __LINE__, dev->state);
			/* hand the completed buffer to the write phase */
			write_req = read_req;
			read_req = NULL;
		}
	}

	/* on error/offline exit, clear short_not_ok on the leftover request */
	if (dev->state == STATE_ERROR || dev->state == STATE_OFFLINE) {
		DBG(dev->cdev, "%s, line %d: read_req = %p \n", __func__, __LINE__, read_req);
		if (read_req) {
			read_req->short_not_ok = 0;
		}
	}

	DBG(dev->cdev, "[mtp]top time of vfs_write() in %s:\n", __func__);
	for (i = 0; i < IOMAXNUM; ++i){
		DBG(dev->cdev, "[mtp] %d msec\n", iotimeMax[i]);
	}
	pr_debug("%s, line %d: receive_file_work returning %d \n", __func__, __LINE__, r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}
/*
 * Send a userspace-supplied MTP event packet on the interrupt endpoint.
 * Waits up to 1s for an idle interrupt request.  Returns 0 on success,
 * -EINVAL for a bad length, -ENODEV when offline, -ETIME when no request
 * became available, -EFAULT on a bad user pointer, or the usb_ep_queue()
 * result.
 */
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req = NULL;
	int ret;
	int length = event->length;
	/* byte offset of the 16-bit event code within the PTP container */
	int eventIndex = 6;

	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;

	ret = wait_event_interruptible_timeout(dev->intr_wq,
		(req = mtp_req_get(dev, &dev->intr_idle)),
		msecs_to_jiffies(1000));
	if (!req)
		return -ETIME;

	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
		mtp_req_put(dev, &dev->intr_idle, req);
		return -EFAULT;
	}
	req->length = length;
	DBG(dev->cdev, "mtp_send_event: EventCode: req->buf[7] = 0x%x, req->buf[6] = 0x%x\n", ((char*)req->buf)[eventIndex+1], ((char*)req->buf)[eventIndex]);
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	/* queue failed: put the request back into the idle pool */
	if (ret)
		mtp_req_put(dev, &dev->intr_idle, req);

	return ret;
}
/*
 * ioctl handler for /dev/mtp_usb.  File-transfer ioctls run the transfer
 * on dev->wq and block until it finishes; MTP_SEND_EVENT queues an event
 * and returns without touching dev->state.  Serialized by ioctl_excl.
 */
static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	/* logging-only switch: trace which ioctl was requested */
	switch (code)
	{
	case MTP_SEND_FILE:
		pr_debug("%s: MTP_SEND_FILE, code = 0x%x\n", __func__, code);
		break;
	case MTP_RECEIVE_FILE:
		pr_debug("%s: MTP_RECEIVE_FILE, code = 0x%x\n", __func__, code);
		break;
	case MTP_SEND_FILE_WITH_HEADER:
		pr_debug("%s: MTP_SEND_FILE_WITH_HEADER, code = 0x%x\n", __func__, code);
		break;
	case MTP_SEND_EVENT:
		pr_debug("%s: MTP_SEND_EVENT, code = 0x%x\n", __func__, code);
		break;
	}

	/* one ioctl at a time */
	if (mtp_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	case MTP_SEND_FILE_WITH_HEADER:
	{
		struct mtp_file_range mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			DBG(dev->cdev, "%s: cancel!!! \n", __func__);
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_RESET) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE_WITH_HEADER) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 1;
			dev->xfer_command = mfr.command;
			dev->xfer_transaction_id = mfr.transaction_id;
		} else if (code == MTP_SEND_FILE) {
			work = &dev->send_file_work;
			dev->xfer_send_header = 0;
		} else {
			work = &dev->receive_file_work;
		}

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		/* fall through to fail: to restore READY state */
		break;
	}
	case MTP_SEND_EVENT:
	{
		struct mtp_event event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	/* restore READY unless canceled/reset (-ECANCELED) or offline */
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state == STATE_RESET)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	mtp_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}
  1414. static int mtp_open(struct inode *ip, struct file *fp)
  1415. {
  1416. printk(KERN_INFO "mtp_open\n");
  1417. if (mtp_lock(&_mtp_dev->open_excl))
  1418. return -EBUSY;
  1419. /* clear any error condition */
  1420. if (_mtp_dev->state != STATE_OFFLINE)
  1421. _mtp_dev->state = STATE_READY;
  1422. fp->private_data = _mtp_dev;
  1423. return 0;
  1424. }
  1425. static int mtp_release(struct inode *ip, struct file *fp)
  1426. {
  1427. unsigned long flags;
  1428. printk(KERN_INFO "mtp_release\n");
  1429. spin_lock_irqsave(&_mtp_dev->lock, flags);
  1430. if (!_mtp_dev->dev_disconnected) {
  1431. spin_unlock_irqrestore(&_mtp_dev->lock, flags);
  1432. mtp_send_devicereset_event(_mtp_dev);
  1433. } else
  1434. spin_unlock_irqrestore(&_mtp_dev->lock, flags);
  1435. mtp_unlock(&_mtp_dev->open_excl);
  1436. return 0;
  1437. }
/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,		/* deliver bulk-OUT data from the host */
	.write = mtp_write,		/* send bulk-IN data to the host */
	.unlocked_ioctl = mtp_ioctl,
	/* same handler for 32-bit userspace — assumes the ioctl argument
	 * structs are layout-compatible; confirm for compat builds */
	.compat_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};
/* misc character device registration for the MTP userspace interface */
static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name = mtp_shortname,
	.fops = &mtp_fops,
};
  1453. static void mtp_work(struct work_struct *data)
  1454. {
  1455. char *envp_sessionend[2] = { "MTP=SESSIONEND", NULL };
  1456. pr_debug("%s: __begin__ \n", __func__);
  1457. kobject_uevent_env(&mtp_device.this_device->kobj, KOBJ_CHANGE, envp_sessionend);
  1458. }
  1459. static void mtp_ueventToDisconnect(struct mtp_dev *dev)
  1460. {
  1461. char *envp_mtpAskDisconnect[2] = { "USB_STATE=MTPASKDISCONNECT", NULL };
  1462. pr_debug("%s: __begin__ \n", __func__);
  1463. kobject_uevent_env(&mtp_device.this_device->kobj, KOBJ_CHANGE, envp_mtpAskDisconnect);
  1464. }
/*
 * Record the active USB function combination string and map it to an index
 * in USB_MTP_FUNC[] (stored in dev->curr_mtp_func_index; 0xff = unknown).
 * The index later selects the matching MS OS compat descriptor.
 */
static void mtp_read_usb_functions(int functions_no, char * buff)
{
	struct mtp_dev *dev = _mtp_dev;
	int i;

	DBG(dev->cdev, "%s: dev->curr_mtp_func_index = 0x%x\n",__func__, dev->curr_mtp_func_index);
	dev->usb_functions_no = functions_no;
	dev->curr_mtp_func_index = 0xff;	/* unknown until matched below */
	/* NOTE(review): copies sizeof(dev->usb_functions) bytes regardless of
	 * the actual length of buff — assumes every caller passes a buffer at
	 * least that large; confirm against callers. */
	memcpy(dev->usb_functions, buff, sizeof(dev->usb_functions));
	DBG(dev->cdev, "%s:usb_functions_no = %d, usb_functions=%s\n",__func__, dev->usb_functions_no, dev->usb_functions);
	for(i=0;i<USB_MTP_FUNCTIONS;i++)
	{
		if(!strcmp(dev->usb_functions, USB_MTP_FUNC[i]))
		{
			DBG(dev->cdev, "%s: usb functions = %s, i = %d \n",__func__, dev->usb_functions, i);
			dev->curr_mtp_func_index = i;
			break;
		}
	}
}
/* Enable/disable codes for the two file-transfer directions.
 * NOTE(review): not referenced within this chunk — presumably consumed by
 * control-request or attribute handling elsewhere in the file; confirm. */
enum FILE_ACTION_ENABLED
{
	SEND_FILE_ENABLE = 0,
	SEND_FILE_DISABLE = 1,
	RECEIVE_FILE_ENABLE = 2,
	RECEIVE_FILE_DISABLE = 3
};
  1491. static void mtp_ep_flush_all(void)
  1492. {
  1493. struct mtp_dev *dev = _mtp_dev;
  1494. DBG(dev->cdev, "%s: __begin__ \n", __func__);
  1495. dev->state = STATE_RESET;
  1496. DBG(dev->cdev, "%s: __end__ \n", __func__);
  1497. }
  1498. static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
  1499. const struct usb_ctrlrequest *ctrl)
  1500. {
  1501. struct mtp_dev *dev = _mtp_dev;
  1502. int value = -EOPNOTSUPP;
  1503. u16 w_index = le16_to_cpu(ctrl->wIndex);
  1504. u16 w_value = le16_to_cpu(ctrl->wValue);
  1505. u16 w_length = le16_to_cpu(ctrl->wLength);
  1506. unsigned long flags;
  1507. VDBG(cdev, "mtp_ctrlrequest "
  1508. "%02x.%02x v%04x i%04x l%u\n",
  1509. ctrl->bRequestType, ctrl->bRequest,
  1510. w_value, w_index, w_length);
  1511. /* Handle MTP OS string */
  1512. if (ctrl->bRequestType ==
  1513. (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
  1514. && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
  1515. && (w_value >> 8) == USB_DT_STRING
  1516. && (w_value & 0xFF) == MTP_OS_STRING_ID) {
  1517. value = (w_length < sizeof(mtp_os_string)
  1518. ? w_length : sizeof(mtp_os_string));
  1519. memcpy(cdev->req->buf, mtp_os_string, value);
  1520. } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
  1521. /* Handle MTP OS descriptor */
  1522. DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
  1523. ctrl->bRequest, w_index, w_value, w_length);
  1524. if (ctrl->bRequest == 1
  1525. && (ctrl->bRequestType & USB_DIR_IN)
  1526. && (w_index == 5)) {
  1527. value = (w_length < sizeof(mtp_ext_prop_desc) ?
  1528. w_length : sizeof(mtp_ext_prop_desc));
  1529. DBG(cdev, "vendor request: Property OS Feature, w_length = %d, value = %d \n", w_length, value);
  1530. memcpy(cdev->req->buf, &mtp_ext_prop_desc, value);
  1531. } else if (ctrl->bRequest == 1
  1532. && (ctrl->bRequestType & USB_DIR_IN)
  1533. && (w_index == 4)) {
  1534. switch(dev->curr_mtp_func_index)
  1535. {
  1536. case 0: /* mtp */
  1537. value = (w_length < sizeof(mtp_ext_config_desc) ?
  1538. w_length : sizeof(mtp_ext_config_desc));
  1539. memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
  1540. break;
  1541. case 1: /* mtp,acm , with acm, failed so far */
  1542. case 2: /* mtp,adb */
  1543. case 4: /* mtp,mass_storage */
  1544. value = (w_length < sizeof(mtp_ext_config_desc_2) ?
  1545. w_length : sizeof(mtp_ext_config_desc_2));
  1546. memcpy(cdev->req->buf, &mtp_ext_config_desc_2, value);
  1547. break;
  1548. case 3: /* mtp,adb,acm , with acm, failed so far */
  1549. case 5: /* mtp,mass_storage,adb */
  1550. value = (w_length < sizeof(mtp_ext_config_desc_3) ?
  1551. w_length : sizeof(mtp_ext_config_desc_3));
  1552. memcpy(cdev->req->buf, &mtp_ext_config_desc_3, value);
  1553. break;
  1554. #ifdef CONFIG_MTK_TC1_FEATURE
  1555. case 6: /* acm,gser,mtp,adb, with acm, xp failed so far */
  1556. value = (w_length < sizeof(mtp_ext_config_desc_4) ?
  1557. w_length : sizeof(mtp_ext_config_desc_4));
  1558. memcpy(cdev->req->buf, &mtp_ext_config_desc_4, value);
  1559. break;
  1560. case 7: /* acm,gser,mtp,adb, with acm, xp failed so far */
  1561. value = (w_length < sizeof(mtp_ext_config_desc_5) ?
  1562. w_length : sizeof(mtp_ext_config_desc_5));
  1563. memcpy(cdev->req->buf, &mtp_ext_config_desc_5, value);
  1564. break;
  1565. #endif
  1566. default: /* unknown, 0xff */
  1567. value = (w_length < sizeof(mtp_ext_config_desc) ?
  1568. w_length : sizeof(mtp_ext_config_desc));
  1569. memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
  1570. break;
  1571. }
  1572. DBG(cdev, "vendor request: Compat ID OS Feature, dev->curr_mtp_func_index = %d, dev->usb_functions = %s \n", dev->curr_mtp_func_index, dev->usb_functions);
  1573. DBG(cdev, "vendor request: Extended OS Feature, w_length = %d, value = %d, dev->curr_mtp_func_index = %d\n", w_length, value, dev->curr_mtp_func_index);
  1574. }
  1575. } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
  1576. DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
  1577. ctrl->bRequest, w_index, w_value, w_length);
  1578. if (ctrl->bRequest == MTP_REQ_CANCEL
  1579. #ifndef CONFIG_MTK_TC1_FEATURE
  1580. && w_index == 0
  1581. #endif
  1582. && w_value == 0) {
  1583. DBG(cdev, "MTP_REQ_CANCEL\n");
  1584. DBG(cdev, "%s: MTP_REQ_CANCEL. dev->state = %d.\n", __func__, dev->state);
  1585. spin_lock_irqsave(&dev->lock, flags);
  1586. if (dev->state == STATE_BUSY) {
  1587. dev->state = STATE_CANCELED;
  1588. wake_up(&dev->read_wq);
  1589. wake_up(&dev->write_wq);
  1590. } else if(dev->state == STATE_READY) {
  1591. dev->state = STATE_CANCELED;
  1592. }
  1593. spin_unlock_irqrestore(&dev->lock, flags);
  1594. /* We need to queue a request to read the remaining
  1595. * bytes, but we don't actually need to look at
  1596. * the contents.
  1597. */
  1598. value = w_length;
  1599. } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
  1600. #ifndef CONFIG_MTK_TC1_FEATURE
  1601. && w_index == 0
  1602. #endif
  1603. && w_value == 0) {
  1604. struct mtp_device_status *status = cdev->req->buf;
  1605. status->wLength =
  1606. __constant_cpu_to_le16(sizeof(*status));
  1607. DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
  1608. spin_lock_irqsave(&dev->lock, flags);
  1609. /* device status is "busy" until we report
  1610. * the cancelation to userspace
  1611. */
  1612. if (dev->state == STATE_CANCELED){
  1613. status->wCode =
  1614. __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
  1615. dev->fileTransferSend ++;
  1616. DBG(cdev, "%s: dev->fileTransferSend = %d \n", __func__, dev->fileTransferSend);
  1617. if(dev->fileTransferSend > 5) {
  1618. dev->fileTransferSend = 0;
  1619. dev->state = STATE_BUSY;
  1620. status->wCode =
  1621. __cpu_to_le16(MTP_RESPONSE_OK);
  1622. }
  1623. } else if(dev->state == STATE_RESET) {
  1624. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1625. dev->fileTransferSend = 0;
  1626. status->wCode =
  1627. __cpu_to_le16(MTP_RESPONSE_OK);
  1628. } else if(dev->state == STATE_ERROR) {
  1629. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1630. dev->fileTransferSend = 0;
  1631. if(dev->epOut_halt){
  1632. status->wCode =
  1633. __cpu_to_le16(MTP_RESPONSE_DEVICE_CANCEL);
  1634. } else
  1635. status->wCode =
  1636. __cpu_to_le16(MTP_RESPONSE_OK);
  1637. } else {
  1638. dev->fileTransferSend = 0;
  1639. status->wCode =
  1640. __cpu_to_le16(MTP_RESPONSE_OK);
  1641. }
  1642. DBG(dev->cdev, "%s: status->wCode = 0x%x, under MTP_REQ_GET_DEVICE_STATUS\n", __func__, status->wCode);
  1643. spin_unlock_irqrestore(&dev->lock, flags);
  1644. value = sizeof(*status);
  1645. } else if (ctrl->bRequest == MTP_REQ_RESET
  1646. #ifndef CONFIG_MTK_TC1_FEATURE
  1647. && w_index == 0
  1648. #endif
  1649. && w_value == 0) {
  1650. struct work_struct *work;
  1651. DBG(dev->cdev, "%s: MTP_REQ_RESET. dev->state = %d. \n", __func__, dev->state);
  1652. spin_lock_irqsave(&dev->lock, flags);
  1653. work = &dev->device_reset_work;
  1654. schedule_work(work);
  1655. /* wait for operation to complete */
  1656. mtp_ep_flush_all();
  1657. DBG(dev->cdev, "%s: wake up the work queue to prevent that they are waiting!!\n", __func__);
  1658. spin_unlock_irqrestore(&dev->lock, flags);
  1659. value = w_length;
  1660. }
  1661. }
  1662. /* respond with data transfer or status phase? */
  1663. if (value >= 0) {
  1664. int rc;
  1665. cdev->req->zero = value < w_length;
  1666. cdev->req->length = value;
  1667. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1668. if (rc < 0)
  1669. ERROR(cdev, "%s: response queue error\n", __func__);
  1670. }
  1671. return value;
  1672. }
  1673. static int ptp_ctrlrequest(struct usb_composite_dev *cdev,
  1674. const struct usb_ctrlrequest *ctrl)
  1675. {
  1676. struct mtp_dev *dev = _mtp_dev;
  1677. int value = -EOPNOTSUPP;
  1678. u16 w_index = le16_to_cpu(ctrl->wIndex);
  1679. u16 w_value = le16_to_cpu(ctrl->wValue);
  1680. u16 w_length = le16_to_cpu(ctrl->wLength);
  1681. unsigned long flags;
  1682. VDBG(cdev, "mtp_ctrlrequest "
  1683. "%02x.%02x v%04x i%04x l%u\n",
  1684. ctrl->bRequestType, ctrl->bRequest,
  1685. w_value, w_index, w_length);
  1686. if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
  1687. DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
  1688. ctrl->bRequest, w_index, w_value, w_length);
  1689. if (ctrl->bRequest == MTP_REQ_CANCEL
  1690. #ifndef CONFIG_MTK_TC1_FEATURE
  1691. && w_index == 0
  1692. #endif
  1693. && w_value == 0) {
  1694. DBG(cdev, "MTP_REQ_CANCEL\n");
  1695. DBG(cdev, "%s: MTP_REQ_CANCEL. dev->state = %d.\n", __func__, dev->state);
  1696. spin_lock_irqsave(&dev->lock, flags);
  1697. if (dev->state == STATE_BUSY) {
  1698. dev->state = STATE_CANCELED;
  1699. wake_up(&dev->read_wq);
  1700. wake_up(&dev->write_wq);
  1701. } else if(dev->state == STATE_READY) {
  1702. dev->state = STATE_CANCELED;
  1703. }
  1704. spin_unlock_irqrestore(&dev->lock, flags);
  1705. /* We need to queue a request to read the remaining
  1706. * bytes, but we don't actually need to look at
  1707. * the contents.
  1708. */
  1709. value = w_length;
  1710. } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
  1711. #ifndef CONFIG_MTK_TC1_FEATURE
  1712. && w_index == 0
  1713. #endif
  1714. && w_value == 0) {
  1715. struct mtp_device_status *status = cdev->req->buf;
  1716. status->wLength =
  1717. __constant_cpu_to_le16(sizeof(*status));
  1718. DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
  1719. spin_lock_irqsave(&dev->lock, flags);
  1720. /* device status is "busy" until we report
  1721. * the cancelation to userspace
  1722. */
  1723. if (dev->state == STATE_CANCELED){
  1724. status->wCode =
  1725. __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
  1726. dev->fileTransferSend ++;
  1727. DBG(cdev, "%s: dev->fileTransferSend = %d \n", __func__, dev->fileTransferSend);
  1728. if(dev->fileTransferSend > 5) {
  1729. dev->fileTransferSend = 0;
  1730. dev->state = STATE_BUSY;
  1731. status->wCode =
  1732. __cpu_to_le16(MTP_RESPONSE_OK);
  1733. }
  1734. } else if(dev->state == STATE_RESET) {
  1735. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1736. dev->fileTransferSend = 0;
  1737. status->wCode =
  1738. __cpu_to_le16(MTP_RESPONSE_OK);
  1739. } else if(dev->state == STATE_ERROR) {
  1740. DBG(dev->cdev, "%s: dev->state = RESET under MTP_REQ_GET_DEVICE_STATUS\n", __func__);
  1741. dev->fileTransferSend = 0;
  1742. if(dev->epOut_halt){
  1743. status->wCode =
  1744. __cpu_to_le16(MTP_RESPONSE_DEVICE_CANCEL);
  1745. } else
  1746. status->wCode =
  1747. __cpu_to_le16(MTP_RESPONSE_OK);
  1748. } else {
  1749. dev->fileTransferSend = 0;
  1750. status->wCode =
  1751. __cpu_to_le16(MTP_RESPONSE_OK);
  1752. }
  1753. DBG(dev->cdev, "%s: status->wCode = 0x%x, under MTP_REQ_GET_DEVICE_STATUS\n", __func__, status->wCode);
  1754. spin_unlock_irqrestore(&dev->lock, flags);
  1755. value = sizeof(*status);
  1756. } else if (ctrl->bRequest == MTP_REQ_RESET
  1757. #ifndef CONFIG_MTK_TC1_FEATURE
  1758. && w_index == 0
  1759. #endif
  1760. && w_value == 0) {
  1761. struct work_struct *work;
  1762. DBG(dev->cdev, "%s: MTP_REQ_RESET. dev->state = %d. \n", __func__, dev->state);
  1763. spin_lock_irqsave(&dev->lock, flags);
  1764. work = &dev->device_reset_work;
  1765. schedule_work(work);
  1766. /* wait for operation to complete */
  1767. mtp_ep_flush_all();
  1768. DBG(dev->cdev, "%s: wake up the work queue to prevent that they are waiting!!\n", __func__);
  1769. spin_unlock_irqrestore(&dev->lock, flags);
  1770. value = w_length;
  1771. }
  1772. }
  1773. /* respond with data transfer or status phase? */
  1774. if (value >= 0) {
  1775. int rc;
  1776. cdev->req->zero = value < w_length;
  1777. cdev->req->length = value;
  1778. rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
  1779. if (rc < 0)
  1780. ERROR(cdev, "%s: response queue error\n", __func__);
  1781. }
  1782. return value;
  1783. }
/*
 * mtp_function_bind() - bind the MTP function to a configuration.
 *
 * Allocates the interface ID (shared by the MTP and PTP interface
 * descriptors), resets the composed-function index used by the vendor
 * OS-descriptor handler, and creates the bulk-in/bulk-out/interrupt
 * endpoints.  Returns 0 on success or a negative errno.
 */
static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev *dev = func_to_mtp(f);
	int id;
	int ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
	printk("mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	/* MTP and PTP expose the same single interface number */
	mtp_interface_desc.bInterfaceNumber = id;
	ptp_interface_desc.bInterfaceNumber = id;
	DBG(cdev, "mtp_function_bind bInterfaceNumber = id= %d\n", id);
	DBG(cdev, "%s: reset dev->curr_mtp_func_index to 0xff \n", __func__);
	/* 0xff = "unknown composition"; selects the default MS OS
	 * compat-ID descriptor in the ep0 vendor-request handler */
	dev->curr_mtp_func_index = 0xff;

	/* allocate endpoints */
	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware: HS descriptors reuse the endpoint
	 * addresses chosen for the full-speed descriptors above */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}
	dev->dev_disconnected = 0;
	/* support super speed hardware */
	if (gadget_is_superspeed(c->cdev->gadget)) {
		mtp_superspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_superspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}
  1828. static void
  1829. mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
  1830. {
  1831. struct mtp_dev *dev = func_to_mtp(f);
  1832. struct usb_request *req;
  1833. int i;
  1834. printk("%s, line %d: \n", __func__, __LINE__);
  1835. while ((req = mtp_req_get(dev, &dev->tx_idle)))
  1836. mtp_request_free(req, dev->ep_in);
  1837. for (i = 0; i < RX_REQ_MAX; i++)
  1838. mtp_request_free(dev->rx_req[i], dev->ep_out);
  1839. while ((req = mtp_req_get(dev, &dev->intr_idle)))
  1840. mtp_request_free(req, dev->ep_intr);
  1841. dev->state = STATE_OFFLINE;
  1842. dev->dev_disconnected = 1;
  1843. }
  1844. static int mtp_function_set_alt(struct usb_function *f,
  1845. unsigned intf, unsigned alt)
  1846. {
  1847. struct mtp_dev *dev = func_to_mtp(f);
  1848. struct usb_composite_dev *cdev = f->config->cdev;
  1849. int ret;
  1850. printk("mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
  1851. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
  1852. if (ret)
  1853. return ret;
  1854. ret = usb_ep_enable(dev->ep_in);
  1855. if (ret)
  1856. return ret;
  1857. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
  1858. if (ret)
  1859. return ret;
  1860. ret = usb_ep_enable(dev->ep_out);
  1861. if (ret) {
  1862. usb_ep_disable(dev->ep_in);
  1863. return ret;
  1864. }
  1865. ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
  1866. if (ret)
  1867. return ret;
  1868. ret = usb_ep_enable(dev->ep_intr);
  1869. if (ret) {
  1870. usb_ep_disable(dev->ep_out);
  1871. usb_ep_disable(dev->ep_in);
  1872. return ret;
  1873. }
  1874. dev->state = STATE_READY;
  1875. dev->dev_disconnected = 0;
  1876. /* readers may be blocked waiting for us to go online */
  1877. wake_up(&dev->read_wq);
  1878. return 0;
  1879. }
/*
 * mtp_function_disable() - called by the composite framework when the
 * configuration is torn down (host disconnect/reset).  Marks the device
 * offline, disables all three endpoints, and wakes any blocked reader
 * so it can observe the OFFLINE state.
 */
static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_mtp(f);
	struct usb_composite_dev *cdev = dev->cdev;

	printk("mtp_function_disable\n");
	/* state is set before disabling endpoints so completion callbacks
	 * see OFFLINE */
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);
	dev->dev_disconnected = 1;
	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);
	VDBG(cdev, "%s disabled\n", dev->function.name);
}
  1894. static int mtp_bind_config(struct usb_configuration *c, bool ptp_config)
  1895. {
  1896. struct mtp_dev *dev = _mtp_dev;
  1897. int ret = 0;
  1898. printk(KERN_INFO "mtp_bind_config\n");
  1899. /* allocate a string ID for our interface */
  1900. if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
  1901. ret = usb_string_id(c->cdev);
  1902. if (ret < 0)
  1903. return ret;
  1904. mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
  1905. mtp_interface_desc.iInterface = ret;
  1906. }
  1907. dev->cdev = c->cdev;
  1908. dev->function.name = "mtp";
  1909. dev->function.strings = mtp_strings;
  1910. if (ptp_config) {
  1911. dev->function.fs_descriptors = fs_ptp_descs;
  1912. dev->function.hs_descriptors = hs_ptp_descs;
  1913. if (gadget_is_superspeed(c->cdev->gadget))
  1914. dev->function.ss_descriptors = ss_ptp_descs;
  1915. } else {
  1916. dev->function.fs_descriptors = fs_mtp_descs;
  1917. dev->function.hs_descriptors = hs_mtp_descs;
  1918. if (gadget_is_superspeed(c->cdev->gadget))
  1919. dev->function.ss_descriptors = ss_mtp_descs;
  1920. }
  1921. dev->function.bind = mtp_function_bind;
  1922. dev->function.unbind = mtp_function_unbind;
  1923. dev->function.set_alt = mtp_function_set_alt;
  1924. dev->function.disable = mtp_function_disable;
  1925. return usb_add_function(c, &dev->function);
  1926. }
  1927. static int mtp_setup(void)
  1928. {
  1929. struct mtp_dev *dev;
  1930. int ret;
  1931. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  1932. if (!dev)
  1933. return -ENOMEM;
  1934. spin_lock_init(&dev->lock);
  1935. init_waitqueue_head(&dev->read_wq);
  1936. init_waitqueue_head(&dev->write_wq);
  1937. init_waitqueue_head(&dev->intr_wq);
  1938. atomic_set(&dev->open_excl, 0);
  1939. atomic_set(&dev->ioctl_excl, 0);
  1940. INIT_LIST_HEAD(&dev->tx_idle);
  1941. INIT_LIST_HEAD(&dev->intr_idle);
  1942. dev->wq = create_singlethread_workqueue("f_mtp");
  1943. if (!dev->wq) {
  1944. ret = -ENOMEM;
  1945. goto err1;
  1946. }
  1947. INIT_WORK(&dev->send_file_work, send_file_work);
  1948. INIT_WORK(&dev->receive_file_work, receive_file_work);
  1949. INIT_WORK(&dev->device_reset_work, mtp_work);
  1950. dev->fileTransferSend = 0;
  1951. dev->epOut_halt = 0;
  1952. dev->dev_disconnected = 0;
  1953. _mtp_dev = dev;
  1954. ret = misc_register(&mtp_device);
  1955. if (ret)
  1956. goto err2;
  1957. return 0;
  1958. err2:
  1959. destroy_workqueue(dev->wq);
  1960. err1:
  1961. _mtp_dev = NULL;
  1962. kfree(dev);
  1963. printk(KERN_ERR "mtp gadget driver failed to initialize\n");
  1964. return ret;
  1965. }
  1966. static void mtp_cleanup(void)
  1967. {
  1968. struct mtp_dev *dev = _mtp_dev;
  1969. if (!dev)
  1970. return;
  1971. misc_deregister(&mtp_device);
  1972. destroy_workqueue(dev->wq);
  1973. _mtp_dev = NULL;
  1974. kfree(dev);
  1975. }