
/*
 * Rawbulk Driver from VIA Telecom
 * Copyright (C) 2011 VIA Telecom, Inc.
 * Author: Karfield Chen (kfchen@via-telecom.com)
 * Copyright (C) 2012 VIA Telecom, Inc.
 * Author: Juelun Guo (jlguo@via-telecom.com)
 *
 * Changes:
 *
 * Sep 2012: Juelun Guo <jlguo@via-telecom.com>
 * Version 1.0.4
 * Changed to support SDIO bypass.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Rawbulk performs transfers between the CBP host driver and the
 * gadget driver:
 *
 *   upstream:   CBP driver ---> gadget IN endpoint
 *   downstream: gadget OUT endpoint ---> CBP driver
 */
/* #define DEBUG */
/* #define VERBOSE_DEBUG */
#define DRIVER_AUTHOR "Juelun Guo <jlguo@via-telecom.com>"
#define DRIVER_DESC "Rawbulk Driver - perform bypass for QingCheng"
#define DRIVER_VERSION "1.0.4"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "viatel_rawbulk.h"
#include <linux/moduleparam.h>
#include "modem_sdio.h"
#define terr(t, fmt, args...) pr_err("Rawbulk [%s]:" fmt "\n", t->name, ##args)
#define STOP_UPSTREAM 0x1
#define STOP_DOWNSTREAM 0x2
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
char *transfer_name[] = { "modem", "ets", "at", "pcv", "gps" };
#else
char *transfer_name[] = { "pcv", "modem", "ets", "at", "gps" };
#endif
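/*
 * Per-transfer statistics, indexed by transfer id: bytes and requests
 * moved upstream, bytes dropped when the cache pool is exhausted,
 * allocation failures, and the current size of the cache pool.
 */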
unsigned int upstream_data[_MAX_TID] = { 0 };
unsigned int upstream_cnt[_MAX_TID] = { 0 };
unsigned int total_drop[_MAX_TID] = { 0 };
unsigned int alloc_fail[_MAX_TID] = { 0 };
unsigned int total_tran[_MAX_TID] = { 0 };
static unsigned long drop_check_timeout;
static unsigned int udata[_MAX_TID] = { 0 };
static unsigned int ucnt[_MAX_TID] = { 0 };
struct rawbulk_transfer {
    enum transfer_id id;
    spinlock_t lock;
    int control;
    struct usb_function *function;
    struct usb_interface *interface;
    rawbulk_autoreconn_callback_t autoreconn;
    struct {
        int ntrans;
        struct list_head transactions;
        struct usb_ep *ep;
    } upstream, downstream, repush2modem, cache_buf_lists;
    int sdio_block;
    int down_flow;
    spinlock_t usb_down_lock;
    spinlock_t modem_block_lock;
    struct delayed_work delayed;
    struct workqueue_struct *flow_wq;
    struct work_struct read_work;
    struct work_struct write_work;
    struct workqueue_struct *rx_wq;
    struct workqueue_struct *tx_wq;
    struct mutex modem_up_mutex;
    struct mutex usb_up_mutex;
    struct timer_list timer;
    spinlock_t flow_lock;
};
static inline int get_epnum(struct usb_host_endpoint *ep)
{
    return (int)(ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
}
static inline int get_maxpacksize(struct usb_host_endpoint *ep)
{
    return (int)(le16_to_cpu(ep->desc.wMaxPacketSize));
}
struct cache_buf {
    int length;
    struct list_head clist;
    struct rawbulk_transfer *transfer;
    int state;
    /* unsigned char buffer[0]; */
    char *buffer;
};
#define MAX_RESPONSE 32
struct rawbulk_transfer_model {
    struct usb_device *udev;
    struct usb_composite_dev *cdev;
    char ctrl_response[MAX_RESPONSE];
    struct rawbulk_transfer transfer[_MAX_TID];
};
static struct rawbulk_transfer_model *rawbulk;
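/* Map a transfer id to its state; returns NULL for out-of-range ids. */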
static struct rawbulk_transfer *id_to_transfer(int transfer_id)
{
    if (transfer_id < 0 || transfer_id >= _MAX_TID)
        return NULL;
    return &rawbulk->transfer[transfer_id];
}
/* extern int rawbulk_usb_state_check(void); */
/*
 * upstream
 */
#define UPSTREAM_STAT_FREE 0
#define UPSTREAM_STAT_UPLOADING 2
struct upstream_transaction {
    int state;
    int stalled;
    char name[32];
    struct list_head tlist;
    struct delayed_work delayed;
    struct rawbulk_transfer *transfer;
    struct usb_request *req;
    int buffer_length;
    /* unsigned char buffer[0]; */
    char *buffer;
};
static unsigned int dump_mask;
static unsigned int full_dump;
static unsigned int max_cache_cnt = 2048;
static unsigned int base_cache_cnt = 1024;
static unsigned int up_note_sz = 1024 * 1024;
static unsigned int drop_check_interval = 1;
unsigned int c2k_usb_dbg_level = C2K_LOG_NOTICE;
module_param(c2k_usb_dbg_level, uint, S_IRUGO | S_IWUSR);
module_param(dump_mask, uint, S_IRUGO | S_IWUSR);
module_param(full_dump, uint, S_IRUGO | S_IWUSR);
module_param(max_cache_cnt, uint, S_IRUGO | S_IWUSR);
module_param(base_cache_cnt, uint, S_IRUGO | S_IWUSR);
module_param(drop_check_interval, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_mask, "Set data dump mask for each transfer");
#ifdef C2K_USB_UT
unsigned int delay_set = 1200; /* unsigned to match the uint module_param type */
module_param(delay_set, uint, S_IRUGO | S_IWUSR);
#endif
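/*
 * Log a hex dump of a buffer when the transfer's bit is set in
 * dump_mask: the head and tail of the buffer by default, or the whole
 * buffer when full_dump is set.
 */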
static inline void dump_data(struct rawbulk_transfer *trans,
        const char *str, const unsigned char *data, int size)
{
    int i;
    char verb[128], *pbuf;
    if (!(dump_mask & (1 << trans->id)))
        return;
    pbuf = verb;
    pbuf += sprintf(pbuf, "DUMP tid = %d, %s: len = %d, chars = \"", trans->id, str, size);
    /* data in ascii */
#if 0
    for (i = 0; i < size; ++i) {
        char c = data[i];
        if (c > 0x20 && c < 0x7e)
            pbuf += sprintf(pbuf, "%c", c);
        else
            pbuf += sprintf(pbuf, ".");
        if (i > 7)
            break;
    }
#endif
    pbuf += sprintf(pbuf, "\", data = ");
    for (i = 0; i < size; ++i) {
        pbuf += sprintf(pbuf, "%.2x ", data[i]);
        if (!full_dump) {
            if (i > 7)
                break;
        }
    }
    if (full_dump || size < 8) {
        C2K_NOTE("%s\n", verb);
        return;
    }
    /* data in tail */
#if 1
    else if (i < size - 8) {
        pbuf += sprintf(pbuf, "... ");
        i = size - 8;
    }
    for (; i < size; ++i)
        pbuf += sprintf(pbuf, "%.2x ", data[i]);
#endif
    C2K_NOTE("%s\n", verb);
}
static struct upstream_transaction *alloc_upstream_transaction(struct rawbulk_transfer *transfer,
        int bufsz)
{
    struct upstream_transaction *t;
    C2K_NOTE("%s\n", __func__);
    /* t = kmalloc(sizeof *t + bufsz * sizeof(unsigned char), GFP_KERNEL); */
    t = kmalloc(sizeof(struct upstream_transaction), GFP_KERNEL);
    if (!t)
        return NULL;
    /* one page per transaction; assumes bufsz <= PAGE_SIZE */
    t->buffer = (char *)__get_free_page(GFP_KERNEL);
    /* t->buffer = kmalloc(bufsz, GFP_KERNEL); */
    if (!t->buffer) {
        kfree(t);
        return NULL;
    }
    t->buffer_length = bufsz;
    t->req = usb_ep_alloc_request(transfer->upstream.ep, GFP_KERNEL);
    if (!t->req)
        goto failto_alloc_usb_request;
    t->req->context = t;
    t->name[0] = 0;
    sprintf(t->name, "U%d ( G:%s)", transfer->upstream.ntrans, transfer->upstream.ep->name);
    INIT_LIST_HEAD(&t->tlist);
    list_add_tail(&t->tlist, &transfer->upstream.transactions);
    transfer->upstream.ntrans++;
    t->transfer = transfer;
    t->state = UPSTREAM_STAT_FREE;
    t->stalled = 0; /* kmalloc'ed, so initialize explicitly */
    return t;
failto_alloc_usb_request:
    /* kfree(t->buffer); */
    free_page((unsigned long)t->buffer);
    kfree(t);
    return NULL;
}
static void free_upstream_transaction(struct rawbulk_transfer *transfer)
{
    struct list_head *p, *n;
    C2K_DBG("%s\n", __func__);
    mutex_lock(&transfer->usb_up_mutex);
    list_for_each_safe(p, n, &transfer->upstream.transactions) {
        struct upstream_transaction *t =
            list_entry(p, struct upstream_transaction, tlist);
        list_del(p);
        /* kfree(t->buffer); */
        free_page((unsigned long)t->buffer);
        usb_ep_free_request(transfer->upstream.ep, t->req);
        kfree(t);
        transfer->upstream.ntrans--;
    }
    mutex_unlock(&transfer->usb_up_mutex);
}
static void free_upstream_sdio_buf(struct rawbulk_transfer *transfer)
{
    struct list_head *p, *n;
    C2K_DBG("%s\n", __func__);
    mutex_lock(&transfer->modem_up_mutex);
    list_for_each_safe(p, n, &transfer->cache_buf_lists.transactions) {
        struct cache_buf *c = list_entry(p, struct cache_buf, clist);
        list_del(p);
        /* kfree(c->buffer); */
        free_page((unsigned long)c->buffer);
        kfree(c);
        transfer->cache_buf_lists.ntrans--;
    }
    mutex_unlock(&transfer->modem_up_mutex);
}
static void upstream_complete(struct usb_ep *ep, struct usb_request *req);
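/*
 * tx_wq worker: pick a filled cache buffer and a free upstream USB
 * request, copy the data over and queue the request on the IN
 * endpoint; the cache slot is recycled once the request is queued.
 */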
static void start_upstream(struct work_struct *work)
{
    int ret = -1, got = 0;
    struct upstream_transaction *t;
    struct rawbulk_transfer *transfer = container_of(work, struct rawbulk_transfer, write_work);
    struct cache_buf *c;
    int length;
    char *buffer;
    int retry = 0;
    struct usb_request *req;
    C2K_DBG("%s\n", __func__);
    /* find a filled cache buffer; ret stays negative if there is none */
    mutex_lock(&transfer->modem_up_mutex);
    list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
        if (c && (c->state == UPSTREAM_STAT_UPLOADING)
                && !(transfer->control & STOP_UPSTREAM)) {
            ret = 0;
            break;
        }
    }
    mutex_unlock(&transfer->modem_up_mutex);
    if (ret < 0) {
        C2K_DBG("%s\n", __func__);
        return;
    }
    if (!c) {
        C2K_DBG("%s\n", __func__);
        return;
    }
    length = c->length;
    buffer = c->buffer;
reget:
    /* find a free USB request to carry the data */
    mutex_lock(&transfer->usb_up_mutex);
    list_for_each_entry(t, &transfer->upstream.transactions, tlist) {
        if (t && (t->state == UPSTREAM_STAT_FREE) && !(transfer->control & STOP_UPSTREAM)) {
            ret = 0;
            retry = 0;
            got = 1;
            break;
        }
    }
    mutex_unlock(&transfer->usb_up_mutex);
    if (ret < 0) {
        if (transfer->control & STOP_UPSTREAM) {
            C2K_NOTE("%s\n", __func__);
            return;
        }
        retry = 1;
    }
    if (retry) {
        C2K_NOTE("%s: up request is busy, try to get usb request\n", __func__);
        goto reget;
    }
    /* check got first: t is not a valid transaction when the walk found nothing */
    if (got == 0 || !t->req) {
        C2K_DBG("%s\n", __func__);
        return;
    }
    req = t->req;
    memcpy(t->buffer, buffer, length);
    dump_data(transfer, "pushing up", t->buffer, length);
    req->length = length;
    req->buf = t->buffer;
    req->complete = upstream_complete;
    req->zero = ((length % transfer->upstream.ep->maxpacket) == 0);
    t->state = UPSTREAM_STAT_UPLOADING;
    ret = usb_ep_queue(transfer->upstream.ep, req, GFP_ATOMIC);
    if (ret < 0) {
        terr(t, "fail to queue request, %d", ret);
        t->state = UPSTREAM_STAT_FREE;
        return;
    }
    c->state = UPSTREAM_STAT_FREE;
}
static void upstream_complete(struct usb_ep *ep, struct usb_request *req)
{
    struct upstream_transaction *t = req->context;
    struct rawbulk_transfer *transfer = t->transfer;
    C2K_DBG("%s\n", __func__);
    t->state = UPSTREAM_STAT_FREE;
    if (req->status < 0) {
        /* if (req->status == -ESHUTDOWN)
         *     return;
         * else
         *     terr(t, "req status %d", req->status); */
        C2K_DBG(" %s: req status %d\n", __func__, req->status);
        return;
    }
    if (!req->actual)
        terr(t, "req actual 0");
    /* update statistics */
    upstream_data[transfer->id] += req->actual;
    upstream_cnt[transfer->id]++;
    udata[transfer->id] += req->actual;
    ucnt[transfer->id]++;
    if (udata[transfer->id] >= up_note_sz) {
        C2K_NOTE("t<%d>,%d Bytes upload\n", transfer->id, udata[transfer->id]);
        udata[transfer->id] = 0;
        ucnt[transfer->id] = 0;
    }
    queue_work(transfer->tx_wq, &transfer->write_work);
}
static void stop_upstream(struct upstream_transaction *t)
{
    struct rawbulk_transfer *transfer = t->transfer;
    C2K_DBG("%s, %p, %p\n", __func__, transfer->upstream.ep, t->req);
    if (t->state == UPSTREAM_STAT_UPLOADING) {
        C2K_NOTE("%s\n", __func__);
        usb_ep_dequeue(transfer->upstream.ep, t->req);
    }
    t->state = UPSTREAM_STAT_FREE;
}
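/*
 * Entry point for the modem side: data received on the SDIO channel is
 * copied into a free cache buffer (growing the pool up to max_cache_cnt
 * if needed) and the tx_wq worker is kicked to push it to the host.
 * Returns the byte count consumed, 0 for the flashless channel, or a
 * negative errno.
 */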
int rawbulk_push_upstream_buffer(int transfer_id, const void *buffer, unsigned int length)
{
    int ret = -ENOENT;
    struct rawbulk_transfer *transfer;
    int count = length;
    struct cache_buf *c;
    C2K_DBG("%s\n", __func__);
    if (transfer_id > (FLS_CH_ID - 1))
        transfer_id--;
    else if (transfer_id == (FLS_CH_ID - 1)) {
        C2K_ERR("channel %d is flashless, no need to bypass\n", (FLS_CH_ID - 1));
        return 0;
    }
    C2K_DBG("%s: transfer_id = %d, length = %d\n", __func__, transfer_id, length);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return -ENODEV;
    mutex_lock(&transfer->modem_up_mutex);
    list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
        if (c && (c->state == UPSTREAM_STAT_FREE) && !(transfer->control & STOP_UPSTREAM)) {
            list_move_tail(&c->clist, &transfer->cache_buf_lists.transactions);
            c->state = UPSTREAM_STAT_UPLOADING;
            ret = 0;
            break;
        }
    }
    /* dynamically grow the cache pool; on allocation failure leave
     * ret < 0 so the drop accounting below handles it */
    if (ret < 0 && transfer->cache_buf_lists.ntrans < max_cache_cnt) {
        c = kmalloc(sizeof(struct cache_buf), GFP_KERNEL);
        if (c) {
            c->buffer = (char *)__get_free_page(GFP_KERNEL);
            /* c->buffer = kmalloc(upsz, GFP_KERNEL); */
            if (!c->buffer) {
                kfree(c);
                c = NULL;
            }
        }
        if (!c) {
            C2K_NOTE("fail to allocate upstream sdio buf n %d\n", transfer_id);
        } else {
            c->state = UPSTREAM_STAT_UPLOADING;
            INIT_LIST_HEAD(&c->clist);
            list_add_tail(&c->clist, &transfer->cache_buf_lists.transactions);
            transfer->cache_buf_lists.ntrans++;
            total_tran[transfer_id] = transfer->cache_buf_lists.ntrans;
            C2K_NOTE("new cache, t<%d>, trans<%d>, alloc_fail<%d>, upstream<%d,%d>\n",
                    transfer_id,
                    transfer->cache_buf_lists.ntrans,
                    alloc_fail[transfer_id],
                    upstream_data[transfer_id], upstream_cnt[transfer_id]);
            ret = 0;
        }
    }
    if (ret < 0) {
        total_drop[transfer_id] += length;
        if (time_after(jiffies, drop_check_timeout)) {
            C2K_NOTE("cache full, t<%d>, drop<%d>, total_drop<%d>\n",
                    transfer_id, length, total_drop[transfer_id]);
            C2K_NOTE("trans<%d>, alloc_fail<%d>, upstream<%d,%d>\n",
                    transfer->cache_buf_lists.ntrans, alloc_fail[transfer_id],
                    upstream_data[transfer_id], upstream_cnt[transfer_id]);
            drop_check_timeout = jiffies + HZ * drop_check_interval;
        }
        mutex_unlock(&transfer->modem_up_mutex);
        return -ENOMEM;
    }
    mutex_unlock(&transfer->modem_up_mutex);
    memcpy(c->buffer, buffer, count);
    c->length = count;
    dump_data(transfer, "pushing up", c->buffer, count);
    queue_work(transfer->tx_wq, &transfer->write_work);
    return count;
}
EXPORT_SYMBOL_GPL(rawbulk_push_upstream_buffer);
/*
 * downstream
 */
#define DOWNSTREAM_STAT_FREE 0
#define DOWNSTREAM_STAT_DOWNLOADING 2
struct downstream_transaction {
    int state;
    int stalled;
    char name[32];
    struct list_head tlist;
    struct rawbulk_transfer *transfer;
    struct usb_request *req;
    int buffer_length;
    /* unsigned char buffer[0]; */
    char *buffer;
};
static void downstream_delayed_work(struct work_struct *work);
static void downstream_complete(struct usb_ep *ep, struct usb_request *req);
static struct downstream_transaction *alloc_downstream_transaction(struct rawbulk_transfer
        *transfer, int bufsz)
{
    struct downstream_transaction *t;
    C2K_NOTE("%s\n", __func__);
    /* t = kzalloc(sizeof *t + bufsz * sizeof(unsigned char), GFP_ATOMIC); */
    t = kmalloc(sizeof(struct downstream_transaction), GFP_ATOMIC);
    if (!t)
        return NULL;
    t->buffer = (char *)__get_free_page(GFP_ATOMIC);
    /* t->buffer = kmalloc(bufsz, GFP_ATOMIC); */
    if (!t->buffer) {
        kfree(t);
        return NULL;
    }
    t->buffer_length = bufsz;
    t->req = usb_ep_alloc_request(transfer->downstream.ep, GFP_ATOMIC);
    if (!t->req)
        goto failto_alloc_usb_request;
    t->name[0] = 0;
    INIT_LIST_HEAD(&t->tlist);
    list_add_tail(&t->tlist, &transfer->downstream.transactions);
    transfer->downstream.ntrans++;
    t->transfer = transfer;
    t->state = DOWNSTREAM_STAT_FREE;
    t->stalled = 0;
    t->req->context = t;
    return t;
failto_alloc_usb_request:
    /* kfree(t->buffer); */
    free_page((unsigned long)t->buffer);
    kfree(t);
    return NULL;
}
static void free_downstream_transaction(struct rawbulk_transfer *transfer)
{
    struct list_head *p, *n;
    unsigned long flags;
    C2K_NOTE("%s\n", __func__);
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_safe(p, n, &transfer->downstream.transactions) {
        struct downstream_transaction *t =
            list_entry(p, struct downstream_transaction, tlist);
        list_del(p);
        /* kfree(t->buffer); */
        if (t->buffer) /* NULL pointer when ETS switch */
            free_page((unsigned long)t->buffer);
        usb_ep_free_request(transfer->downstream.ep, t->req);
        kfree(t);
        transfer->downstream.ntrans--;
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
}
static void stop_downstream(struct downstream_transaction *t)
{
    struct rawbulk_transfer *transfer = t->transfer;
    if (t->state == DOWNSTREAM_STAT_DOWNLOADING) {
        usb_ep_dequeue(transfer->downstream.ep, t->req);
        t->state = DOWNSTREAM_STAT_FREE;
    }
}
static int queue_downstream(struct downstream_transaction *t)
{
    int rc = 0;
    struct rawbulk_transfer *transfer = t->transfer;
    struct usb_request *req = t->req;
    C2K_DBG("%s\n", __func__);
    req->buf = t->buffer;
    req->length = t->buffer_length;
    req->complete = downstream_complete;
    /* if (rawbulk_usb_state_check()) */
    rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
    /* else */
    /* return; */
    if (rc < 0) {
        C2K_ERR("queue err:%d\n", rc);
        return rc;
    }
    t->state = DOWNSTREAM_STAT_DOWNLOADING;
    return 0;
}
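/*
 * Hand a completed OUT transfer to the modem and re-queue the USB
 * request. When the modem side is full (-ENOMEM), the transaction is
 * parked on repush2modem and the delayed worker retries it.
 */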
static int start_downstream(struct downstream_transaction *t)
{
    int rc = 0;
    struct rawbulk_transfer *transfer = t->transfer;
    struct usb_request *req = t->req;
    int time_delayed = msecs_to_jiffies(1);
    C2K_DBG("%s\n", __func__);
    if (transfer->control & STOP_DOWNSTREAM) {
        /* t->state = DOWNSTREAM_STAT_FREE; */
        return -EPIPE;
    }
    rc = modem_buffer_push(transfer->id, t->req->buf, t->req->actual);
    if (rc < 0) {
        if (rc == -ENOMEM) {
            /* modem side is full: block the channel, park this
             * transaction and let the delayed worker repush it */
            spin_lock(&transfer->modem_block_lock);
            transfer->sdio_block = 1;
            spin_unlock(&transfer->modem_block_lock);
            spin_lock(&transfer->usb_down_lock);
            list_move_tail(&t->tlist, &transfer->repush2modem.transactions);
            spin_unlock(&transfer->usb_down_lock);
            transfer->repush2modem.ntrans++;
            transfer->downstream.ntrans--;
            queue_delayed_work(transfer->flow_wq, &transfer->delayed, time_delayed);
            return -EPIPE;
        } else
            return -EPIPE;
    }
    req->buf = t->buffer;
    req->length = t->buffer_length;
    req->complete = downstream_complete;
    /* if (rawbulk_usb_state_check()) */
    rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
    /* else */
    /* return; */
    if (rc < 0) {
        terr(t, "fail to queue request, %d", rc);
        return rc;
    }
    t->state = DOWNSTREAM_STAT_DOWNLOADING;
    return 0;
}
static void downstream_complete(struct usb_ep *ep, struct usb_request *req)
{
#ifdef C2K_USB_UT
    int i;
    static unsigned char last_c;
    unsigned char c;
    char verb[64];
    char compare_val;
    char *ptr;
    char *pbuf;
#endif
    /* struct downstream_transaction *t = container_of(req->buf, */
    /* struct downstream_transaction, buffer); */
    struct downstream_transaction *t = req->context;
    struct rawbulk_transfer *transfer = t->transfer;
    C2K_DBG("%s\n", __func__);
    t->state = DOWNSTREAM_STAT_FREE;
    if (req->status < 0) {
        /* if (req->status == -ESHUTDOWN)
         *     return;
         * else
         *     terr(t, "req status %d", req->status); */
        C2K_DBG("req status %d\n", req->status);
        return;
    }
#ifdef C2K_USB_UT
    /* unit test: verify the incrementing byte pattern sent by the host */
#define PRINT_LIMIT 8
    ptr = (char *)t->req->buf;
    pbuf = (char *)verb;
    pbuf += sprintf(pbuf, "down len(%d), %d, ", t->req->actual, (int)sizeof(unsigned char));
    for (i = 0; i < t->req->actual; i++) {
        c = *(ptr + i);
        if (last_c == 0xff)
            compare_val = 0;
        else
            compare_val = last_c + 1;
        if (c != compare_val || ut_err == 1) {
            if (c != compare_val) {
                C2K_NOTE("<%x,%x, %x>, sizeof(unsigned char):%d\n", c, last_c,
                        compare_val, (int)sizeof(unsigned char));
            }
            ut_err = 1;
        }
        if (i < PRINT_LIMIT)
            pbuf += sprintf(pbuf, "%c ", c);
        last_c = c; /* keep updating data */
    }
    C2K_DBG("%s, last_c(%x)\n", verb, last_c);
    if (ut_err)
        C2K_ERR("UT data compare error\n");
#endif
    dump_data(transfer, "downstream", t->buffer, req->actual);
    spin_lock(&transfer->modem_block_lock);
    if (!!transfer->sdio_block) {
        /* modem still blocked: park this transaction for the repush worker */
        spin_unlock(&transfer->modem_block_lock);
        spin_lock(&transfer->usb_down_lock);
        list_move_tail(&t->tlist, &transfer->repush2modem.transactions);
        spin_unlock(&transfer->usb_down_lock);
        transfer->repush2modem.ntrans++;
        transfer->downstream.ntrans--;
        return;
    }
    spin_unlock(&transfer->modem_block_lock);
    start_downstream(t);
}
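/*
 * flow_wq worker: retry the transactions parked on repush2modem. Each
 * buffer the modem accepts moves back to the downstream list and its
 * USB request is re-queued; if the modem is still full, reschedule.
 */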
static void downstream_delayed_work(struct work_struct *work)
{
    int rc = 0;
    unsigned long flags;
    struct downstream_transaction *downstream, *downstream_copy;
    struct usb_request *req;
    int time_delayed = msecs_to_jiffies(1);
    struct rawbulk_transfer *transfer =
        container_of(work, struct rawbulk_transfer, delayed.work);
    C2K_NOTE("%s\n", __func__);
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_entry_safe(downstream, downstream_copy, &transfer->repush2modem.transactions,
            tlist) {
        spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
        rc = modem_buffer_push(transfer->id, downstream->req->buf, downstream->req->actual);
        if (rc < 0) {
            if (rc != -ENOMEM)
                terr(downstream, "port is not present");
            if (!(transfer->control & STOP_DOWNSTREAM)) {
                queue_delayed_work(transfer->flow_wq, &transfer->delayed,
                        time_delayed);
            }
            return;
        }
        spin_lock_irqsave(&transfer->usb_down_lock, flags);
        list_move_tail(&downstream->tlist, &transfer->downstream.transactions);
        spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
        downstream->stalled = 0;
        downstream->state = DOWNSTREAM_STAT_FREE;
        req = downstream->req;
        req->buf = downstream->buffer;
        req->length = downstream->buffer_length;
        req->complete = downstream_complete;
        /* if (rawbulk_usb_state_check()) */
        rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
        /* else */
        /* return; */
        if (rc < 0) {
            terr(downstream, "fail to queue request, %d", rc);
            downstream->stalled = 1;
            return;
        }
        downstream->state = DOWNSTREAM_STAT_DOWNLOADING;
        transfer->repush2modem.ntrans--;
        transfer->downstream.ntrans++;
        spin_lock_irqsave(&transfer->usb_down_lock, flags);
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
    spin_lock_irqsave(&transfer->modem_block_lock, flags);
    transfer->sdio_block = 0;
    spin_unlock_irqrestore(&transfer->modem_block_lock, flags);
}
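/*
 * Switch a channel into bypass mode: intercept the SDIO channel, then
 * allocate upstream USB requests, the upstream cache pool and
 * downstream USB requests, and queue the initial OUT requests.
 * Everything is undone on any failure.
 */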
int rawbulk_start_transactions(int transfer_id, int nups, int ndowns, int upsz, int downsz)
{
    int n;
    int rc, ret, up_cache_cnt;
    unsigned long flags;
    struct rawbulk_transfer *transfer;
    struct upstream_transaction *upstream; /* upstream_copy; */
    struct downstream_transaction *downstream, *downstream_copy;
    struct cache_buf *c;
    C2K_NOTE("%s\n", __func__);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return -ENODEV;
    if (!rawbulk->cdev)
        return -ENODEV;
    if (!transfer->function)
        return -ENODEV;
    C2K_NOTE("start transactions on id %d, nups %d ndowns %d upsz %d downsz %d\n",
            transfer_id, nups, ndowns, upsz, downsz);
    /* stop the host transfer first */
    ret = sdio_rawbulk_intercept(transfer->id, 1);
    if (ret < 0) {
        C2K_ERR("bypass sdio failed, channel id = %d\n", transfer->id);
        return ret;
    }
    transfer->sdio_block = 0;
    spin_lock(&transfer->flow_lock);
    transfer->down_flow = 0;
    spin_unlock(&transfer->flow_lock);
    mutex_lock(&transfer->usb_up_mutex);
    for (n = 0; n < nups; n++) {
        upstream = alloc_upstream_transaction(transfer, upsz);
        if (!upstream) {
            rc = -ENOMEM;
            mutex_unlock(&transfer->usb_up_mutex);
            C2K_NOTE("fail to allocate upstream transaction n %d", n);
            goto failto_alloc_upstream;
        }
    }
    mutex_unlock(&transfer->usb_up_mutex);
    mutex_lock(&transfer->modem_up_mutex);
    /* the ETS and MODEM channels start with a larger cache pool */
    if (transfer_id == RAWBULK_TID_ETS || transfer_id == RAWBULK_TID_MODEM)
        up_cache_cnt = base_cache_cnt;
    else
        up_cache_cnt = 8 * nups;
    C2K_NOTE("t<%d>, up_cache_cnt<%d>\n", transfer_id, up_cache_cnt);
    for (n = 0; n < up_cache_cnt; n++) {
        /* c = kzalloc(sizeof *c + upsz * sizeof(unsigned char), GFP_KERNEL); */
        c = kmalloc(sizeof(struct cache_buf), GFP_KERNEL);
        if (!c) {
            rc = -ENOMEM;
            mutex_unlock(&transfer->modem_up_mutex);
            C2K_NOTE("fail to allocate upstream sdio buf n %d", n);
            alloc_fail[transfer_id] = 1;
            goto failto_alloc_up_sdiobuf;
        }
        c->buffer = (char *)__get_free_page(GFP_KERNEL);
        /* c->buffer = kmalloc(upsz, GFP_KERNEL); */
        if (!c->buffer) { /* was "if (!c)", which missed the page allocation failure */
            rc = -ENOMEM;
            kfree(c);
            mutex_unlock(&transfer->modem_up_mutex);
            C2K_NOTE("fail to allocate upstream sdio buf n %d", n);
            alloc_fail[transfer_id] = 1;
            goto failto_alloc_up_sdiobuf;
        }
        c->state = UPSTREAM_STAT_FREE;
        INIT_LIST_HEAD(&c->clist);
        list_add_tail(&c->clist, &transfer->cache_buf_lists.transactions);
        transfer->cache_buf_lists.ntrans++;
    }
    total_tran[transfer_id] = transfer->cache_buf_lists.ntrans;
    mutex_unlock(&transfer->modem_up_mutex);
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    for (n = 0; n < ndowns; n++) {
        downstream = alloc_downstream_transaction(transfer, downsz);
        if (!downstream) {
            rc = -ENOMEM;
            spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
            C2K_NOTE("fail to allocate downstream transaction n %d", n);
            goto failto_alloc_downstream;
        }
    }
    transfer->control &= ~STOP_UPSTREAM;
    transfer->control &= ~STOP_DOWNSTREAM;
    list_for_each_entry_safe(downstream, downstream_copy, &transfer->downstream.transactions,
            tlist) {
        if (downstream->state == DOWNSTREAM_STAT_FREE && !downstream->stalled) {
            rc = queue_downstream(downstream);
            if (rc < 0) {
                spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
                C2K_NOTE("fail to start downstream %s rc %d\n", downstream->name, rc);
                goto failto_start_downstream;
            }
        }
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
    return 0;
failto_start_downstream:
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_entry(downstream, &transfer->downstream.transactions, tlist)
        stop_downstream(downstream);
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
failto_alloc_up_sdiobuf:
    free_upstream_sdio_buf(transfer);
failto_alloc_downstream:
    free_downstream_transaction(transfer);
failto_alloc_upstream:
    free_upstream_transaction(transfer);
    /* recover the host transfer */
    sdio_rawbulk_intercept(transfer->id, 0);
    return rc;
}
EXPORT_SYMBOL_GPL(rawbulk_start_transactions);
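/*
 * Tear bypass mode down: mark both directions stopped, restore the
 * SDIO channel, flush the workers and release every transaction and
 * cache buffer.
 */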
void rawbulk_stop_transactions(int transfer_id)
{
    unsigned long flags;
    struct rawbulk_transfer *transfer;
    struct upstream_transaction *upstream;
    struct downstream_transaction *downstream, *downstream_copy;
    struct list_head *p, *n;
    C2K_NOTE("t-%d\n", transfer_id);
    transfer = id_to_transfer(transfer_id);
    if (!transfer) {
        C2K_NOTE("t-%d, NULL\n", transfer_id);
        return;
    }
    if (transfer->control) {
        C2K_NOTE("t-%d, ctrl:%d\n", transfer_id, transfer->control);
        return;
    }
    spin_lock(&transfer->lock);
    transfer->control |= (STOP_UPSTREAM | STOP_DOWNSTREAM);
    spin_unlock(&transfer->lock);
    sdio_rawbulk_intercept(transfer->id, 0);
    cancel_delayed_work(&transfer->delayed);
    flush_workqueue(transfer->flow_wq);
    flush_workqueue(transfer->tx_wq);
    mutex_lock(&transfer->usb_up_mutex);
    list_for_each_entry(upstream, &transfer->upstream.transactions, tlist) {
        C2K_DBG("t-%d, upstream<%p>\n", transfer_id, upstream);
        stop_upstream(upstream);
    }
    mutex_unlock(&transfer->usb_up_mutex);
    /* these take the locks internally */
    free_upstream_transaction(transfer);
    free_upstream_sdio_buf(transfer);
    list_for_each_entry_safe(downstream, downstream_copy, &transfer->downstream.transactions,
            tlist) {
        stop_downstream(downstream);
    }
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_safe(p, n, &transfer->repush2modem.transactions) {
        struct downstream_transaction *delayed_t =
            list_entry(p, struct downstream_transaction, tlist);
        list_move_tail(&delayed_t->tlist, &transfer->downstream.transactions);
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
    spin_lock_irqsave(&transfer->modem_block_lock, flags);
    transfer->sdio_block = 0;
    spin_unlock_irqrestore(&transfer->modem_block_lock, flags);
    free_downstream_transaction(transfer);
}
EXPORT_SYMBOL_GPL(rawbulk_stop_transactions);
static char *state2string(int state, int upstream)
{
    if (upstream) {
        switch (state) {
        case UPSTREAM_STAT_FREE:
            return "FREE";
        case UPSTREAM_STAT_UPLOADING:
            return "UPLOADING";
        default:
            return "UNKNOWN";
        }
    } else {
        switch (state) {
        case DOWNSTREAM_STAT_FREE:
            return "FREE";
        case DOWNSTREAM_STAT_DOWNLOADING:
            return "DOWNLOADING";
        default:
            return "UNKNOWN";
        }
    }
}
int rawbulk_transfer_statistics(int transfer_id, char *buf)
{
    char *pbuf = buf;
    struct rawbulk_transfer *transfer;
    struct upstream_transaction *upstream;
    struct downstream_transaction *downstream;
    struct cache_buf *c;
    unsigned long flags;
    C2K_NOTE("%s\n", __func__);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return sprintf(pbuf, "-ENODEV, id %d\n", transfer_id);
    pbuf += sprintf(pbuf, "rawbulk statistics:\n");
    if (rawbulk->cdev && rawbulk->cdev->config)
        pbuf += sprintf(pbuf, " gadget device: %s\n", rawbulk->cdev->config->label);
    else
        pbuf += sprintf(pbuf, " gadget device: -ENODEV\n");
    pbuf += sprintf(pbuf, " upstreams (total %d transactions)\n", transfer->upstream.ntrans);
    mutex_lock(&transfer->usb_up_mutex);
    list_for_each_entry(upstream, &transfer->upstream.transactions, tlist) {
        pbuf += sprintf(pbuf, " %s state: %s", upstream->name,
                state2string(upstream->state, 1));
        pbuf += sprintf(pbuf, ", maxbuf: %d bytes", upstream->buffer_length);
        if (upstream->stalled)
            pbuf += sprintf(pbuf, " (stalled!)");
        pbuf += sprintf(pbuf, "\n");
    }
    mutex_unlock(&transfer->usb_up_mutex);
    pbuf += sprintf(pbuf, " cache_buf_lists (total %d transactions)\n",
            transfer->cache_buf_lists.ntrans);
    mutex_lock(&transfer->modem_up_mutex);
    list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
        pbuf += sprintf(pbuf, " %s state:", state2string(c->state, 1));
        pbuf += sprintf(pbuf, ", maxbuf: %d bytes", c->length);
        pbuf += sprintf(pbuf, "\n");
    }
    mutex_unlock(&transfer->modem_up_mutex);
    pbuf += sprintf(pbuf, " downstreams (total %d transactions)\n",
            transfer->downstream.ntrans);
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_entry(downstream, &transfer->downstream.transactions, tlist) {
        spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
        pbuf += sprintf(pbuf, " %s state: %s", downstream->name,
                state2string(downstream->state, 0));
        pbuf += sprintf(pbuf, ", maxbuf: %d bytes", downstream->buffer_length);
        if (downstream->stalled)
            pbuf += sprintf(pbuf, " (stalled!)");
        pbuf += sprintf(pbuf, "\n");
        spin_lock_irqsave(&transfer->usb_down_lock, flags);
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
    /* was transfer->downstream.ntrans, a copy-paste slip */
    pbuf += sprintf(pbuf, " repush2modem (total %d transactions)\n",
            transfer->repush2modem.ntrans);
    spin_lock_irqsave(&transfer->usb_down_lock, flags);
    list_for_each_entry(downstream, &transfer->repush2modem.transactions, tlist) {
        spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
        pbuf += sprintf(pbuf, " %s state: %s", downstream->name,
                state2string(downstream->state, 0));
        pbuf += sprintf(pbuf, ", maxbuf: %d bytes", downstream->buffer_length);
        if (downstream->stalled)
            pbuf += sprintf(pbuf, " (stalled!)");
        pbuf += sprintf(pbuf, "\n");
        spin_lock_irqsave(&transfer->usb_down_lock, flags);
    }
    spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
    return (int)(pbuf - buf);
}
EXPORT_SYMBOL_GPL(rawbulk_transfer_statistics);
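/*
 * Record the gadget function's OUT/IN endpoints and the autoreconn
 * callback, which is invoked when the SDIO channel is (re)bound.
 */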
int rawbulk_bind_function(int transfer_id, struct usb_function *function,
        struct usb_ep *bulk_out, struct usb_ep *bulk_in,
        rawbulk_autoreconn_callback_t autoreconn_callback)
{
    struct rawbulk_transfer *transfer;
    C2K_NOTE("%s\n", __func__);
    if (!function || !bulk_out || !bulk_in)
        return -EINVAL;
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return -ENODEV;
    transfer->downstream.ep = bulk_out;
    transfer->upstream.ep = bulk_in;
    transfer->function = function;
    rawbulk->cdev = function->config->cdev;
    transfer->autoreconn = autoreconn_callback;
    return 0;
}
EXPORT_SYMBOL_GPL(rawbulk_bind_function);
void rawbulk_unbind_function(int transfer_id)
{
    int n;
    int no_functions = 1;
    struct rawbulk_transfer *transfer;
    C2K_NOTE("%s\n", __func__);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return;
    rawbulk_stop_transactions(transfer_id);
    /* keep the eps: disable->work->stop_transactions may not be complete yet */
    /* transfer->downstream.ep = NULL; */
    /* transfer->upstream.ep = NULL; */
    transfer->function = NULL;
    for (n = 0; n < _MAX_TID; n++) {
        if (!!rawbulk->transfer[n].function)
            no_functions = 0;
    }
    if (no_functions)
        rawbulk->cdev = NULL;
}
EXPORT_SYMBOL_GPL(rawbulk_unbind_function);
int rawbulk_bind_sdio_channel(int transfer_id)
{
    struct rawbulk_transfer *transfer;
    struct rawbulk_function *fn;
    C2K_NOTE("%d\n", transfer_id);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return -ENODEV;
    fn = rawbulk_lookup_function(transfer_id);
    if (fn)
        fn->cbp_reset = 0;
    if (transfer->autoreconn)
        transfer->autoreconn(transfer->id);
    return 0;
}
EXPORT_SYMBOL_GPL(rawbulk_bind_sdio_channel);
void rawbulk_unbind_sdio_channel(int transfer_id)
{
    struct rawbulk_transfer *transfer;
    struct rawbulk_function *fn;
    C2K_NOTE("%d\n", transfer_id);
    transfer = id_to_transfer(transfer_id);
    if (!transfer)
        return;
    rawbulk_stop_transactions(transfer_id);
    fn = rawbulk_lookup_function(transfer_id);
    if (fn) {
        fn->cbp_reset = 1;
        rawbulk_disable_function(fn);
    }
}
EXPORT_SYMBOL_GPL(rawbulk_unbind_sdio_channel);
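/*
 * Module init: allocate the transfer table and, per channel, the
 * transaction lists, locks and the flow-control and tx workqueues.
 * Both directions start in the stopped state.
 */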
static __init int rawbulk_init(void)
{
    int n;
    char name[20];
    C2K_NOTE("%s\n", __func__);
    drop_check_timeout = jiffies;
    rawbulk = kzalloc(sizeof(*rawbulk), GFP_KERNEL);
    if (!rawbulk)
        return -ENOMEM;
    for (n = 0; n < _MAX_TID; n++) {
        struct rawbulk_transfer *t = &rawbulk->transfer[n];
        t->id = n;
        INIT_LIST_HEAD(&t->upstream.transactions);
        INIT_LIST_HEAD(&t->downstream.transactions);
        INIT_LIST_HEAD(&t->repush2modem.transactions);
        INIT_LIST_HEAD(&t->cache_buf_lists.transactions);
        INIT_DELAYED_WORK(&t->delayed, downstream_delayed_work);
        memset(name, 0, 20);
        sprintf(name, "%s_flow_ctrl", transfer_name[n]);
        t->flow_wq = create_singlethread_workqueue(name);
        if (!t->flow_wq)
            return -ENOMEM;
        INIT_WORK(&t->write_work, start_upstream);
        memset(name, 0, 20);
        sprintf(name, "%s_tx_wq", transfer_name[n]);
        t->tx_wq = create_singlethread_workqueue(name);
        if (!t->tx_wq)
            return -ENOMEM;
        mutex_init(&t->modem_up_mutex);
        mutex_init(&t->usb_up_mutex);
        spin_lock_init(&t->lock);
        spin_lock_init(&t->usb_down_lock);
        spin_lock_init(&t->modem_block_lock);
        spin_lock_init(&t->flow_lock);
        /* both directions stay stopped until rawbulk_start_transactions() */
        t->control = STOP_UPSTREAM | STOP_DOWNSTREAM;
    }
    return 0;
}
module_init(rawbulk_init);
static __exit void rawbulk_exit(void)
{
    int n;
    struct rawbulk_transfer *t;
    for (n = 0; n < _MAX_TID; n++) {
        t = &rawbulk->transfer[n];
        rawbulk_stop_transactions(n);
        destroy_workqueue(t->flow_wq);
        destroy_workqueue(t->tx_wq);
    }
    kfree(rawbulk);
}
module_exit(rawbulk_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");