rawbulk_transfer.c

/*
 * Rawbulk Driver from VIA Telecom
 * Copyright (C) 2011 VIA Telecom, Inc.
 * Author: Karfield Chen (kfchen@via-telecom.com)
 * Copyright (C) 2012 VIA Telecom, Inc.
 * Author: Juelun Guo (jlguo@via-telecom.com)
 *
 * Changes:
 *
 * Sep 2012: Juelun Guo <jlguo@via-telecom.com>
 * Version 1.0.4
 * Changed to support SDIO bypass.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Rawbulk is the transfer performer between the CBP host driver and the
 * Gadget driver:
 *
 *   upstream:   CBP Driver ---> Gadget IN
 *   downstream: Gadget OUT ---> CBP Driver
 */
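
/*
 * Data-path overview (illustrative summary of the code below):
 *
 *   upstream:   CBP/CCCI host driver -> rawbulk_push_upstream_buffer()
 *               -> cache_buf list -> start_upstream() -> usb_ep_queue(IN ep)
 *   downstream: usb_ep_queue(OUT ep) -> downstream_complete()
 *               -> ccci_c2k_buffer_push() -> CBP/CCCI host driver
 *
 * A gadget function binds itself with rawbulk_bind_function() and opens
 * the bypass with rawbulk_start_transactions(); sketches of both calls
 * follow those exports below.
 */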
/* #define DEBUG */
/* #define VERBOSE_DEBUG */
#define DRIVER_AUTHOR "Juelun Guo <jlguo@via-telecom.com>"
#define DRIVER_DESC "Rawbulk Driver - perform bypass for QingCheng"
#define DRIVER_VERSION "1.0.4"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/moduleparam.h>
#include <mt-plat/mt_ccci_common.h>
#include "viatel_rawbulk.h"
/* #include "modem_sdio.h" */
#define terr(t, fmt, args...) pr_err("Rawbulk [%s]:" fmt "\n", t->name, ##args)

#define STOP_UPSTREAM 0x1
#define STOP_DOWNSTREAM 0x2

/* extern int modem_buffer_push(int port_num, const unsigned char *buf, int count); */

#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
char *transfer_name[] = { "modem", "ets", "at", "pcv", "gps" };
#else
char *transfer_name[] = { "pcv", "modem", "dummy0", "at", "gps", "dummy1", "dummy2", "ets" };
#endif

unsigned int upstream_data[_MAX_TID] = { 0 };
unsigned int upstream_cnt[_MAX_TID] = { 0 };
unsigned int total_drop[_MAX_TID] = { 0 };
unsigned int alloc_fail[_MAX_TID] = { 0 };
unsigned int total_tran[_MAX_TID] = { 0 };
static unsigned long drop_check_timeout;
static unsigned int udata[_MAX_TID] = { 0 };
static unsigned int ucnt[_MAX_TID] = { 0 };
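
/*
 * Per-transfer state. The four sub-lists track each stage of the bypass:
 * "upstream" holds USB IN transactions, "downstream" holds USB OUT
 * transactions, "repush2modem" parks OUT data the modem refused with
 * -ENOMEM until downstream_delayed_work() can retry it, and
 * "cache_buf_lists" caches modem data waiting for a free IN transaction.
 */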
struct rawbulk_transfer {
	enum transfer_id id;
	spinlock_t lock;
	int control;
	struct usb_function *function;
	struct usb_interface *interface;
	rawbulk_autoreconn_callback_t autoreconn;
	struct {
		int ntrans;
		struct list_head transactions;
		struct usb_ep *ep;
	} upstream, downstream, repush2modem, cache_buf_lists;
	int sdio_block;
	int down_flow;
	spinlock_t usb_down_lock;
	spinlock_t modem_block_lock;
	struct delayed_work delayed;
	struct workqueue_struct *flow_wq;
	struct work_struct read_work;
	struct work_struct write_work;
	struct workqueue_struct *rx_wq;
	struct workqueue_struct *tx_wq;
	struct mutex modem_up_mutex;
	struct mutex usb_up_mutex;
	struct timer_list timer;
	spinlock_t flow_lock;
};
static inline int get_epnum(struct usb_host_endpoint *ep)
{
	return (int)(ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
}

static inline int get_maxpacksize(struct usb_host_endpoint *ep)
{
	return (int)le16_to_cpu(ep->desc.wMaxPacketSize);
}
struct cache_buf {
	int length;
	struct list_head clist;
	struct rawbulk_transfer *transfer;
	int state;
	/* unsigned char buffer[0]; */
	char *buffer;
};

#define MAX_RESPONSE 32

struct rawbulk_transfer_model {
	struct usb_device *udev;
	struct usb_composite_dev *cdev;
	char ctrl_response[MAX_RESPONSE];
	struct rawbulk_transfer transfer[_MAX_TID];
};
static struct rawbulk_transfer_model *rawbulk;

static struct rawbulk_transfer *id_to_transfer(int transfer_id)
{
	if (transfer_id < 0 || transfer_id >= _MAX_TID)
		return NULL;
	return &rawbulk->transfer[transfer_id];
}
/* extern int rawbulk_usb_state_check(void); */

/*
 * upstream
 */
#define UPSTREAM_STAT_FREE 0
#define UPSTREAM_STAT_UPLOADING 2

struct upstream_transaction {
	int state;
	int stalled;
	char name[32];
	struct list_head tlist;
	struct delayed_work delayed;
	struct rawbulk_transfer *transfer;
	struct usb_request *req;
	int buffer_length;
	/* unsigned char buffer[0]; */
	char *buffer;
};

static unsigned int dump_mask;
static unsigned int full_dump;
static unsigned int max_cache_cnt = 2048;
static unsigned int base_cache_cnt = 1024;
static unsigned int up_note_sz = 1024 * 1024;
static unsigned int drop_check_interval = 1;
unsigned int c2k_usb_dbg_level = C2K_LOG_NOTICE;
module_param(c2k_usb_dbg_level, uint, S_IRUGO | S_IWUSR);
module_param(dump_mask, uint, S_IRUGO | S_IWUSR);
module_param(full_dump, uint, S_IRUGO | S_IWUSR);
module_param(max_cache_cnt, uint, S_IRUGO | S_IWUSR);
module_param(base_cache_cnt, uint, S_IRUGO | S_IWUSR);
module_param(drop_check_interval, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_mask, "Set data dump mask for each transfer");

#ifdef C2K_USB_UT
unsigned int delay_set = 1200;
module_param(delay_set, uint, S_IRUGO | S_IWUSR);
#endif
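
/*
 * dump_mask is a per-transfer bitmask: bit n enables dump_data() for the
 * transfer whose id is n (see transfer_name[] above for the id order).
 * Illustrative runtime use; the module directory name depends on how this
 * file is built:
 *
 *	echo 0x2 > /sys/module/rawbulk_transfer/parameters/dump_mask
 */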
static inline void dump_data(struct rawbulk_transfer *trans,
			     const char *str, const unsigned char *data, int size)
{
	int i;
	char verb[128], *pbuf;

	if (!(dump_mask & (1 << trans->id)))
		return;
	pbuf = verb;
	pbuf += sprintf(pbuf, "DUMP tid = %d, %s: len = %d, chars = \"", trans->id, str, size);
	/* data in ascii */
#if 0
	for (i = 0; i < size; ++i) {
		char c = data[i];

		if (c > 0x20 && c < 0x7e)
			pbuf += sprintf(pbuf, "%c", c);
		else
			pbuf += sprintf(pbuf, ".");
		if (i > 7)
			break;
	}
#endif
	pbuf += sprintf(pbuf, "\", data = ");
	for (i = 0; i < size; ++i) {
		pbuf += sprintf(pbuf, "%.2x ", data[i]);
		if (!full_dump) {
			if (i > 7)
				break;
		}
	}
	if (full_dump || size < 8) {
		/* buf to printk */
		C2K_ERR("%s\n", verb);
		return;
	}
	/* data in tail */
#if 1
	else if (i < size - 8) {
		pbuf += sprintf(pbuf, "... ");
		i = size - 8;
	}
	for (; i < size; ++i)
		pbuf += sprintf(pbuf, "%.2x ", data[i]);
#endif
	/* buf to printk */
	C2K_ERR("%s\n", verb);
}
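
/*
 * Upstream transactions pair one usb_request with a page-sized bounce
 * buffer (bufsz is recorded, but the allocation is always one page via
 * __get_free_page()); modem data lands in a cache_buf first and is then
 * copied into a free transaction by start_upstream().
 */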
static struct upstream_transaction *alloc_upstream_transaction(struct rawbulk_transfer *transfer,
							       int bufsz)
{
	struct upstream_transaction *t;

	C2K_NOTE("%s\n", __func__);
	/* t = kmalloc(sizeof *t + bufsz * sizeof(unsigned char), GFP_KERNEL); */
	t = kmalloc(sizeof(struct upstream_transaction), GFP_KERNEL);
	if (!t)
		return NULL;
	t->buffer = (char *)__get_free_page(GFP_KERNEL);
	/* t->buffer = kmalloc(bufsz, GFP_KERNEL); */
	if (!t->buffer) {
		kfree(t);
		return NULL;
	}
	t->buffer_length = bufsz;
	t->req = usb_ep_alloc_request(transfer->upstream.ep, GFP_KERNEL);
	if (!t->req)
		goto failto_alloc_usb_request;
	t->req->context = t;
	t->name[0] = 0;
	sprintf(t->name, "U%d (G:%s)", transfer->upstream.ntrans, transfer->upstream.ep->name);
	INIT_LIST_HEAD(&t->tlist);
	list_add_tail(&t->tlist, &transfer->upstream.transactions);
	transfer->upstream.ntrans++;
	t->transfer = transfer;
	t->state = UPSTREAM_STAT_FREE;
	return t;

failto_alloc_usb_request:
	/* kfree(t->buffer); */
	free_page((unsigned long)t->buffer);
	kfree(t);
	return NULL;
}
static void free_upstream_transaction(struct rawbulk_transfer *transfer)
{
	struct list_head *p, *n;

	C2K_NOTE("%s\n", __func__);
	mutex_lock(&transfer->usb_up_mutex);
	list_for_each_safe(p, n, &transfer->upstream.transactions) {
		struct upstream_transaction *t =
			list_entry(p, struct upstream_transaction, tlist);

		list_del(p);
		/* kfree(t->buffer); */
		free_page((unsigned long)t->buffer);
		usb_ep_free_request(transfer->upstream.ep, t->req);
		kfree(t);
		transfer->upstream.ntrans--;
	}
	mutex_unlock(&transfer->usb_up_mutex);
}

static void free_upstream_sdio_buf(struct rawbulk_transfer *transfer)
{
	struct list_head *p, *n;

	C2K_NOTE("%s\n", __func__);
	mutex_lock(&transfer->modem_up_mutex);
	list_for_each_safe(p, n, &transfer->cache_buf_lists.transactions) {
		struct cache_buf *c = list_entry(p, struct cache_buf, clist);

		list_del(p);
		/* kfree(c->buffer); */
		free_page((unsigned long)c->buffer);
		kfree(c);
		transfer->cache_buf_lists.ntrans--;
	}
	mutex_unlock(&transfer->modem_up_mutex);
}
static void upstream_complete(struct usb_ep *ep, struct usb_request *req);

static void start_upstream(struct work_struct *work)
{
	int ret = -1, got = 0;
	struct upstream_transaction *t;
	struct rawbulk_transfer *transfer = container_of(work, struct rawbulk_transfer, write_work);
	struct cache_buf *c;
	int length;
	char *buffer;
	int retry = 0;
	struct usb_request *req;

	C2K_DBG("%s\n", __func__);
	mutex_lock(&transfer->modem_up_mutex);
	list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
		if (c && (c->state == UPSTREAM_STAT_UPLOADING)
		    && !(transfer->control & STOP_UPSTREAM)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&transfer->modem_up_mutex);
	if (ret < 0)
		return;
	if (!c)
		return;
	length = c->length;
	buffer = c->buffer;

reget:
	mutex_lock(&transfer->usb_up_mutex);
	list_for_each_entry(t, &transfer->upstream.transactions, tlist) {
		if (t && (t->state == UPSTREAM_STAT_FREE) && !(transfer->control & STOP_UPSTREAM)) {
			ret = 0;
			retry = 0;
			got = 1;
			break;
		}
	}
	mutex_unlock(&transfer->usb_up_mutex);
	if (ret < 0) {
		if (transfer->control & STOP_UPSTREAM)
			return;
		retry = 1;
	}
	if (retry) {
		C2K_NOTE("%s: up request is busy, try to get usb request\n", __func__);
		goto reget;
	}
	if (!t->req || got == 0)
		return;
	req = t->req;
	memcpy(t->buffer, buffer, length);
	dump_data(transfer, "pushing up", t->buffer, length);
	req->length = length;
	req->buf = t->buffer;
	req->complete = upstream_complete;
	req->zero = ((length % transfer->upstream.ep->maxpacket) == 0);
	t->state = UPSTREAM_STAT_UPLOADING;
	/* if (rawbulk_usb_state_check()) { */
	ret = usb_ep_queue(transfer->upstream.ep, req, GFP_ATOMIC);
	/* } else */
	/*	return; */
	if (ret < 0) {
		terr(t, "fail to queue request, %d", ret);
		t->state = UPSTREAM_STAT_FREE;
		return;
	}
	c->state = UPSTREAM_STAT_FREE;
	/*
	 * Legacy path (disabled): split each buffer at a 512-byte boundary
	 * before queueing.
	 *
	length = c->length;
	buffer = c->buffer;
	part1_sz = length - (length & 511);
	part2_sz = length & 511;
	for (i = 0; i < 2; i++) {
		//printk("%s i = %d**3**\n", __func__, i);
		if (i == 0) {
			flag = 1;
			temp = part1_sz;
			if (part1_sz <= 0)
				continue;
		} else {
			flag = 0;
			temp = part2_sz;
			if (part2_sz <= 0)
				continue;
		}
		ret = -1;
reget:
		mutex_lock(&transfer->usb_up_mutex);
		list_for_each_entry(t, &transfer->upstream.transactions, tlist) {
			if (t && (t->state == UPSTREAM_STAT_FREE) &&
			    !(transfer->control & STOP_UPSTREAM)) {
				ret = 0;
				retry = 0;
				break;
			}
		}
		mutex_unlock(&transfer->usb_up_mutex);
		if (ret < 0) {
			if (transfer->control & STOP_UPSTREAM)
				return;
			retry = 1;
		}
		if (retry) {
			printk("%s goto reget usb request\n", __func__);
			goto reget;
		}
		if (!t->req)
			return;
		req = t->req;
		memcpy(t->buffer, buffer, temp);
		dump_data(transfer, "pushing up", t->buffer, temp);
		buffer += temp;
		length -= temp;
		req->length = temp;
		req->buf = t->buffer;
		req->complete = upstream_complete;
		//req->zero = ((length % transfer->upstream.ep->maxpacket) == 0);
		req->short_not_ok = flag;
		t->state = UPSTREAM_STAT_UPLOADING;
		if (rawbulk_usb_state_check()) {
			ret = usb_ep_queue(transfer->upstream.ep, req, GFP_ATOMIC);
			//printk("%s %d: after requeue\n", __func__, __LINE__);
		} else
			return;
		if (ret < 0) {
			terr(t, "fail to queue request, %d", ret);
			t->state = UPSTREAM_STAT_FREE;
			return;
		}
		c->state = UPSTREAM_STAT_FREE;
	} */
}
static void upstream_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct upstream_transaction *t = req->context;
	struct rawbulk_transfer *transfer = t->transfer;

	C2K_DBG("%s\n", __func__);
	t->state = UPSTREAM_STAT_FREE;
	if (req->status < 0) {
		/* if (req->status == -ESHUTDOWN)
			return;
		else
			terr(t, "req status %d", req->status); */
		C2K_ERR("%s: req status %d\n", __func__, req->status);
		return;
	}
	if (!req->actual)
		terr(t, "req actual 0");
	/* update statistics */
	upstream_data[transfer->id] += req->actual;
	upstream_cnt[transfer->id]++;
	udata[transfer->id] += req->actual;
	ucnt[transfer->id]++;
	if (udata[transfer->id] >= up_note_sz) {
		C2K_NOTE("t<%d>, %d bytes uploaded\n", transfer->id, udata[transfer->id]);
		udata[transfer->id] = 0;
		ucnt[transfer->id] = 0;
	}
	queue_work(transfer->tx_wq, &transfer->write_work);
}

static void stop_upstream(struct upstream_transaction *t)
{
	struct rawbulk_transfer *transfer = t->transfer;

	C2K_NOTE("%s, %p, %p\n", __func__, transfer->upstream.ep, t->req);
	if (t->state == UPSTREAM_STAT_UPLOADING)
		usb_ep_dequeue(transfer->upstream.ep, t->req);
	t->state = UPSTREAM_STAT_FREE;
}
int rawbulk_push_upstream_buffer(int transfer_id, const void *buffer, unsigned int length)
{
	int ret = -ENOENT;
	struct rawbulk_transfer *transfer;
	int count = length;
	struct cache_buf *c;

	C2K_DBG("%s\n", __func__);
	if (transfer_id > (FS_CH_C2K - 1))
		transfer_id--;
	else if (transfer_id == (FS_CH_C2K - 1)) {
		C2K_ERR("channel %d is flashless, no need to bypass\n", (FS_CH_C2K - 1));
		return 0;
	}
	C2K_DBG("%s: transfer_id = %d, length = %d\n", __func__, transfer_id, length);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return -ENODEV;
	mutex_lock(&transfer->modem_up_mutex);
	list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
		if (c && (c->state == UPSTREAM_STAT_FREE) && !(transfer->control & STOP_UPSTREAM)) {
			list_move_tail(&c->clist, &transfer->cache_buf_lists.transactions);
			c->state = UPSTREAM_STAT_UPLOADING;
			ret = 0;
			break;
		}
	}
	/* dynamically grow the cache pool */
	if (ret < 0 && transfer->cache_buf_lists.ntrans < max_cache_cnt) {
		c = kmalloc(sizeof(struct cache_buf), GFP_KERNEL);
		if (!c) {
			C2K_NOTE("fail to allocate upstream sdio buf n %d\n", transfer_id);
			goto drop;
		}
		c->buffer = (char *)__get_free_page(GFP_KERNEL);
		/* c->buffer = kmalloc(upsz, GFP_KERNEL); */
		if (!c->buffer) {
			kfree(c);
			C2K_NOTE("fail to allocate upstream sdio buf n %d\n", transfer_id);
			goto drop;
		}
		c->state = UPSTREAM_STAT_UPLOADING;
		INIT_LIST_HEAD(&c->clist);
		list_add_tail(&c->clist, &transfer->cache_buf_lists.transactions);
		transfer->cache_buf_lists.ntrans++;
		total_tran[transfer_id] = transfer->cache_buf_lists.ntrans;
		C2K_NOTE("new cache, t<%d>, trans<%d>, alloc_fail<%d>, upstream<%d,%d>\n",
			 transfer_id,
			 transfer->cache_buf_lists.ntrans,
			 alloc_fail[transfer_id],
			 upstream_data[transfer_id], upstream_cnt[transfer_id]);
		ret = 0;
	}
drop:
	if (ret < 0) {
		total_drop[transfer_id] += length;
		if (time_after(jiffies, drop_check_timeout)) {
			C2K_NOTE("cache full, t<%d>, drop<%d>, total_drop<%d>\n",
				 transfer_id, length, total_drop[transfer_id]);
			C2K_NOTE("trans<%d>, alloc_fail<%d>, upstream<%d,%d>\n",
				 transfer->cache_buf_lists.ntrans, alloc_fail[transfer_id],
				 upstream_data[transfer_id], upstream_cnt[transfer_id]);
			drop_check_timeout = jiffies + HZ * drop_check_interval;
		}
		mutex_unlock(&transfer->modem_up_mutex);
		return -ENOMEM;
	}
	mutex_unlock(&transfer->modem_up_mutex);
	memcpy(c->buffer, buffer, count);
	c->length = count;
	dump_data(transfer, "pushing up", c->buffer, count);
	queue_work(transfer->tx_wq, &transfer->write_work);
	return count;
}
EXPORT_SYMBOL_GPL(rawbulk_push_upstream_buffer);
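
/*
 * Sketch of the expected caller on the modem side (the callback name and
 * error handling below are hypothetical; only rawbulk_push_upstream_buffer()
 * is provided by this file). The data is copied into a cache_buf before
 * returning, so the caller may reuse its buffer immediately:
 *
 *	static void cbp_rx_done(int ch_id, const void *data, unsigned int len)
 *	{
 *		int sent = rawbulk_push_upstream_buffer(ch_id, data, len);
 *
 *		if (sent < 0)
 *			pr_debug("cache full, %u bytes dropped\n", len);
 *	}
 */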
/*
 * downstream
 */
#define DOWNSTREAM_STAT_FREE 0
#define DOWNSTREAM_STAT_DOWNLOADING 2

struct downstream_transaction {
	int state;
	int stalled;
	char name[32];
	struct list_head tlist;
	struct rawbulk_transfer *transfer;
	struct usb_request *req;
	int buffer_length;
	/* unsigned char buffer[0]; */
	char *buffer;
};

static void downstream_delayed_work(struct work_struct *work);
static void downstream_complete(struct usb_ep *ep, struct usb_request *req);

static struct downstream_transaction *alloc_downstream_transaction(struct rawbulk_transfer *transfer,
								   int bufsz)
{
	struct downstream_transaction *t;

	C2K_NOTE("%s\n", __func__);
	/* t = kzalloc(sizeof *t + bufsz * sizeof(unsigned char), GFP_ATOMIC); */
	t = kmalloc(sizeof(struct downstream_transaction), GFP_ATOMIC);
	if (!t)
		return NULL;
	t->buffer = (char *)__get_free_page(GFP_ATOMIC);
	/* t->buffer = kmalloc(bufsz, GFP_ATOMIC); */
	if (!t->buffer) {
		kfree(t);
		return NULL;
	}
	t->buffer_length = bufsz;
	t->req = usb_ep_alloc_request(transfer->downstream.ep, GFP_ATOMIC);
	if (!t->req)
		goto failto_alloc_usb_request;
	t->name[0] = 0;
	INIT_LIST_HEAD(&t->tlist);
	list_add_tail(&t->tlist, &transfer->downstream.transactions);
	transfer->downstream.ntrans++;
	t->transfer = transfer;
	t->state = DOWNSTREAM_STAT_FREE;
	t->stalled = 0;
	t->req->context = t;
	return t;

failto_alloc_usb_request:
	/* kfree(t->buffer); */
	free_page((unsigned long)t->buffer);
	kfree(t);
	return NULL;
}
static void free_downstream_transaction(struct rawbulk_transfer *transfer)
{
	struct list_head *p, *n;
	unsigned long flags;

	C2K_NOTE("%s\n", __func__);
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_safe(p, n, &transfer->downstream.transactions) {
		struct downstream_transaction *t =
			list_entry(p, struct downstream_transaction, tlist);

		list_del(p);
		/* kfree(t->buffer); */
		if (t->buffer)	/* NULL pointer when ETS switch */
			free_page((unsigned long)t->buffer);
		usb_ep_free_request(transfer->downstream.ep, t->req);
		kfree(t);
		transfer->downstream.ntrans--;
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
}

static void stop_downstream(struct downstream_transaction *t)
{
	struct rawbulk_transfer *transfer = t->transfer;

	if (t->state == DOWNSTREAM_STAT_DOWNLOADING) {
		usb_ep_dequeue(transfer->downstream.ep, t->req);
		t->state = DOWNSTREAM_STAT_FREE;
	}
}

static int queue_downstream(struct downstream_transaction *t)
{
	int rc = 0;
	struct rawbulk_transfer *transfer = t->transfer;
	struct usb_request *req = t->req;

	C2K_DBG("%s\n", __func__);
	req->buf = t->buffer;
	req->length = t->buffer_length;
	req->complete = downstream_complete;
	/* if (rawbulk_usb_state_check()) */
	rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
	/* else */
	/*	return; */
	if (rc < 0)
		return rc;
	t->state = DOWNSTREAM_STAT_DOWNLOADING;
	return 0;
}
static int start_downstream(struct downstream_transaction *t)
{
	int rc = 0;
	struct rawbulk_transfer *transfer = t->transfer;
	struct usb_request *req = t->req;
	int time_delayed = msecs_to_jiffies(1);

	C2K_DBG("%s\n", __func__);
	if (transfer->control & STOP_DOWNSTREAM) {
		/* t->state = DOWNSTREAM_STAT_FREE; */
		return -EPIPE;
	}
	rc = ccci_c2k_buffer_push(transfer->id, t->req->buf, t->req->actual);
	if (rc < 0) {
		if (rc == -ENOMEM) {
			/* modem is flow-blocked: park this transaction for retry */
			spin_lock(&transfer->modem_block_lock);
			transfer->sdio_block = 1;
			spin_unlock(&transfer->modem_block_lock);
			spin_lock(&transfer->usb_down_lock);
			list_move_tail(&t->tlist, &transfer->repush2modem.transactions);
			spin_unlock(&transfer->usb_down_lock);
			transfer->repush2modem.ntrans++;
			transfer->downstream.ntrans--;
			queue_delayed_work(transfer->flow_wq, &transfer->delayed, time_delayed);
		}
		return -EPIPE;
	}
	req->buf = t->buffer;
	req->length = t->buffer_length;
	req->complete = downstream_complete;
	/* if (rawbulk_usb_state_check()) */
	rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
	/* else */
	/*	return; */
	if (rc < 0) {
		terr(t, "fail to queue request, %d", rc);
		return rc;
	}
	t->state = DOWNSTREAM_STAT_DOWNLOADING;
	return 0;
}
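
/*
 * Note on flow control: when ccci_c2k_buffer_push() returns -ENOMEM in
 * start_downstream(), the transaction is parked on repush2modem and
 * downstream_delayed_work() retries roughly every millisecond
 * (msecs_to_jiffies(1)) until the modem accepts data again, after which
 * sdio_block is cleared.
 */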
static void downstream_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct downstream_transaction *t = req->context;
	struct rawbulk_transfer *transfer = t->transfer;

	C2K_DBG("%s\n", __func__);
	t->state = DOWNSTREAM_STAT_FREE;
	if (req->status < 0) {
		/* if (req->status == -ESHUTDOWN)
			return;
		else
			terr(t, "req status %d", req->status); */
		C2K_WARN("req status %d\n", req->status);
		return;
	}
#ifdef C2K_USB_UT
#define PRINT_LIMIT 8
	{
		int i;
		static unsigned char last_c;
		unsigned char c;
		char *ptr = (char *)t->req->buf;
		char verb[64];
		char *pbuf = (char *)verb;
		char compare_val;

		pbuf += sprintf(pbuf, "down len(%d), %d, ", t->req->actual,
				(int)sizeof(unsigned char));
		for (i = 0; i < t->req->actual; i++) {
			c = *(ptr + i);
			if (last_c == 0xff)
				compare_val = 0;
			else
				compare_val = last_c + 1;
			if (c != compare_val || ut_err == 1) {
				if (c != compare_val) {
					C2K_NOTE("<%x,%x, %x>, sizeof(unsigned char):%d\n",
						 c, last_c, compare_val,
						 (int)sizeof(unsigned char));
				}
				ut_err = 1;
			}
			if (i < PRINT_LIMIT)
				pbuf += sprintf(pbuf, "%c ", c);
			last_c = c;	/* keep updating data */
		}
		C2K_DBG("%s, last_c(%x)\n", verb, last_c);
		if (ut_err)
			C2K_NOTE("data mismatch in UT stream\n");
	}
#endif
	dump_data(transfer, "downstream", t->buffer, req->actual);
	spin_lock(&transfer->modem_block_lock);
	if (transfer->sdio_block) {
		/* modem still blocked: park the data for the delayed repush */
		spin_unlock(&transfer->modem_block_lock);
		spin_lock(&transfer->usb_down_lock);
		list_move_tail(&t->tlist, &transfer->repush2modem.transactions);
		spin_unlock(&transfer->usb_down_lock);
		transfer->repush2modem.ntrans++;
		transfer->downstream.ntrans--;
		return;
	}
	spin_unlock(&transfer->modem_block_lock);
	start_downstream(t);
}
static void downstream_delayed_work(struct work_struct *work)
{
	int rc = 0;
	unsigned long flags;
	struct downstream_transaction *downstream, *downstream_copy;
	struct usb_request *req;
	int time_delayed = msecs_to_jiffies(1);
	struct rawbulk_transfer *transfer =
		container_of(work, struct rawbulk_transfer, delayed.work);

	C2K_NOTE("%s\n", __func__);
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_entry_safe(downstream, downstream_copy,
				 &transfer->repush2modem.transactions, tlist) {
		spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
		rc = ccci_c2k_buffer_push(transfer->id, downstream->req->buf,
					  downstream->req->actual);
		if (rc < 0) {
			if (rc != -ENOMEM)
				terr(downstream, "port is not present\n");
			if (!(transfer->control & STOP_DOWNSTREAM))
				queue_delayed_work(transfer->flow_wq, &transfer->delayed,
						   time_delayed);
			return;
		}
		spin_lock_irqsave(&transfer->usb_down_lock, flags);
		list_move_tail(&downstream->tlist, &transfer->downstream.transactions);
		spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
		downstream->stalled = 0;
		downstream->state = DOWNSTREAM_STAT_FREE;
		req = downstream->req;
		req->buf = downstream->buffer;
		req->length = downstream->buffer_length;
		req->complete = downstream_complete;
		/* if (rawbulk_usb_state_check()) */
		rc = usb_ep_queue(transfer->downstream.ep, req, GFP_ATOMIC);
		/* else */
		/*	return; */
		if (rc < 0) {
			terr(downstream, "fail to queue request, %d", rc);
			downstream->stalled = 1;
			return;
		}
		downstream->state = DOWNSTREAM_STAT_DOWNLOADING;
		transfer->repush2modem.ntrans--;
		transfer->downstream.ntrans++;
		spin_lock_irqsave(&transfer->usb_down_lock, flags);
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
	spin_lock_irqsave(&transfer->modem_block_lock, flags);
	transfer->sdio_block = 0;
	spin_unlock_irqrestore(&transfer->modem_block_lock, flags);
}
int rawbulk_start_transactions(int transfer_id, int nups, int ndowns, int upsz, int downsz)
{
	int n;
	int rc, ret, up_cache_cnt;
	unsigned long flags;
	struct rawbulk_transfer *transfer;
	struct upstream_transaction *upstream;	/* upstream_copy; */
	struct downstream_transaction *downstream, *downstream_copy;
	struct cache_buf *c;

	C2K_NOTE("%s\n", __func__);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return -ENODEV;
	if (!rawbulk->cdev)
		return -ENODEV;
	if (!transfer->function)
		return -ENODEV;
	C2K_NOTE("start transactions on id %d, nups %d ndowns %d upsz %d downsz %d\n",
		 transfer_id, nups, ndowns, upsz, downsz);
	/* stop the host transfer first */
	ret = ccci_c2k_rawbulk_intercept(transfer->id, 1);
	if (ret < 0) {
		C2K_ERR("bypass sdio failed, channel id = %d\n", transfer->id);
		return ret;
	}
	transfer->sdio_block = 0;
	spin_lock(&transfer->flow_lock);
	transfer->down_flow = 0;
	spin_unlock(&transfer->flow_lock);
	mutex_lock(&transfer->usb_up_mutex);
	for (n = 0; n < nups; n++) {
		upstream = alloc_upstream_transaction(transfer, upsz);
		if (!upstream) {
			rc = -ENOMEM;
			mutex_unlock(&transfer->usb_up_mutex);
			C2K_NOTE("fail to allocate upstream transaction n %d", n);
			goto failto_alloc_upstream;
		}
	}
	mutex_unlock(&transfer->usb_up_mutex);
	mutex_lock(&transfer->modem_up_mutex);
	if (transfer_id == RAWBULK_TID_ETS || transfer_id == RAWBULK_TID_MODEM)
		up_cache_cnt = base_cache_cnt;
	else
		up_cache_cnt = 8 * nups;
	C2K_NOTE("t<%d>, up_cache_cnt<%d>\n", transfer_id, up_cache_cnt);
	for (n = 0; n < up_cache_cnt; n++) {
		/* c = kzalloc(sizeof *c + upsz * sizeof(unsigned char), GFP_KERNEL); */
		c = kmalloc(sizeof(struct cache_buf), GFP_KERNEL);
		if (!c) {
			rc = -ENOMEM;
			mutex_unlock(&transfer->modem_up_mutex);
			C2K_NOTE("fail to allocate upstream sdio buf n %d", n);
			alloc_fail[transfer_id] = 1;
			goto failto_alloc_up_sdiobuf;
		}
		c->buffer = (char *)__get_free_page(GFP_KERNEL);
		/* c->buffer = kmalloc(upsz, GFP_KERNEL); */
		if (!c->buffer) {
			rc = -ENOMEM;
			kfree(c);
			mutex_unlock(&transfer->modem_up_mutex);
			C2K_NOTE("fail to allocate upstream sdio buf n %d", n);
			alloc_fail[transfer_id] = 1;
			goto failto_alloc_up_sdiobuf;
		}
		c->state = UPSTREAM_STAT_FREE;
		INIT_LIST_HEAD(&c->clist);
		list_add_tail(&c->clist, &transfer->cache_buf_lists.transactions);
		transfer->cache_buf_lists.ntrans++;
	}
	total_tran[transfer_id] = transfer->cache_buf_lists.ntrans;
	mutex_unlock(&transfer->modem_up_mutex);
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	for (n = 0; n < ndowns; n++) {
		downstream = alloc_downstream_transaction(transfer, downsz);
		if (!downstream) {
			rc = -ENOMEM;
			spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
			C2K_NOTE("fail to allocate downstream transaction n %d", n);
			goto failto_alloc_downstream;
		}
	}
	transfer->control &= ~STOP_UPSTREAM;
	transfer->control &= ~STOP_DOWNSTREAM;
	list_for_each_entry_safe(downstream, downstream_copy,
				 &transfer->downstream.transactions, tlist) {
		if (downstream->state == DOWNSTREAM_STAT_FREE && !downstream->stalled) {
			rc = queue_downstream(downstream);
			if (rc < 0) {
				spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
				C2K_NOTE("fail to start downstream %s rc %d\n",
					 downstream->name, rc);
				goto failto_start_downstream;
			}
		}
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
	return 0;

failto_start_downstream:
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_entry(downstream, &transfer->downstream.transactions, tlist)
		stop_downstream(downstream);
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
failto_alloc_up_sdiobuf:
	free_upstream_sdio_buf(transfer);
failto_alloc_downstream:
	free_downstream_transaction(transfer);
failto_alloc_upstream:
	free_upstream_transaction(transfer);
	/* recover the host transfer */
	ccci_c2k_rawbulk_intercept(transfer->id, 0);
	return rc;
}
EXPORT_SYMBOL_GPL(rawbulk_start_transactions);
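
/*
 * Illustrative call from a gadget function's enable/set_alt() path (the
 * counts below are made up; note that both buffer sizes must not exceed
 * PAGE_SIZE, since the transactions above always allocate single pages):
 *
 *	rc = rawbulk_start_transactions(RAWBULK_TID_MODEM, 16, 16,
 *					PAGE_SIZE, PAGE_SIZE);
 *	if (rc < 0)
 *		pr_err("bypass enable failed: %d\n", rc);
 *
 * rawbulk_stop_transactions() reverses this and returns the channel to
 * the host driver via ccci_c2k_rawbulk_intercept(id, 0).
 */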
void rawbulk_stop_transactions(int transfer_id)
{
	unsigned long flags;
	struct rawbulk_transfer *transfer;
	struct upstream_transaction *upstream;
	struct downstream_transaction *downstream, *downstream_copy;
	struct list_head *p, *n;

	C2K_NOTE("t-%d\n", transfer_id);
	transfer = id_to_transfer(transfer_id);
	if (!transfer) {
		C2K_NOTE("t-%d, NULL\n", transfer_id);
		return;
	}
	if (transfer->control) {
		C2K_NOTE("t-%d, ctrl:%d\n", transfer_id, transfer->control);
		return;
	}
	spin_lock(&transfer->lock);
	transfer->control |= (STOP_UPSTREAM | STOP_DOWNSTREAM);
	spin_unlock(&transfer->lock);
	ccci_c2k_rawbulk_intercept(transfer->id, 0);
	cancel_delayed_work(&transfer->delayed);
	flush_workqueue(transfer->flow_wq);
	flush_workqueue(transfer->tx_wq);
	mutex_lock(&transfer->usb_up_mutex);
	list_for_each_entry(upstream, &transfer->upstream.transactions, tlist) {
		C2K_NOTE("t-%d, upstream<%p>\n", transfer_id, upstream);
		stop_upstream(upstream);
	}
	mutex_unlock(&transfer->usb_up_mutex);
	/* these take their locks internally */
	free_upstream_transaction(transfer);
	free_upstream_sdio_buf(transfer);
	list_for_each_entry_safe(downstream, downstream_copy,
				 &transfer->downstream.transactions, tlist) {
		stop_downstream(downstream);
	}
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_safe(p, n, &transfer->repush2modem.transactions) {
		struct downstream_transaction *delayed_t =
			list_entry(p, struct downstream_transaction, tlist);

		list_move_tail(&delayed_t->tlist, &transfer->downstream.transactions);
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
	spin_lock_irqsave(&transfer->modem_block_lock, flags);
	transfer->sdio_block = 0;
	spin_unlock_irqrestore(&transfer->modem_block_lock, flags);
	free_downstream_transaction(transfer);
}
EXPORT_SYMBOL_GPL(rawbulk_stop_transactions);
static char *state2string(int state, int upstream)
{
	if (upstream) {
		switch (state) {
		case UPSTREAM_STAT_FREE:
			return "FREE";
		case UPSTREAM_STAT_UPLOADING:
			return "UPLOADING";
		default:
			return "UNKNOWN";
		}
	} else {
		switch (state) {
		case DOWNSTREAM_STAT_FREE:
			return "FREE";
		case DOWNSTREAM_STAT_DOWNLOADING:
			return "DOWNLOADING";
		default:
			return "UNKNOWN";
		}
	}
}
int rawbulk_transfer_statistics(int transfer_id, char *buf)
{
	char *pbuf = buf;
	struct rawbulk_transfer *transfer;
	struct upstream_transaction *upstream;
	struct downstream_transaction *downstream;
	struct cache_buf *c;
	unsigned long flags;

	C2K_NOTE("%s\n", __func__);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return sprintf(pbuf, "-ENODEV, id %d\n", transfer_id);
	pbuf += sprintf(pbuf, "rawbulk statistics:\n");
	if (rawbulk->cdev && rawbulk->cdev->config)
		pbuf += sprintf(pbuf, " gadget device: %s\n", rawbulk->cdev->config->label);
	else
		pbuf += sprintf(pbuf, " gadget device: -ENODEV\n");
	pbuf += sprintf(pbuf, " upstreams (total %d transactions)\n", transfer->upstream.ntrans);
	mutex_lock(&transfer->usb_up_mutex);
	list_for_each_entry(upstream, &transfer->upstream.transactions, tlist) {
		pbuf += sprintf(pbuf, " %s state: %s", upstream->name,
				state2string(upstream->state, 1));
		pbuf += sprintf(pbuf, ", maxbuf: %d bytes", upstream->buffer_length);
		if (upstream->stalled)
			pbuf += sprintf(pbuf, " (stalled!)");
		pbuf += sprintf(pbuf, "\n");
	}
	mutex_unlock(&transfer->usb_up_mutex);
	pbuf += sprintf(pbuf, " cache_buf_lists (total %d transactions)\n",
			transfer->cache_buf_lists.ntrans);
	mutex_lock(&transfer->modem_up_mutex);
	list_for_each_entry(c, &transfer->cache_buf_lists.transactions, clist) {
		pbuf += sprintf(pbuf, " %s state:", state2string(c->state, 1));
		pbuf += sprintf(pbuf, ", maxbuf: %d bytes", c->length);
		pbuf += sprintf(pbuf, "\n");
	}
	mutex_unlock(&transfer->modem_up_mutex);
	pbuf += sprintf(pbuf, " downstreams (total %d transactions)\n",
			transfer->downstream.ntrans);
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_entry(downstream, &transfer->downstream.transactions, tlist) {
		spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
		pbuf += sprintf(pbuf, " %s state: %s", downstream->name,
				state2string(downstream->state, 0));
		pbuf += sprintf(pbuf, ", maxbuf: %d bytes", downstream->buffer_length);
		if (downstream->stalled)
			pbuf += sprintf(pbuf, " (stalled!)");
		pbuf += sprintf(pbuf, "\n");
		spin_lock_irqsave(&transfer->usb_down_lock, flags);
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
	pbuf += sprintf(pbuf, " repush2modem (total %d transactions)\n",
			transfer->repush2modem.ntrans);
	spin_lock_irqsave(&transfer->usb_down_lock, flags);
	list_for_each_entry(downstream, &transfer->repush2modem.transactions, tlist) {
		spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
		pbuf += sprintf(pbuf, " %s state: %s", downstream->name,
				state2string(downstream->state, 0));
		pbuf += sprintf(pbuf, ", maxbuf: %d bytes", downstream->buffer_length);
		if (downstream->stalled)
			pbuf += sprintf(pbuf, " (stalled!)");
		pbuf += sprintf(pbuf, "\n");
		spin_lock_irqsave(&transfer->usb_down_lock, flags);
	}
	spin_unlock_irqrestore(&transfer->usb_down_lock, flags);
	return (int)(pbuf - buf);
}
EXPORT_SYMBOL_GPL(rawbulk_transfer_statistics);
int rawbulk_bind_function(int transfer_id, struct usb_function *function,
			  struct usb_ep *bulk_out, struct usb_ep *bulk_in,
			  rawbulk_autoreconn_callback_t autoreconn_callback)
{
	struct rawbulk_transfer *transfer;

	C2K_NOTE("%s\n", __func__);
	if (!function || !bulk_out || !bulk_in)
		return -EINVAL;
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return -ENODEV;
	transfer->downstream.ep = bulk_out;
	transfer->upstream.ep = bulk_in;
	transfer->function = function;
	rawbulk->cdev = function->config->cdev;
	transfer->autoreconn = autoreconn_callback;
	return 0;
}
EXPORT_SYMBOL_GPL(rawbulk_bind_function);
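
/*
 * A gadget function typically calls this from its bind() callback once
 * its endpoints exist (the fn->... fields here are hypothetical):
 *
 *	rawbulk_bind_function(fn->transfer_id, &fn->function,
 *			      fn->bulk_out, fn->bulk_in, autoreconn_cb);
 *
 * The autoreconn callback is invoked later from rawbulk_bind_sdio_channel()
 * when the modem-side channel (re)appears.
 */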
void rawbulk_unbind_function(int transfer_id)
{
	int n;
	int no_functions = 1;
	struct rawbulk_transfer *transfer;

	C2K_NOTE("%s\n", __func__);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return;
	rawbulk_stop_transactions(transfer_id);
	/* endpoints kept: disable->work->stop_transaction may not be complete */
	/* transfer->downstream.ep = NULL; */
	/* transfer->upstream.ep = NULL; */
	transfer->function = NULL;
	for (n = 0; n < _MAX_TID; n++) {
		if (!!rawbulk->transfer[n].function)
			no_functions = 0;
	}
	if (no_functions)
		rawbulk->cdev = NULL;
}
EXPORT_SYMBOL_GPL(rawbulk_unbind_function);
int rawbulk_bind_sdio_channel(int transfer_id)
{
	struct rawbulk_transfer *transfer;
	struct rawbulk_function *fn;

	C2K_NOTE("%d\n", transfer_id);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return -ENODEV;
	fn = rawbulk_lookup_function(transfer_id);
	if (fn)
		fn->cbp_reset = 0;
	if (transfer->autoreconn)
		transfer->autoreconn(transfer->id);
	return 0;
}
EXPORT_SYMBOL_GPL(rawbulk_bind_sdio_channel);

void rawbulk_unbind_sdio_channel(int transfer_id)
{
	struct rawbulk_transfer *transfer;
	struct rawbulk_function *fn;

	C2K_NOTE("%d\n", transfer_id);
	transfer = id_to_transfer(transfer_id);
	if (!transfer)
		return;
	rawbulk_stop_transactions(transfer_id);
	fn = rawbulk_lookup_function(transfer_id);
	if (fn) {
		fn->cbp_reset = 1;
		rawbulk_disable_function(fn);
	}
}
EXPORT_SYMBOL_GPL(rawbulk_unbind_sdio_channel);
static __init int rawbulk_init(void)
{
	int n;
	char name[20];

	C2K_NOTE("%s\n", __func__);
	drop_check_timeout = jiffies;
	rawbulk = kzalloc(sizeof(*rawbulk), GFP_KERNEL);
	if (!rawbulk)
		return -ENOMEM;
	for (n = 0; n < _MAX_TID; n++) {
		struct rawbulk_transfer *t = &rawbulk->transfer[n];

		t->id = n;
		INIT_LIST_HEAD(&t->upstream.transactions);
		INIT_LIST_HEAD(&t->downstream.transactions);
		INIT_LIST_HEAD(&t->repush2modem.transactions);
		INIT_LIST_HEAD(&t->cache_buf_lists.transactions);
		INIT_DELAYED_WORK(&t->delayed, downstream_delayed_work);
		memset(name, 0, 20);
		sprintf(name, "%s_flow_ctrl", transfer_name[n]);
		t->flow_wq = create_singlethread_workqueue(name);
		if (!t->flow_wq)
			return -ENOMEM;
		INIT_WORK(&t->write_work, start_upstream);
		memset(name, 0, 20);
		sprintf(name, "%s_tx_wq", transfer_name[n]);
		t->tx_wq = create_singlethread_workqueue(name);
		if (!t->tx_wq)
			return -ENOMEM;
		mutex_init(&t->modem_up_mutex);
		mutex_init(&t->usb_up_mutex);
		spin_lock_init(&t->lock);
		spin_lock_init(&t->usb_down_lock);
		spin_lock_init(&t->modem_block_lock);
		spin_lock_init(&t->flow_lock);
		t->control = STOP_UPSTREAM | STOP_DOWNSTREAM;
	}
	return 0;
}
module_init(rawbulk_init);
static __exit void rawbulk_exit(void)
{
	int n;
	struct rawbulk_transfer *t;

	for (n = 0; n < _MAX_TID; n++) {
		t = &rawbulk->transfer[n];
		rawbulk_stop_transactions(n);
		destroy_workqueue(t->flow_wq);
		destroy_workqueue(t->tx_wq);
	}
	kfree(rawbulk);
}
module_exit(rawbulk_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");