/* port_char.c */

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uidgid.h>
#include <mt-plat/mt_ccci_common.h>
#include <mt-plat/mt_boot_common.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include "ccci_config.h"
#include "ccci_core.h"
#include "ccci_support.h"
#include "ccci_bm.h"
#include "port_ipc.h"
#include "port_kernel.h"
#include "port_char.h"
#ifdef CONFIG_MTK_ECCCI_C2K
#include "ccif_c2k_platform.h"
#endif
#ifdef FEATURE_GET_MD_BAT_VOL /* must be after ccci_config.h */
#include <mt-plat/battery_common.h>
#else
#define BAT_Get_Battery_Voltage(polling_mode) ({ 0; })
#endif
#define MAX_QUEUE_LENGTH 32
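/*
 * critical_user_active[] flags which "critical" channels still have an open
 * user: [0] CCCI_FS_RX, [1] CCCI_UART2_RX, [2] CCCI_MD_LOG_RX,
 * [3] CCCI_UART1_RX. dev_char_open_check() sets the flag on open;
 * dev_char_close_check() clears it on the last close and decides whether
 * the modem is ready to be reset.
 */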
static void dev_char_open_check(struct ccci_port *port)
{
	if (port->rx_ch == CCCI_FS_RX)
		port->modem->critical_user_active[0] = 1;
	if (port->rx_ch == CCCI_UART2_RX)
		port->modem->critical_user_active[1] = 1;
	if (port->rx_ch == CCCI_MD_LOG_RX)
		port->modem->critical_user_active[2] = 1;
	if (port->rx_ch == CCCI_UART1_RX)
		port->modem->critical_user_active[3] = 1;
}
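/*
 * Returns 0 when all critical users are gone and the modem may be reset:
 * in META mode only channels [0], [1] and [3] matter (a still-open [3] is
 * logged as an error), in normal mode all four must be closed.
 * Returns 1 otherwise.
 */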
static int dev_char_close_check(struct ccci_port *port)
{
	if (port->rx_ch == CCCI_FS_RX && !atomic_read(&port->usage_cnt))
		port->modem->critical_user_active[0] = 0;
	if (port->rx_ch == CCCI_UART2_RX && !atomic_read(&port->usage_cnt))
		port->modem->critical_user_active[1] = 0;
	if (port->rx_ch == CCCI_MD_LOG_RX && !atomic_read(&port->usage_cnt))
		port->modem->critical_user_active[2] = 0;
	if (port->rx_ch == CCCI_UART1_RX && !atomic_read(&port->usage_cnt))
		port->modem->critical_user_active[3] = 0;
	CCCI_INF_MSG(port->modem->index, CHAR, "dev close check: %d %d %d %d\n",
		     port->modem->critical_user_active[0], port->modem->critical_user_active[1],
		     port->modem->critical_user_active[2], port->modem->critical_user_active[3]);
	if (port->modem->critical_user_active[0] == 0 && port->modem->critical_user_active[1] == 0) {
		if (is_meta_mode() || is_advanced_meta_mode()) {
			if (port->modem->critical_user_active[3] == 0) {
				CCCI_INF_MSG(port->modem->index, CHAR, "ready to reset MD in META mode\n");
				return 0;
			}
			/* this should never happen */
			CCCI_ERR_MSG(port->modem->index, CHAR, "DHL ctrl is still open in META mode\n");
		} else {
			if (port->modem->critical_user_active[2] == 0 && port->modem->critical_user_active[3] == 0) {
				CCCI_INF_MSG(port->modem->index, CHAR, "ready to reset MD in normal mode\n");
				return 0;
			}
		}
	}
	return 1;
}
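/*
 * Each char port is exclusive: a second open() while usage_cnt is non-zero
 * fails with -EBUSY. The port looked up from the device numbers is assumed
 * valid here; ccci_get_port_for_node() is expected to resolve every
 * registered major/minor pair.
 */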
static int dev_char_open(struct inode *inode, struct file *file)
{
	int major = imajor(inode);
	int minor = iminor(inode);
	struct ccci_port *port;

	port = ccci_get_port_for_node(major, minor);
	if (atomic_read(&port->usage_cnt))
		return -EBUSY;
	CCCI_INF_MSG(port->modem->index, CHAR, "port %s open with flag %X by %s\n", port->name,
		     file->f_flags, current->comm);
	atomic_inc(&port->usage_cnt);
	file->private_data = port;
	nonseekable_open(inode, file);
	dev_char_open_check(port);
#ifdef FEATURE_POLL_MD_EN
	if (port->rx_ch == CCCI_MD_LOG_RX && port->modem->md_state == READY)
		mod_timer(&port->modem->md_status_poller, jiffies + 10 * HZ);
#endif
	return 0;
}
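/*
 * Close drops the usage count first so that any packet still flowing up is
 * rejected in recv_request, then purges the pending Rx list under the lock,
 * and finally checks whether this was the last critical user, in which case
 * md_init is told (via the virtual monitor channel) that MD may be reset.
 */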
static int dev_char_close(struct inode *inode, struct file *file)
{
	struct ccci_port *port = file->private_data;
	struct ccci_request *req = NULL;
	struct ccci_request *reqn;
	unsigned long flags;

	/* 0. decrease usage count, so when we ask for more, the packet can be dropped in recv_request */
	atomic_dec(&port->usage_cnt);
	/* 1. purge Rx request list */
	spin_lock_irqsave(&port->rx_req_lock, flags);
	list_for_each_entry_safe(req, reqn, &port->rx_req_list, entry) {
		/* 1.1. remove from list */
		list_del(&req->entry);
		port->rx_length--;
		/* 1.2. free it */
		req->policy = RECYCLE;
		ccci_free_req(req);
	}
	/* 1.3. flush Rx */
	ccci_port_ask_more_request(port);
	spin_unlock_irqrestore(&port->rx_req_lock, flags);
	CCCI_INF_MSG(port->modem->index, CHAR, "port %s close rx_len=%d empty=%d\n", port->name,
		     port->rx_length, list_empty(&port->rx_req_list));
	/* 2. check critical nodes for reset; run the close check first,
	   as mdlogger is killed before we gate MD during IPO shutdown */
	if (dev_char_close_check(port) == 0 && port->modem->md_state == GATED)
		ccci_send_virtual_md_msg(port->modem, CCCI_MONITOR_CH, CCCI_MD_MSG_READY_TO_RESET, 0);
#ifdef FEATURE_POLL_MD_EN
	if (port->rx_ch == CCCI_MD_LOG_RX)
		del_timer(&port->modem->md_status_poller);
#endif
	return 0;
}
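/*
 * Debug hex/ASCII dump of channel payloads, kept compiled out (#if 0):
 * printable bytes are emitted as-is, everything else as [XX].
 */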
static void port_ch_dump(int md_id, char *str, void *msg_buf, int len)
{
#if 0
#define DUMP_BUF_SIZE 200
	unsigned char *char_ptr = (unsigned char *)msg_buf;
	char buf[DUMP_BUF_SIZE];
	int i, j;

	for (i = 0, j = 0; i < len && i < DUMP_BUF_SIZE && j + 4 < DUMP_BUF_SIZE; i++) {
		if ((32 <= char_ptr[i]) && (char_ptr[i] <= 126)) {
			buf[j++] = char_ptr[i];
		} else {
			if (DUMP_BUF_SIZE - j > 4) {
				snprintf(buf + j, DUMP_BUF_SIZE - j, "[%02X]", char_ptr[i]);
				j += 4;
			} else {
				buf[j++] = '.';
			}
		}
	}
	buf[j] = '\0';
	CCCI_INF_MSG(md_id, CHAR, "%s %d>%s\n", str, len, buf);
#endif
}
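/*
 * Blocking read: sleep until the Rx list is non-empty (or return -EAGAIN for
 * O_NONBLOCK), then re-check under the lock since a racing reader may have
 * consumed the request. A request that fits into the user buffer is consumed
 * whole; a larger one is marked PARTIAL_READ and its skb is advanced by
 * skb_pull() so the next read continues where this one stopped.
 */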
static ssize_t dev_char_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	struct ccci_port *port = file->private_data;
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h = NULL;
	int ret = 0, read_len = 0, full_req_done = 0;
	unsigned long flags = 0;

READ_START:
	/* 1. get incoming request */
	if (list_empty(&port->rx_req_list)) {
		if (!(file->f_flags & O_NONBLOCK)) {
			ret = wait_event_interruptible(port->rx_wq, !list_empty(&port->rx_req_list));
			if (ret == -ERESTARTSYS) {
				ret = -EINTR;
				goto exit;
			}
		} else {
			ret = -EAGAIN;
			goto exit;
		}
	}
	CCCI_DBG_MSG(port->modem->index, CHAR, "read on %s for %zu\n", port->name, count);
	spin_lock_irqsave(&port->rx_req_lock, flags);
	if (list_empty(&port->rx_req_list)) {
		spin_unlock_irqrestore(&port->rx_req_lock, flags);
		if (!(file->f_flags & O_NONBLOCK)) {
			goto READ_START;
		} else {
			ret = -EAGAIN;
			goto exit;
		}
	}
	req = list_first_entry(&port->rx_req_list, struct ccci_request, entry);
	/* 2. calculate available data */
	if (req->state != PARTIAL_READ) {
		ccci_h = (struct ccci_header *)req->skb->data;
		if (port->flags & PORT_F_USER_HEADER) { /* header provided by user */
			/* CCCI_MON_CH should fall in here, as the header must be sent to md_init */
			if (ccci_h->data[0] == CCCI_MAGIC_NUM) {
				read_len = sizeof(struct ccci_header);
				if (ccci_h->channel == CCCI_MONITOR_CH)
					/* ccci_h->channel = CCCI_MONITOR_CH_ID; */
					*(((u32 *)ccci_h) + 2) = CCCI_MONITOR_CH_ID;
			} else {
				read_len = req->skb->len;
			}
		} else {
			/* ATTENTION: if the user does not provide a header, it should NOT send empty packets. */
			read_len = req->skb->len - sizeof(struct ccci_header);
			/* remove CCCI header */
			skb_pull(req->skb, sizeof(struct ccci_header));
		}
	} else {
		read_len = req->skb->len;
	}
	if (count >= read_len) {
		full_req_done = 1;
		list_del(&req->entry);
		/*
		 * Only ask for more requests when the rx list runs empty; no need to be too greedy,
		 * because in most cases the queue will not stop sending requests to the port.
		 * We only need to ask by ourselves after we rejected requests before, as those
		 * rejected requests stay in the queue's buffer and may never get a chance to be
		 * handled again.
		 */
		if (--(port->rx_length) == 0)
			ccci_port_ask_more_request(port);
		BUG_ON(port->rx_length < 0);
	} else {
		req->state = PARTIAL_READ;
		read_len = count;
	}
	spin_unlock_irqrestore(&port->rx_req_lock, flags);
	if (ccci_h && ccci_h->channel == CCCI_UART2_RX)
		port_ch_dump(port->modem->index, "chr_read", req->skb->data, read_len);
	/* 3. copy to user */
	if (copy_to_user(buf, req->skb->data, read_len)) {
		CCCI_ERR_MSG(port->modem->index, CHAR, "read on %s, copy to user failed, %d/%zu\n",
			     port->name, read_len, count);
		ret = -EFAULT;
	}
	skb_pull(req->skb, read_len);
	/* CCCI_DBG_MSG(port->modem->index, CHAR,
	   "read done on %s l=%d r=%d pr=%d\n", port->name, read_len, ret, (req->state == PARTIAL_READ)); */
	/* 4. free request */
	if (full_req_done) {
		/* Rx flow doesn't know the free policy until the request reaches the port
		   (network and char ports differ) */
		req->policy = RECYCLE;
#if 0
		if (port->rx_ch == CCCI_IPC_RX)
			port_ipc_rx_ack(port);
#endif
		ccci_free_req(req);
	}
exit:
	return ret ? ret : read_len;
}
#ifdef CONFIG_MTK_ECCCI_C2K
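/*
 * Called by the USB rawbulk driver to turn "bypass" interception on or off
 * for a C2K channel. The rawbulk channel id is first translated to the CCCI
 * tx/rx channel pair (only PPP data and MD log are allowed), then the
 * matching MD3 port is flagged and its usage count adjusted so the normal
 * reader path is bypassed while interception is active.
 */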
int ccci_c2k_rawbulk_intercept(int ch_id, unsigned int interception)
{
	int ret = 0;
	struct ccci_modem *md = NULL;
	struct ccci_port *port = NULL;
	struct list_head *port_list = NULL;
	char matched = 0;
	int ch_id_rx = 0;

	/* USB bypass's channel id offset, please refer to viatel_rawbulk.h */
	if (ch_id >= FS_CH_C2K)
		ch_id += 2;
	else
		ch_id += 1;
	/* only data and log channels are legal */
	if (ch_id == DATA_PPP_CH_C2K) {
		ch_id = CCCI_C2K_PPP_DATA;
		ch_id_rx = CCCI_C2K_PPP_DATA;
	} else if (ch_id == MDLOG_CH_C2K) {
		ch_id = CCCI_MD_LOG_TX;
		ch_id_rx = CCCI_MD_LOG_RX;
	} else {
		ret = -ENODEV;
		CCCI_ERR_MSG(MD_SYS3, CHAR, "Err: wrong ch_id(%d) from usb bypass\n", ch_id);
		return ret;
	}
	/* only md3 can usb bypass */
	md = ccci_get_modem_by_id(MD_SYS3);
	/* use the rx channel to find the port */
	port_list = &md->rx_ch_ports[ch_id_rx];
	list_for_each_entry(port, port_list, entry) {
		matched = (ch_id == port->tx_ch);
		if (matched) {
			port->interception = !!interception;
			if (port->interception)
				atomic_inc(&port->usage_cnt);
			else
				atomic_dec(&port->usage_cnt);
			if (ch_id == CCCI_C2K_PPP_DATA)
				md->data_usb_bypass = !!interception;
			ret = 0;
			CCCI_INF_MSG(md->index, CHAR, "port(%s) ch(%d) interception(%d) set\n",
				     port->name, ch_id, interception);
		}
	}
	if (!matched) {
		ret = -ENODEV;
		CCCI_ERR_MSG(md->index, CHAR, "Err: no port found when setting interception(%d,%d)\n",
			     ch_id, interception);
	}
	return ret;
}
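/*
 * Tx path of the USB bypass: wraps a buffer handed over by the USB rawbulk
 * driver into a ccci_request with a fresh CCCI header and sends it to MD3.
 * The payload is capped at CCCI_MTU. Allocation must not schedule (the USB
 * driver may call this in ISR context), while the send itself defaults to
 * blocking; -EBUSY on a non-blocking request is translated to -EAGAIN.
 */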
int ccci_c2k_buffer_push(int ch_id, void *buf, int count)
{
	int ret = 0;
	struct ccci_modem *md = NULL;
	struct ccci_port *port = NULL;
	struct list_head *port_list = NULL;
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h = NULL;
	char matched = 0;
	size_t actual_count = 0;
	int ch_id_rx = 0;
	unsigned char blk1 = 0; /* usb will call this routine in ISR, so we cannot schedule */
	unsigned char blk2 = 1; /* default blocking for all requests from USB */

	/* USB bypass's channel id offset, please refer to viatel_rawbulk.h */
	if (ch_id >= FS_CH_C2K)
		ch_id += 2;
	else
		ch_id += 1;
	/* only data and log channels are legal */
	if (ch_id == DATA_PPP_CH_C2K) {
		ch_id = CCCI_C2K_PPP_DATA;
		ch_id_rx = CCCI_C2K_PPP_DATA;
	} else if (ch_id == MDLOG_CH_C2K) {
		ch_id = CCCI_MD_LOG_TX;
		ch_id_rx = CCCI_MD_LOG_RX;
	} else {
		ret = -ENODEV;
		CCCI_ERR_MSG(MD_SYS3, CHAR, "Err: wrong ch_id(%d) from usb bypass\n", ch_id);
		return ret;
	}
	/* only md3 can usb bypass */
	md = ccci_get_modem_by_id(MD_SYS3);
	CCCI_INF_MSG(md->index, CHAR, "data from usb bypass (ch%d)(%d)\n", ch_id, count);
	actual_count = count > CCCI_MTU ? CCCI_MTU : count;
	port_list = &md->rx_ch_ports[ch_id_rx];
	list_for_each_entry(port, port_list, entry) {
		matched = (ch_id == port->tx_ch);
		if (matched) {
			req = ccci_alloc_req(OUT, actual_count, blk1, blk2);
			if (req) {
				req->policy = RECYCLE;
				ccci_h = (struct ccci_header *)skb_put(req->skb, sizeof(struct ccci_header));
				ccci_h->data[0] = 0;
				ccci_h->data[1] = actual_count + sizeof(struct ccci_header);
				ccci_h->channel = port->tx_ch;
				ccci_h->reserved = 0;
				memcpy(skb_put(req->skb, actual_count), buf, actual_count);
				/* for md3, ccci_h->channel will probably change after send_request is
				   called, because of md3's channel mapping */
				/* do NOT reference the request after this call;
				   the modem may have freed it, unless you get -EBUSY */
				ret = ccci_port_send_request(port, req);
				if (ret) {
					if (ret == -EBUSY && !req->blocking)
						ret = -EAGAIN;
					goto push_err_out;
				}
				return ret < 0 ? ret : actual_count;
push_err_out:
				ccci_free_req(req);
				return ret;
			}
			/* consider this case as non-blocking */
			return -ENOMEM;
		}
	}
	return -ENODEV;
}
#endif
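/*
 * Write path for all char ports. Writes to the monitor channel always fail
 * with -EPERM. A CCCI header is prepended unless the port runs with
 * PORT_F_USER_HEADER, in which case the first bytes of the user buffer are
 * taken as the header and only patched (magic number for header-only
 * packets, length and channel otherwise). Writes are refused while MD is
 * booting (except FS/RPC), in exception state (except MD log, UART1 and FS),
 * or gated/reset/invalid.
 */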
static ssize_t dev_char_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct ccci_port *port = file->private_data;
	unsigned char blocking = !(file->f_flags & O_NONBLOCK);
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h = NULL;
	size_t actual_count = 0;
	int ret = 0, header_len = 0;

	if (port->tx_ch == CCCI_MONITOR_CH)
		return -EPERM;
	if (port->tx_ch == CCCI_IPC_UART_TX)
		CCCI_DBG_MSG(port->modem->index, CHAR,
			     "port %s write: md_state=%d\n", port->name, port->modem->md_state);
	if (port->modem->md_state == BOOTING && port->tx_ch != CCCI_FS_TX && port->tx_ch != CCCI_RPC_TX) {
		CCCI_INF_MSG(port->modem->index, CHAR, "port %s ch%d write fail when md_state=%d\n",
			     port->name, port->tx_ch, port->modem->md_state);
		return -ENODEV;
	}
	if (port->modem->md_state == EXCEPTION && port->tx_ch != CCCI_MD_LOG_TX && port->tx_ch != CCCI_UART1_TX
	    && port->tx_ch != CCCI_FS_TX)
		return -ETXTBSY;
	if (port->modem->md_state == GATED || port->modem->md_state == RESET || port->modem->md_state == INVALID)
		return -ENODEV;
	header_len = sizeof(struct ccci_header) + (port->rx_ch == CCCI_FS_RX ? sizeof(unsigned int) : 0);
	if (port->flags & PORT_F_USER_HEADER) {
		if (count > (CCCI_MTU + header_len)) {
			CCCI_ERR_MSG(port->modem->index, CHAR, "reject packet(size=%zu), larger than MTU on %s\n",
				     count, port->name);
			return -ENOMEM;
		}
	}
	if (count == 0)
		return -EINVAL;
	if (port->flags & PORT_F_USER_HEADER)
		actual_count = count > (CCCI_MTU + header_len) ? (CCCI_MTU + header_len) : count;
	else
		actual_count = count > CCCI_MTU ? CCCI_MTU : count;
	/* if (CCCI_FS_TX != port->tx_ch)
	   CCCI_INF_MSG(port->modem->index, CHAR, "write on %s for %zu of %zu, md_s=%d\n",
	   port->name, actual_count, count, port->modem->md_state); */
	req = ccci_alloc_req(OUT, actual_count, blocking, blocking);
	if (req) {
		/* 1. for a Tx packet, whoever issued it should know whether to recycle it or not */
		req->policy = RECYCLE;
		/* 2. prepare the CCCI header; every member must be re-written as the request may be re-used */
		if (!(port->flags & PORT_F_USER_HEADER)) {
			ccci_h = (struct ccci_header *)skb_put(req->skb, sizeof(struct ccci_header));
			ccci_h->data[0] = 0;
			ccci_h->data[1] = actual_count + sizeof(struct ccci_header);
			ccci_h->channel = port->tx_ch;
			ccci_h->reserved = 0;
		} else {
			ccci_h = (struct ccci_header *)req->skb->data;
		}
		/* 3. get user data */
		ret = copy_from_user(skb_put(req->skb, actual_count), buf, actual_count);
		if (ret)
			goto err_out;
		if (port->flags & PORT_F_USER_HEADER) { /* header provided by user, valid after copy_from_user */
			if (actual_count == sizeof(struct ccci_header))
				ccci_h->data[0] = CCCI_MAGIC_NUM;
			else
				ccci_h->data[1] = actual_count;
			ccci_h->channel = port->tx_ch; /* as EEMCS VA will not fill this field */
		}
		if (port->rx_ch == CCCI_IPC_RX) {
			ret = port_ipc_write_check_id(port, req);
			if (ret < 0)
				goto err_out;
			else
				ccci_h->reserved = ret; /* Unity ID */
		}
		if (ccci_h && ccci_h->channel == CCCI_UART2_TX) {
			port_ch_dump(port->modem->index, "chr_write",
				     req->skb->data + sizeof(struct ccci_header), actual_count);
		}
		/* 4. send out */
		/* for md3, ccci_h->channel will probably change after send_request is called,
		   because of md3's channel mapping */
		ret = ccci_port_send_request(port, req);
		/* do NOT reference the request after this call; the modem may have freed it, unless you get -EBUSY */
		if (ccci_h && ccci_h->channel == CCCI_UART2_TX) {
			/* CCCI_INF_MSG(port->modem->index, CHAR,
			   "write done on %s, l=%zu r=%d\n", port->name, actual_count, ret); */
		}
		if (ret) {
			if (ret == -EBUSY && !req->blocking)
				ret = -EAGAIN;
			goto err_out;
		} else {
#if 0
			if (port->rx_ch == CCCI_IPC_RX)
				port_ipc_tx_wait(port);
#endif
		}
		return actual_count;
err_out:
		CCCI_INF_MSG(port->modem->index, CHAR, "write error done on %s, l=%zu r=%d\n",
			     port->name, actual_count, ret);
		ccci_free_req(req);
		return ret;
	}
	/* consider this case as non-blocking */
	return -EBUSY;
}
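/*
 * last_md_status/md_status_show_count throttle the CCCI_IOC_GET_MD_STATE
 * log: the state is printed when it changes and afterwards only about once
 * per hundred polls, since user space tends to query it in a tight loop.
 */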
static int last_md_status[5];
static int md_status_show_count[5];
static long dev_char_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long state, ret = 0;
	struct ccci_setting *ccci_setting;
	struct ccci_port *port = file->private_data;
	struct ccci_modem *md = port->modem;
	int ch = port->rx_ch; /* use the Rx channel number to distinguish a port */
	unsigned int sim_mode, sim_switch_type, enable_sim_type, sim_id, bat_info;
	unsigned int traffic_control = 0;
	unsigned int sim_slot_cfg[4];
	unsigned int tmp_md_img_list[MAX_IMG_NUM]; /* for META */
	int scanned_num;
	struct siginfo sig_info;
	unsigned int sig_pid;

	switch (cmd) {
	case CCCI_IOC_GET_MD_PROTOCOL_TYPE:
	{
		char md_protol[] = "DHL";
		unsigned int data_size = sizeof(md_protol) / sizeof(char);

		CCCI_ERR_MSG(md->index, CHAR, "Call CCCI_IOC_GET_MD_PROTOCOL_TYPE!\n");
		if (copy_to_user((void __user *)arg, md_protol, data_size)) {
			CCCI_ERR_MSG(md->index, CHAR, "copy_to_user MD_PROTOCOL failed!\n");
			return -EFAULT;
		}
		break;
	}
	case CCCI_IOC_GET_MD_STATE:
		state = md->boot_stage;
		if (state != last_md_status[md->index]) {
			last_md_status[md->index] = state;
			md_status_show_count[md->index] = 0;
		} else {
			if (md_status_show_count[md->index] < 100)
				md_status_show_count[md->index]++;
			else
				md_status_show_count[md->index] = 0;
		}
		if (md_status_show_count[md->index] == 0) {
			CCCI_INF_MSG(md->index, CHAR, "MD state %ld, %d\n", state, md->md_state);
			md_status_show_count[md->index]++;
		}
		if (state >= 0) {
			/* CCCI_DBG_MSG(md->index, CHAR, "MD state %ld\n", state); */
			/* state += '0'; // convert number to character */
			ret = put_user((unsigned int)state, (unsigned int __user *)arg);
		} else {
			CCCI_ERR_MSG(md->index, CHAR, "Get MD state fail: %ld\n", state);
			ret = state;
		}
		break;
	case CCCI_IOC_PCM_BASE_ADDR:
	case CCCI_IOC_PCM_LEN:
	case CCCI_IOC_ALLOC_MD_LOG_MEM:
		/* deprecated, shared memory operations */
		break;
	case CCCI_IOC_MD_RESET:
		CCCI_INF_MSG(md->index, CHAR, "MD reset ioctl(%d) called by %s\n", ch, current->comm);
		ret = md->ops->reset(md);
		if (ret == 0) {
			ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_RESET, 0);
#ifdef CONFIG_MTK_ECCCI_C2K
			if (md->index == MD_SYS1)
				exec_ccci_kern_func_by_md_id(MD_SYS3, ID_RESET_MD, NULL, 0);
			else if (md->index == MD_SYS3)
				exec_ccci_kern_func_by_md_id(MD_SYS1, ID_RESET_MD, NULL, 0);
#else
#ifdef CONFIG_MTK_SVLTE_SUPPORT
			c2k_reset_modem();
#endif
#endif
		}
		break;
	case CCCI_IOC_FORCE_MD_ASSERT:
		CCCI_NOTICE_MSG(md->index, CHAR, "Force MD assert ioctl(%d) called by %s\n", ch, current->comm);
		if (md->index == MD_SYS3)
			/* MD3 uses an interrupt to force assert */
			ret = md->ops->force_assert(md, CCIF_INTERRUPT);
		else
			ret = md->ops->force_assert(md, CCCI_MESSAGE);
		break;
	case CCCI_IOC_SEND_RUN_TIME_DATA:
		if (ch == CCCI_MONITOR_CH) {
			ret = md->ops->send_runtime_data(md, md->sbp_code);
		} else {
			CCCI_INF_MSG(md->index, CHAR, "Set runtime by invalid user(%u) called by %s\n",
				     ch, current->comm);
			ret = -1;
		}
		break;
	case CCCI_IOC_GET_MD_INFO:
		state = md->img_info[IMG_MD].img_info.version;
		ret = put_user((unsigned int)state, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_GET_MD_EX_TYPE:
		ret = put_user((unsigned int)md->ex_type, (unsigned int __user *)arg);
		CCCI_INF_MSG(md->index, CHAR, "get modem exception type=%d ret=%ld\n", md->ex_type, ret);
		break;
	case CCCI_IOC_SEND_STOP_MD_REQUEST:
		CCCI_INF_MSG(md->index, CHAR, "stop MD request ioctl called by %s\n", current->comm);
		ret = md->ops->reset(md);
		if (ret == 0) {
			md->ops->stop(md, 0);
			ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_STOP_MD_REQUEST, 0);
#ifdef CONFIG_MTK_ECCCI_C2K
			if (md->index == MD_SYS1)
				exec_ccci_kern_func_by_md_id(MD_SYS3, ID_RESET_MD, NULL, 0);
			else if (md->index == MD_SYS3)
				exec_ccci_kern_func_by_md_id(MD_SYS1, ID_RESET_MD, NULL, 0);
#else
#ifdef CONFIG_MTK_SVLTE_SUPPORT
			c2k_reset_modem();
#endif
#endif
		}
		break;
	case CCCI_IOC_SEND_START_MD_REQUEST:
		CCCI_INF_MSG(md->index, CHAR, "start MD request ioctl called by %s\n", current->comm);
		ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_START_MD_REQUEST, 0);
		break;
	case CCCI_IOC_DO_START_MD:
		CCCI_INF_MSG(md->index, CHAR, "start MD ioctl called by %s\n", current->comm);
		ret = md->ops->start(md);
		break;
	case CCCI_IOC_DO_STOP_MD:
		CCCI_INF_MSG(md->index, CHAR, "stop MD ioctl called by %s\n", current->comm);
		ret = md->ops->stop(md, 0);
		break;
	case CCCI_IOC_ENTER_DEEP_FLIGHT:
		CCCI_INF_MSG(md->index, CHAR, "enter MD flight mode ioctl called by %s\n", current->comm);
#ifdef MD_UMOLY_EE_SUPPORT
		md->flight_mode = MD_FIGHT_MODE_ENTER; /* enter flight mode */
#endif
		ret = md->ops->reset(md);
		if (ret == 0) {
			md->ops->stop(md, 1000);
			ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_ENTER_FLIGHT_MODE, 0);
		}
		break;
	case CCCI_IOC_LEAVE_DEEP_FLIGHT:
		CCCI_INF_MSG(md->index, CHAR, "leave MD flight mode ioctl called by %s\n", current->comm);
#ifdef MD_UMOLY_EE_SUPPORT
		md->flight_mode = MD_FIGHT_MODE_LEAVE; /* leave flight mode */
#endif
		ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_LEAVE_FLIGHT_MODE, 0);
		break;
	case CCCI_IOC_POWER_ON_MD_REQUEST:
		CCCI_INF_MSG(md->index, CHAR, "Power on MD request ioctl called by %s\n", current->comm);
		ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_POWER_ON_REQUEST, 0);
		break;
	case CCCI_IOC_POWER_OFF_MD_REQUEST:
		CCCI_INF_MSG(md->index, CHAR, "Power off MD request ioctl called by %s\n", current->comm);
		ret = ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_POWER_OFF_REQUEST, 0);
		break;
	case CCCI_IOC_POWER_ON_MD:
	case CCCI_IOC_POWER_OFF_MD:
		/* abandoned */
		CCCI_INF_MSG(md->index, CHAR, "Power on/off MD by user(%d) called by %s\n", ch, current->comm);
		ret = -1;
		break;
	case CCCI_IOC_SIM_SWITCH:
		if (copy_from_user(&sim_mode, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "IOC_SIM_SWITCH: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			switch_sim_mode(md->index, (char *)&sim_mode, sizeof(sim_mode));
			CCCI_INF_MSG(md->index, CHAR, "IOC_SIM_SWITCH(%x): %ld\n", sim_mode, ret);
		}
		break;
	case CCCI_IOC_SIM_SWITCH_TYPE:
		sim_switch_type = get_sim_switch_type();
		CCCI_INF_MSG(md->index, KERN, "CCCI_IOC_SIM_SWITCH_TYPE: sim type(0x%x)", sim_switch_type);
		ret = put_user(sim_switch_type, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_GET_SIM_TYPE:
		if (md->sim_type == 0xEEEEEEEE)
			CCCI_ERR_MSG(md->index, KERN, "md has not sent sim type yet(0x%x)", md->sim_type);
		else
			CCCI_INF_MSG(md->index, KERN, "md has sent sim type(0x%x)", md->sim_type);
		ret = put_user(md->sim_type, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_ENABLE_GET_SIM_TYPE:
		if (copy_from_user(&enable_sim_type, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_ENABLE_GET_SIM_TYPE: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			CCCI_INF_MSG(md->index, KERN, "CCCI_IOC_ENABLE_GET_SIM_TYPE: sim type(0x%x)",
				     enable_sim_type);
			ret = ccci_send_msg_to_md(md, CCCI_SYSTEM_TX, MD_SIM_TYPE, enable_sim_type, 1);
		}
		break;
	case CCCI_IOC_SEND_BATTERY_INFO:
		bat_info = (unsigned int)BAT_Get_Battery_Voltage(0);
		CCCI_INF_MSG(md->index, CHAR, "get bat voltage %d\n", bat_info);
		ret = ccci_send_msg_to_md(md, CCCI_SYSTEM_TX, MD_GET_BATTERY_INFO, bat_info, 1);
		break;
	case CCCI_IOC_RELOAD_MD_TYPE:
		state = 0;
		if (copy_from_user(&state, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "IOC_RELOAD_MD_TYPE: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			CCCI_INF_MSG(md->index, CHAR, "IOC_RELOAD_MD_TYPE: storing md type(%ld)!\n", state);
			if ((state >= modem_ultg) && (state <= MAX_IMG_NUM) && (md->index == MD_SYS1)) {
				if (md_capability(MD_SYS1, state, 0))
					ccci_reload_md_type(md, state);
				else
					ret = -1;
			} else {
				ccci_reload_md_type(md, state);
			}
		}
		break;
	case CCCI_IOC_SET_MD_IMG_EXIST:
		break;
	case CCCI_IOC_GET_MD_IMG_EXIST:
		memset(tmp_md_img_list, 0, sizeof(tmp_md_img_list));
		scanned_num = scan_image_list(md->index, "modem_%d_%s_n.img", tmp_md_img_list, MAX_IMG_NUM);
		if (copy_to_user((void __user *)arg, &tmp_md_img_list, sizeof(tmp_md_img_list))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_GET_MD_IMG_EXIST: copy_to_user fail\n");
			ret = -EFAULT;
		}
		break;
	case CCCI_IOC_GET_MD_TYPE:
		state = md->config.load_type;
		ret = put_user((unsigned int)state, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_STORE_MD_TYPE:
		if (copy_from_user(&md->config.load_type_saving, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "store md type fail: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			CCCI_INF_MSG(md->index, CHAR, "storing md type(%d) in kernel space!\n",
				     md->config.load_type_saving);
			if (md->config.load_type_saving >= 1 && md->config.load_type_saving <= MAX_IMG_NUM) {
				if (md->config.load_type_saving != md->config.load_type)
					CCCI_INF_MSG(md->index, CHAR,
						     "Maybe Wrong: md type to store does not match current setting!(%d %d)\n",
						     md->config.load_type_saving, md->config.load_type);
				/* notify the md_init daemon to store the md type in NVRAM */
				ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_STORE_NVRAM_MD_TYPE, 0);
			} else {
				CCCI_INF_MSG(md->index, CHAR, "store md type fail: invalid md type(0x%x)\n",
					     md->config.load_type_saving);
			}
		}
		break;
	case CCCI_IOC_GET_MD_TYPE_SAVING:
		ret = put_user(md->config.load_type_saving, (unsigned int __user *)arg);
		break;
	case CCCI_IPC_RESET_RECV:
	case CCCI_IPC_RESET_SEND:
	case CCCI_IPC_WAIT_MD_READY:
	case CCCI_IPC_UPDATE_TIME:
	case CCCI_IPC_WAIT_TIME_UPDATE:
	case CCCI_IPC_UPDATE_TIMEZONE:
		ret = port_ipc_ioctl(port, cmd, arg);
		break;
	case CCCI_IOC_GET_EXT_MD_POST_FIX:
		if (copy_to_user((void __user *)arg, md->post_fix, IMG_POSTFIX_LEN)) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_GET_EXT_MD_POST_FIX: copy_to_user fail\n");
			ret = -EFAULT;
		}
		break;
	case CCCI_IOC_SEND_ICUSB_NOTIFY:
		if (copy_from_user(&sim_id, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_SEND_ICUSB_NOTIFY: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			ret = ccci_send_msg_to_md(md, CCCI_SYSTEM_TX, MD_ICUSB_NOTIFY, sim_id, 1);
		}
		break;
	case CCCI_IOC_DL_TRAFFIC_CONTROL:
		if (copy_from_user(&traffic_control, (void __user *)arg, sizeof(unsigned int)))
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_DL_TRAFFIC_CONTROL: copy_from_user fail\n");
		if (traffic_control == 1)
			; /* turn off downlink queue */
		else if (traffic_control == 0)
			; /* turn on downlink queue */
		else
			;
		ret = 0;
		break;
	case CCCI_IOC_UPDATE_SIM_SLOT_CFG:
		if (copy_from_user(&sim_slot_cfg, (void __user *)arg, sizeof(sim_slot_cfg))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_UPDATE_SIM_SLOT_CFG: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			int need_update;

			sim_switch_type = get_sim_switch_type();
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_UPDATE_SIM_SLOT_CFG get s0:%d s1:%d s2:%d s3:%d\n",
				     sim_slot_cfg[0], sim_slot_cfg[1], sim_slot_cfg[2], sim_slot_cfg[3]);
			ccci_setting = ccci_get_common_setting(md->index);
			need_update = sim_slot_cfg[0];
			ccci_setting->sim_mode = sim_slot_cfg[1];
			ccci_setting->slot1_mode = sim_slot_cfg[2];
			ccci_setting->slot2_mode = sim_slot_cfg[3];
			sim_mode = ((sim_switch_type << 16) | ccci_setting->sim_mode);
			switch_sim_mode(md->index, (char *)&sim_mode, sizeof(sim_mode));
			ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_CFG_UPDATE, need_update);
			ret = 0;
		}
		break;
	case CCCI_IOC_STORE_SIM_MODE:
		if (copy_from_user(&sim_mode, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "store sim mode fail: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			CCCI_INF_MSG(md->index, CHAR, "store sim mode(%x) in kernel space!\n", sim_mode);
			exec_ccci_kern_func_by_md_id(0, ID_STORE_SIM_SWITCH_MODE, (char *)&sim_mode,
						     sizeof(unsigned int));
		}
		break;
	case CCCI_IOC_GET_SIM_MODE:
		CCCI_INF_MSG(md->index, CHAR, "get sim mode ioctl called by %s\n", current->comm);
		exec_ccci_kern_func_by_md_id(0, ID_GET_SIM_SWITCH_MODE, (char *)&sim_mode, sizeof(unsigned int));
		ret = put_user(sim_mode, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_GET_CFG_SETTING:
		ccci_setting = ccci_get_common_setting(md->index);
		if (copy_to_user((void __user *)arg, ccci_setting, sizeof(struct ccci_setting))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_GET_CFG_SETTING: copy_to_user fail\n");
			ret = -EFAULT;
		}
		break;
	case CCCI_IOC_GET_MD_SBP_CFG:
		if (!md->sbp_code_default) {
			unsigned char *sbp_custom_value = NULL;

			if (md->index == MD_SYS1) {
#if defined(CONFIG_MTK_MD_SBP_CUSTOM_VALUE)
				sbp_custom_value = CONFIG_MTK_MD_SBP_CUSTOM_VALUE;
#else
				sbp_custom_value = "";
#endif
			} else if (md->index == MD_SYS2) {
#if defined(CONFIG_MTK_MD2_SBP_CUSTOM_VALUE)
				sbp_custom_value = CONFIG_MTK_MD2_SBP_CUSTOM_VALUE;
#else
				sbp_custom_value = "";
#endif
			}
			if (sbp_custom_value == NULL)
				sbp_custom_value = "";
			ret = kstrtouint(sbp_custom_value, 0, &md->sbp_code_default);
			if (!ret) {
				CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_GET_MD_SBP_CFG: get config sbp code:%d!\n",
					     md->sbp_code_default);
			} else {
				CCCI_INF_MSG(md->index, CHAR,
					     "CCCI_IOC_GET_MD_SBP_CFG: get config sbp code fail! ret:%ld, Config val:%s\n",
					     ret, sbp_custom_value);
			}
		} else {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_GET_MD_SBP_CFG: config sbp code:%d!\n",
				     md->sbp_code_default);
		}
		ret = put_user(md->sbp_code_default, (unsigned int __user *)arg);
		break;
	case CCCI_IOC_SET_MD_SBP_CFG:
		if (copy_from_user(&md->sbp_code, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_SET_MD_SBP_CFG: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			CCCI_INF_MSG(md->index, CHAR, "CCCI_IOC_SET_MD_SBP_CFG: set md sbp code:0x%x!\n",
				     md->sbp_code);
		}
		break;
	case CCCI_IOC_SET_HEADER:
		port->flags |= PORT_F_USER_HEADER;
		break;
	case CCCI_IOC_CLR_HEADER:
		port->flags &= ~PORT_F_USER_HEADER;
		break;
	case CCCI_IOC_SEND_SIGNAL_TO_USER:
		if (copy_from_user(&sig_pid, (void __user *)arg, sizeof(unsigned int))) {
			CCCI_INF_MSG(md->index, CHAR, "signal to rild fail: copy_from_user fail!\n");
			ret = -EFAULT;
		} else {
			unsigned int sig = (sig_pid >> 16) & 0xFFFF;
			unsigned int pid = sig_pid & 0xFFFF;

			sig_info.si_signo = sig;
			sig_info.si_code = SI_KERNEL;
			sig_info.si_pid = current->pid;
			sig_info.si_uid = __kuid_val(current->cred->uid);
			ret = kill_proc_info(SIGUSR2, &sig_info, pid);
			CCCI_INF_MSG(md->index, CHAR, "send signal %d to rild %d ret=%ld\n", sig, pid, ret);
		}
		break;
	case CCCI_IOC_RESET_MD1_MD3_PCCIF:
#ifdef CONFIG_MTK_ECCCI_C2K
		CCCI_INF_MSG(md->index, CHAR, "reset md1/md3 pccif ioctl called by %s\n", current->comm);
		reset_md1_md3_pccif(md);
#endif
		break;
	default:
		ret = -ENOTTY;
		break;
	}
	return ret;
}
#ifdef CONFIG_COMPAT
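/*
 * 32-bit compat path: legacy/deprecated commands are answered with 0 so old
 * user space keeps working; everything else is forwarded to the unlocked
 * ioctl with the pointer argument converted via compat_ptr().
 */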
static long dev_char_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ccci_port *port = filp->private_data;
	struct ccci_modem *md = port->modem;

	if (!filp->f_op || !filp->f_op->unlocked_ioctl) {
		CCCI_ERR_MSG(md->index, CHAR, "dev_char_compat_ioctl(!filp->f_op || !filp->f_op->unlocked_ioctl)\n");
		return -ENOTTY;
	}
	switch (cmd) {
	case CCCI_IOC_PCM_BASE_ADDR:
	case CCCI_IOC_PCM_LEN:
	case CCCI_IOC_ALLOC_MD_LOG_MEM:
	case CCCI_IOC_FORCE_FD:
	case CCCI_IOC_AP_ENG_BUILD:
	case CCCI_IOC_GET_MD_MEM_SIZE:
		CCCI_ERR_MSG(md->index, CHAR, "dev_char_compat_ioctl deprecated cmd(%d)\n", cmd);
		return 0;
	default:
		return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
	}
}
#endif
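/*
 * poll() reports POLLIN when the Rx list has data and POLLOUT when the modem
 * queue has write room; IPC ports delegate to port_ipc_poll(). For
 * CCCI_UART1_RX, POLLERR is raised once MD is neither READY nor EXCEPTION,
 * so the MD logger can flush its log before md_init kills it.
 */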
unsigned int dev_char_poll(struct file *fp, struct poll_table_struct *poll)
{
	struct ccci_port *port = fp->private_data;
	unsigned int mask = 0;

	CCCI_DBG_MSG(port->modem->index, CHAR, "poll on %s\n", port->name);
	if (port->rx_ch == CCCI_IPC_RX) {
		mask = port_ipc_poll(fp, poll);
	} else {
		poll_wait(fp, &port->rx_wq, poll);
		/* TODO: lack of poll wait for Tx */
		if (!list_empty(&port->rx_req_list))
			mask |= POLLIN | POLLRDNORM;
		if (port->modem->ops->write_room(port->modem, PORT_TXQ_INDEX(port)) > 0)
			mask |= POLLOUT | POLLWRNORM;
		if (port->rx_ch == CCCI_UART1_RX &&
		    port->modem->md_state != READY && port->modem->md_state != EXCEPTION) {
			mask |= POLLERR; /* notify MD logger to save its log before md_init kills it */
			CCCI_INF_MSG(port->modem->index, CHAR, "poll error for MD logger at state %d, mask=%d\n",
				     port->modem->md_state, mask);
		}
	}
	return mask;
}
static const struct file_operations char_dev_fops = {
	.owner = THIS_MODULE,
	.open = &dev_char_open,
	.read = &dev_char_read,
	.write = &dev_char_write,
	.release = &dev_char_close,
	.unlocked_ioctl = &dev_char_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = &dev_char_compat_ioctl,
#endif
	.poll = &dev_char_poll,
};
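/*
 * Per-port init: allocate and register the cdev and create the device node.
 * IPC ports get extra setup first (port_ipc_init() rewrites port->minor, so
 * it must run before the device is registered), and the kernel RPC port is
 * initialized through port_kernel_init().
 */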
static int port_char_init(struct ccci_port *port)
{
	struct cdev *dev;
	int ret = 0;

	CCCI_DBG_MSG(port->modem->index, CHAR, "char port %s is initializing\n", port->name);
	dev = kmalloc(sizeof(struct cdev), GFP_KERNEL);
	if (dev == NULL)
		return -ENOMEM;
	cdev_init(dev, &char_dev_fops);
	dev->owner = THIS_MODULE;
	port->rx_length_th = MAX_QUEUE_LENGTH;
	if (port->rx_ch == CCCI_IPC_RX)
		port_ipc_init(port); /* this changes port->minor, so call it before registering the device */
	else if ((port->rx_ch == CCCI_RPC_RX) && (port->minor == 0))
		port_kernel_init(port);
	else
		port->private_data = dev; /* not used */
	ret = cdev_add(dev, MKDEV(port->modem->major, port->modem->minor_base + port->minor), 1);
	ret = ccci_register_dev_node(port->name, port->modem->major, port->modem->minor_base + port->minor);
	port->interception = 0;
	return ret;
}
#ifdef CONFIG_MTK_ECCCI_C2K
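/*
 * Rx side of the USB bypass: strip the CCCI header and push the payload
 * upstream to the USB rawbulk channel, retrying (with a 20 ms sleep when the
 * USB buffer is full) until the whole skb has been consumed, then recycle
 * the request.
 */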
static int c2k_req_push_to_usb(struct ccci_port *port, struct ccci_request *req)
{
	struct ccci_header *ccci_h = NULL;
	int ret = 0, read_len, read_count;
	int c2k_ch_id;

	if (port->rx_ch == CCCI_C2K_PPP_DATA)
		c2k_ch_id = DATA_PPP_CH_C2K - 1;
	else if (port->rx_ch == CCCI_MD_LOG_RX)
		c2k_ch_id = MDLOG_CH_C2K - 2;
	else {
		ret = -ENODEV;
		CCCI_ERR_MSG(port->modem->index, CHAR, "Err: wrong ch_id(%d) from usb bypass\n", port->rx_ch);
		return ret;
	}
	/* calculate available data */
	ccci_h = (struct ccci_header *)req->skb->data;
	read_len = req->skb->len - sizeof(struct ccci_header);
	/* remove CCCI header */
	skb_pull(req->skb, sizeof(struct ccci_header));
retry_push:
	/* push to usb */
	read_count = rawbulk_push_upstream_buffer(c2k_ch_id, req->skb->data, read_len);
	CCCI_DBG_MSG(port->modem->index, CHAR, "data push to usb bypass (ch%d)(%d)\n", port->rx_ch, read_count);
	if (read_count > 0) {
		skb_pull(req->skb, read_count);
		read_len -= read_count;
		if (read_len > 0)
			goto retry_push;
		else if (read_len == 0) {
			req->policy = RECYCLE;
			ccci_free_req(req);
		} else if (read_len < 0)
			CCCI_ERR_MSG(port->modem->index, CHAR, "read_len error, check why we came here\n");
	} else {
		CCCI_INF_MSG(port->modem->index, CHAR, "usb buf full\n");
		msleep(20);
		goto retry_push;
	}
	return ret ? ret : read_len;
}
#endif
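/*
 * Rx entry point called from the modem queue. Packets for an unopened port
 * are dropped, except on a few channels that must survive without a reader
 * (UART2/C2K AT/PCM/FS/RPC). Intercepted MD3 ports divert straight to USB.
 * Otherwise the request is moved onto the port's Rx list (bounded by
 * rx_length_th) and readers are woken under a one-second wakelock; a full
 * list either drops the packet (PORT_F_ALLOW_DROP) or pushes back with
 * -CCCI_ERR_PORT_RX_FULL so the queue retries later.
 */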
static int port_char_recv_req(struct ccci_port *port, struct ccci_request *req)
{
	unsigned long flags; /* as we can not tell the context, use the irqsave variant to be safe */

	if (!atomic_read(&port->usage_cnt) &&
	    (port->rx_ch != CCCI_UART2_RX && port->rx_ch != CCCI_C2K_AT && port->rx_ch != CCCI_PCM_RX &&
	     port->rx_ch != CCCI_FS_RX && port->rx_ch != CCCI_RPC_RX))
		goto drop;
#ifdef CONFIG_MTK_ECCCI_C2K
	if ((port->modem->index == MD_SYS3) && port->interception) {
		list_del(&req->entry);
		c2k_req_push_to_usb(port, req);
		return 0;
	}
#endif
	CCCI_DBG_MSG(port->modem->index, CHAR, "recv on %s, len=%d\n", port->name, port->rx_length);
	spin_lock_irqsave(&port->rx_req_lock, flags);
	if (port->rx_length < port->rx_length_th) {
		port->flags &= ~PORT_F_RX_FULLED;
		port->rx_length++;
		list_del(&req->entry); /* dequeue from the queue's list */
		list_add_tail(&req->entry, &port->rx_req_list);
		spin_unlock_irqrestore(&port->rx_req_lock, flags);
		wake_lock_timeout(&port->rx_wakelock, HZ);
		wake_up_all(&port->rx_wq);
		return 0;
	}
	port->flags |= PORT_F_RX_FULLED;
	spin_unlock_irqrestore(&port->rx_req_lock, flags);
	if ((port->flags & PORT_F_ALLOW_DROP) /* || !(port->flags & PORT_F_RX_EXCLUSIVE) */) {
		CCCI_INF_MSG(port->modem->index, CHAR, "port %s Rx full, drop packet\n", port->name);
		goto drop;
	}
	return -CCCI_ERR_PORT_RX_FULL;
drop:
	/* drop this packet */
	CCCI_DBG_MSG(port->modem->index, CHAR, "drop on %s, len=%d\n", port->name, port->rx_length);
	list_del(&req->entry);
	req->policy = RECYCLE;
	ccci_free_req(req);
	return -CCCI_ERR_DROP_PACKET;
}
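/*
 * A request matches a port when its CCCI header carries the port's Rx
 * channel; IPC and RPC ports apply an extra per-protocol filter on top.
 */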
static int port_char_req_match(struct ccci_port *port, struct ccci_request *req)
{
	struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;

	if (ccci_h->channel == port->rx_ch) {
		if (unlikely(port->rx_ch == CCCI_IPC_RX))
			return port_ipc_req_match(port, req);
		if (unlikely(port->rx_ch == CCCI_RPC_RX))
			return (port_kernel_req_match(port, req) == 0);
		return 1;
	}
	return 0;
}
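/*
 * MD state changes are forwarded to IPC ports; when MD gets gated, UART1
 * readers are woken so the POLLERR path in dev_char_poll() can fire.
 */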
static void port_char_md_state_notice(struct ccci_port *port, MD_STATE state)
{
	if (unlikely(port->rx_ch == CCCI_IPC_RX))
		port_ipc_md_state_notice(port, state);
	if (port->rx_ch == CCCI_UART1_RX && state == GATED)
		wake_up_all(&port->rx_wq); /* check poll function */
}
struct ccci_port_ops char_port_ops = {
	.init = &port_char_init,
	.recv_request = &port_char_recv_req,
	.req_match = &port_char_req_match,
	.md_state_notice = &port_char_md_state_notice,
};
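/*
 * Subsystem init: reserve 120 char device numbers for this modem, either at
 * the statically configured major or via dynamic allocation. The IPC minors
 * start from 100 within this range.
 */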
int ccci_subsys_char_init(struct ccci_modem *md)
{
	int ret = 0;
	dev_t dev = 0;

	if (md->major) {
		dev = MKDEV(md->major, md->minor_base);
		ret = register_chrdev_region(dev, 120, CCCI_DEV_NAME);
	} else {
		ret = alloc_chrdev_region(&dev, md->minor_base, 120, CCCI_DEV_NAME);
		if (ret)
			CCCI_ERR_MSG(md->index, CHAR, "alloc_chrdev_region fail, ret=%d\n", ret);
		md->major = MAJOR(dev);
	}
	/* as IPC minor starts from 100 */
	last_md_status[md->index] = -1;
	return 0;
}