/* port_ipc.c — CCCI IPC port handling */
  1. #include <linux/device.h>
  2. #include <linux/fs.h>
  3. #include <linux/uaccess.h>
  4. #include <linux/wait.h>
  5. #include <linux/module.h>
  6. #include <linux/kthread.h>
  7. #include <linux/poll.h>
  8. #include <linux/bitops.h>
  9. #include "ccci_config.h"
  10. #include "ccci_core.h"
  11. #include "ccci_bm.h"
  12. #include "port_ipc.h"
  13. #include "ccci_ipc_el1_msg_id.h"
/* Per-task IPC control blocks, indexed by local task ID; filled in by port_ipc_init(). */
static struct ccci_ipc_ctrl *ipc_task_ctrl[MAX_NUM_IPC_TASKS];
/* Local task ID <-> unified (external queue) ID mapping table, generated
 * from ccci_ipc_task_ID.h via the __IPC_ID_TABLE include trick. */
static struct ipc_task_id_map ipc_msgsvc_maptbl[] = {
#define __IPC_ID_TABLE
#include "ccci_ipc_task_ID.h"
#undef __IPC_ID_TABLE
};
#ifdef FEATURE_CONN_MD_EXP_EN
#include "conn_md_exp.h" /* this file also include ccci_ipc_task_ID.h, must include it after ipc_msgsvc_maptbl */
#endif
#define MAX_QUEUE_LENGTH 32
/* AP-side unified IDs carry AP_UNIFY_ID_FLAG in extq_id; MD-side IDs do not. */
#define local_AP_id_2_unify_id(id) local_xx_id_2_unify_id(id, 1)/* not using */
#define local_MD_id_2_unify_id(id) local_xx_id_2_unify_id(id, 0)
#define unify_AP_id_2_local_id(id) unify_xx_id_2_local_id(id, 1)
#define unify_MD_id_2_local_id(id) unify_xx_id_2_local_id(id, 0)/* not using */
  28. static struct ipc_task_id_map *local_xx_id_2_unify_id(u32 local_id, int AP)
  29. {
  30. int i;
  31. for (i = 0; i < ARRAY_SIZE(ipc_msgsvc_maptbl); i++) {
  32. if (ipc_msgsvc_maptbl[i].task_id == local_id &&
  33. (AP ? (ipc_msgsvc_maptbl[i].extq_id & AP_UNIFY_ID_FLAG) :
  34. !(ipc_msgsvc_maptbl[i].extq_id & AP_UNIFY_ID_FLAG)))
  35. return ipc_msgsvc_maptbl + i;
  36. }
  37. return NULL;
  38. }
  39. static struct ipc_task_id_map *unify_xx_id_2_local_id(u32 unify_id, int AP)
  40. {
  41. int i;
  42. if (!(AP ? (unify_id & AP_UNIFY_ID_FLAG) : !(unify_id & AP_UNIFY_ID_FLAG)))
  43. return NULL;
  44. for (i = 0; i < ARRAY_SIZE(ipc_msgsvc_maptbl); i++) {
  45. if (ipc_msgsvc_maptbl[i].extq_id == unify_id)
  46. return ipc_msgsvc_maptbl + i;
  47. }
  48. return NULL;
  49. }
/* The shared ACK port keeps no per-port state; nothing to initialize. */
static int port_ipc_ack_init(struct ccci_port *port)
{
	return 0;
}
  54. static int port_ipc_ack_recv_req(struct ccci_port *port, struct ccci_request *req)
  55. {
  56. struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;
  57. struct ccci_ipc_ctrl *ipc_ctrl = ipc_task_ctrl[ccci_h->reserved]; /* find port via task ID */
  58. list_del(&req->entry); /* dequeue from queue's list */
  59. clear_bit(CCCI_TASK_PENDING, &ipc_ctrl->flag);
  60. wake_up_all(&ipc_ctrl->tx_wq);
  61. req->policy = RECYCLE;
  62. ccci_free_req(req);
  63. wake_lock_timeout(&port->rx_wakelock, HZ / 2);
  64. return 0;
  65. }
/*
 * CCCI_IPC_TX/RX are treated as char-device ports, and CCCI_IPC_TX/RX_ACK
 * is assembled as a separate port. Some IPC-dedicated functions are also
 * kept here; that is why some function names contain "_ack_" and others
 * do not.
 * ALL IPC ports share one ACK port.
 */
struct ccci_port_ops ipc_port_ack_ops = {
	.init = &port_ipc_ack_init,
	.recv_request = &port_ipc_ack_recv_req,
};
  76. int port_ipc_req_match(struct ccci_port *port, struct ccci_request *req)
  77. {
  78. struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;
  79. struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  80. struct ipc_task_id_map *id_map;
  81. if (port->rx_ch != CCCI_IPC_RX)
  82. return 1;
  83. CCCI_DBG_MSG(port->modem->index, IPC, "task_id matching: (%x/%x)\n", ipc_ctrl->task_id, ccci_h->reserved);
  84. id_map = unify_AP_id_2_local_id(ccci_h->reserved);
  85. if (id_map == NULL)
  86. return 0;
  87. if (id_map->task_id == ipc_ctrl->task_id)
  88. return 1;
  89. return 0;
  90. }
  91. int port_ipc_tx_wait(struct ccci_port *port)
  92. {
  93. struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  94. int ret;
  95. ret = wait_event_interruptible(ipc_ctrl->tx_wq, !test_and_set_bit(CCCI_TASK_PENDING, &ipc_ctrl->flag));
  96. if (ret == -ERESTARTSYS)
  97. return -EINTR;
  98. return 0;
  99. }
/* Tell MD we consumed an Rx message: send IPC_MSGSVC_RVC_DONE on the
 * RX_ACK channel, tagged with this port's task ID. Returns the send result. */
int port_ipc_rx_ack(struct ccci_port *port)
{
	struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;

	return ccci_send_msg_to_md(port->modem, CCCI_IPC_RX_ACK, IPC_MSGSVC_RVC_DONE, ipc_ctrl->task_id, 1);
}
static int send_new_time_to_md(int tz);
/* Last timezone value handed down via ioctl; re-sent to MD with time updates. */
volatile int current_time_zone = 0;
  107. int port_ipc_ioctl(struct ccci_port *port, unsigned int cmd, unsigned long arg)
  108. {
  109. int ret = 0;
  110. struct ccci_request *req = NULL;
  111. struct ccci_request *reqn;
  112. unsigned long flags;
  113. struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  114. switch (cmd) {
  115. case CCCI_IPC_RESET_RECV:
  116. /* purge the Rx list */
  117. spin_lock_irqsave(&port->rx_req_lock, flags);
  118. list_for_each_entry_safe(req, reqn, &port->rx_req_list, entry) {
  119. list_del(&req->entry);
  120. port->rx_length++;
  121. req->policy = RECYCLE;
  122. ccci_free_req(req);
  123. }
  124. INIT_LIST_HEAD(&port->rx_req_list);
  125. spin_unlock_irqrestore(&port->rx_req_lock, flags);
  126. break;
  127. case CCCI_IPC_RESET_SEND:
  128. clear_bit(CCCI_TASK_PENDING, &ipc_ctrl->flag);
  129. wake_up(&ipc_ctrl->tx_wq);
  130. break;
  131. case CCCI_IPC_WAIT_MD_READY:
  132. if (ipc_ctrl->md_is_ready == 0) {
  133. ret = wait_event_interruptible(ipc_ctrl->md_rdy_wq, ipc_ctrl->md_is_ready == 1);
  134. if (ret == -ERESTARTSYS)
  135. ret = -EINTR;
  136. }
  137. break;
  138. case CCCI_IPC_UPDATE_TIME:
  139. #ifdef FEATURE_MD_GET_CLIB_TIME
  140. CCCI_DBG_MSG(port->modem->index, IPC, "CCCI_IPC_UPDATE_TIME 0x%x\n", (unsigned int)arg);
  141. current_time_zone = (int)arg;
  142. ret = send_new_time_to_md((int)arg);
  143. #else
  144. CCCI_INF_MSG(port->modem->index, IPC, "CCCI_IPC_UPDATE_TIME 0x%x(dummy)\n", (unsigned int)arg);
  145. #endif
  146. break;
  147. case CCCI_IPC_WAIT_TIME_UPDATE:
  148. CCCI_DBG_MSG(port->modem->index, IPC, "CCCI_IPC_WAIT_TIME_UPDATE\n");
  149. ret = wait_time_update_notify();
  150. CCCI_DBG_MSG(port->modem->index, IPC, "CCCI_IPC_WAIT_TIME_UPDATE wakeup\n");
  151. break;
  152. case CCCI_IPC_UPDATE_TIMEZONE:
  153. CCCI_INF_MSG(port->modem->index, IPC, "CCCI_IPC_UPDATE_TIMEZONE keep 0x%x\n", (unsigned int)arg);
  154. current_time_zone = (int)arg;
  155. break;
  156. };
  157. return ret;
  158. }
  159. void port_ipc_md_state_notice(struct ccci_port *port, MD_STATE state)
  160. {
  161. struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  162. switch (state) {
  163. case READY:
  164. ipc_ctrl->md_is_ready = 1;
  165. wake_up_all(&ipc_ctrl->md_rdy_wq);
  166. break;
  167. default:
  168. break;
  169. };
  170. }
  171. int port_ipc_write_check_id(struct ccci_port *port, struct ccci_request *req)
  172. {
  173. struct ccci_ipc_ilm *ilm = (struct ccci_ipc_ilm *)((char *)req->skb->data + sizeof(struct ccci_header));
  174. struct ipc_task_id_map *id_map;
  175. id_map = local_MD_id_2_unify_id(ilm->dest_mod_id);
  176. if (id_map == NULL) {
  177. CCCI_ERR_MSG(port->modem->index, IPC, "Invalid Dest MD ID (%d)\n", ilm->dest_mod_id);
  178. return -CCCI_ERR_IPC_ID_ERROR;
  179. }
  180. return id_map->extq_id;
  181. }
  182. unsigned int port_ipc_poll(struct file *fp, struct poll_table_struct *poll)
  183. {
  184. struct ccci_port *port = fp->private_data;
  185. struct ccci_ipc_ctrl *ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  186. unsigned int mask = 0;
  187. poll_wait(fp, &ipc_ctrl->tx_wq, poll);
  188. poll_wait(fp, &port->rx_wq, poll);
  189. if (!list_empty(&port->rx_req_list))
  190. mask |= POLLIN | POLLRDNORM;
  191. if (!test_bit(CCCI_TASK_PENDING, &ipc_ctrl->flag))
  192. mask |= POLLOUT | POLLWRNORM;
  193. return mask;
  194. }
  195. int port_ipc_init(struct ccci_port *port)
  196. {
  197. struct ccci_ipc_ctrl *ipc_ctrl = kmalloc(sizeof(struct ccci_ipc_ctrl), GFP_KERNEL);
  198. port->private_data = ipc_ctrl;
  199. /*
  200. * tricky part, we use pre-defined minor number as task ID, then we modify it into the right number.
  201. */
  202. ipc_ctrl->task_id = port->minor;
  203. port->minor += CCCI_IPC_MINOR_BASE;
  204. ipc_task_ctrl[ipc_ctrl->task_id] = ipc_ctrl;
  205. init_waitqueue_head(&ipc_ctrl->tx_wq);
  206. init_waitqueue_head(&ipc_ctrl->md_rdy_wq);
  207. ipc_ctrl->md_is_ready = 0;
  208. ipc_ctrl->port = port;
  209. return 0;
  210. }
#define GF_PORT_LIST_MAX 128
/* Scratch table of filter items sent to MD on (de)registration. */
struct garbage_filter_item gf_port_list[GF_PORT_LIST_MAX];
/* frame size be less than 1400, so we made it global variable */
/* Port lists consumed by ccci_ipc_set_garbage_filter(); zero-terminated.
 * NOTE(review): no writer of gf_port_list_reg/gf_port_list_unreg is
 * visible in this file — presumably filled elsewhere; confirm. */
int gf_port_list_reg[GF_PORT_LIST_MAX];
int gf_port_list_unreg[GF_PORT_LIST_MAX];
  216. static int port_ipc_parse_gf_port(GF_IP_TYPE ip_type, GF_PROTOCOL_TYPE prot_type, struct garbage_filter_item *list,
  217. int number)
  218. {
  219. #define PORT_IPC_BUFFER_SIZE 256
  220. struct file *filp = NULL;
  221. static const char * const file_list[] = {
  222. "/proc/net/tcp",
  223. "/proc/net/tcp6",
  224. "/proc/net/udp",
  225. "/proc/net/udp6",
  226. };
  227. const char *file_name = NULL;
  228. int port_number = -1;
  229. char *buffer;
  230. buffer = kmalloc(PORT_IPC_BUFFER_SIZE, GFP_KERNEL);
  231. if (buffer == NULL) {
  232. CCCI_INF_MSG(-1, IPC, "%s kmalloc 256 failed\n", __func__);
  233. return -1;
  234. }
  235. if (prot_type == GF_TCP) {
  236. if (ip_type == GF_IPV4)
  237. file_name = file_list[0];
  238. else
  239. file_name = file_list[1];
  240. } else {
  241. if (ip_type == GF_IPV4)
  242. file_name = file_list[2];
  243. else
  244. file_name = file_list[3];
  245. }
  246. filp = filp_open(file_name, O_RDONLY, 0777);
  247. if (!IS_ERR(filp)) {
  248. port_number = 0;
  249. kernel_read(filp, 0, buffer, PORT_IPC_BUFFER_SIZE);
  250. /* TODO: parse file */
  251. if (prot_type == GF_TCP && ip_type == GF_IPV4) {
  252. int i;
  253. for (i = 0; i < GF_PORT_LIST_MAX && i < number; i++) {
  254. if (gf_port_list_reg[i] != 0) {
  255. port_number++;
  256. (list + i)->filter_id = i;
  257. (list + i)->ip_type = ip_type;
  258. (list + i)->protocol = prot_type;
  259. (list + i)->dst_port = gf_port_list_reg[i];
  260. (list + i)->magic_code = 168;
  261. } else {
  262. break;
  263. }
  264. }
  265. }
  266. }
  267. kfree(buffer);
  268. if (filp != NULL)
  269. filp_close(filp, NULL);
  270. CCCI_INF_MSG(-1, IPC, "IP:%d Protocol:%d port number:%d\n", ip_type, prot_type, port_number);
  271. return port_number;
  272. }
/*
 * Build and send a garbage-filter register/unregister message to MD
 * through the AP_IPC_GF kernel IPC port.
 * @md:  target modem (must be READY).
 * @reg: non-zero = register filters harvested via port_ipc_parse_gf_port();
 *       zero = unregister the IDs listed in gf_port_list_unreg (an empty
 *       list is sent as filter_cnt == -1, meaning "de-register all").
 * Returns the payload size (excluding the CCCI header) on success, or a
 * negative CCCI error code.
 */
int ccci_ipc_set_garbage_filter(struct ccci_modem *md, int reg)
{
	struct garbage_filter_header gf_header;
	int ret, actual_count, count = 0;
	struct ccci_request *req;
	struct ccci_header *ccci_h;
	struct ccci_ipc_ilm *ilm;
	struct local_para *local_para_ptr;
	struct ccci_port *port;
	int garbage_length;
	u32 task_id = AP_IPC_GF;

	memset(gf_port_list, 0, sizeof(gf_port_list));
	/* NOTE(review): assumes ipc_task_ctrl[AP_IPC_GF] was populated by
	 * port_ipc_init() before this is called — confirm with callers. */
	port = ipc_task_ctrl[task_id]->port;
	if (port->modem->md_state != READY)
		return -ENODEV;
	if (reg) {
		/* gather TCP/UDP x IPv4/IPv6 port filters, appending into gf_port_list */
		ret = port_ipc_parse_gf_port(GF_IPV4, GF_TCP, gf_port_list, GF_PORT_LIST_MAX - count);
		if (ret > 0)
			count += ret;
		ret = port_ipc_parse_gf_port(GF_IPV4, GF_UDP, gf_port_list + count, GF_PORT_LIST_MAX - count);
		if (ret > 0)
			count += ret;
		ret = port_ipc_parse_gf_port(GF_IPV6, GF_TCP, gf_port_list + count, GF_PORT_LIST_MAX - count);
		if (ret > 0)
			count += ret;
		ret = port_ipc_parse_gf_port(GF_IPV6, GF_UDP, gf_port_list + count, GF_PORT_LIST_MAX - count);
		if (ret > 0)
			count += ret;
		CCCI_INF_MSG(md->index, IPC, "register garbage filer port number %d\n", count);
		gf_header.filter_set_id = 0;
		gf_header.filter_cnt = count;
	} else {
		int i;

		/* count the zero-terminated unregister list */
		for (i = 0; i < GF_PORT_LIST_MAX; i++) {
			if (gf_port_list_unreg[i] != 0)
				count++;
			else
				break;
		}
		gf_header.filter_set_id = 0;
		if (count == 0)
			gf_header.filter_cnt = -1; /* de-register all */
		else
			gf_header.filter_cnt = count;
		CCCI_INF_MSG(md->index, IPC, "unregister garbage filer port number %d\n", count);
	}
	gf_header.uplink = 0;
	/* total frame: CCCI header + ilm + local_para + gf header + items */
	actual_count = sizeof(struct ccci_header) + sizeof(struct ccci_ipc_ilm) + sizeof(struct local_para);
	if (reg)
		actual_count += (sizeof(struct garbage_filter_header) + count * sizeof(struct garbage_filter_item));
	else
		actual_count += (sizeof(struct garbage_filter_header) + count * sizeof(int));
	req = ccci_alloc_req(OUT, actual_count, 1, 1);
	if (req) {
		req->policy = RECYCLE;
		req->ioc_override = 0;
		/* ccci header */
		ccci_h = (struct ccci_header *)skb_put(req->skb, sizeof(struct ccci_header));
		ccci_h->data[0] = 0;
		ccci_h->data[1] = actual_count;
		ccci_h->channel = port->tx_ch;
		ccci_h->reserved = 0;
		/* set ilm */
		ilm = (struct ccci_ipc_ilm *)skb_put(req->skb, sizeof(struct ccci_ipc_ilm));
		ilm->src_mod_id = AP_MOD_GF;
		ilm->dest_mod_id = MD_MOD_GF;
		ilm->sap_id = 0;
		if (reg)
			ilm->msg_id = IPC_MSG_ID_IPCORE_GF_REG;
		else
			ilm->msg_id = IPC_MSG_ID_IPCORE_GF_UNREG;
		ilm->local_para_ptr = 1; /* to let MD treat it as != NULL */
		ilm->peer_buff_ptr = 0;
		/* set ilm->local_para_ptr structure */
		local_para_ptr = (struct local_para *)skb_put(req->skb, sizeof(struct local_para));
		local_para_ptr->ref_count = 0;
		local_para_ptr->_stub = 0;
		if (reg)
			garbage_length = count * sizeof(struct garbage_filter_item);
		else
			garbage_length = count * sizeof(int);
		local_para_ptr->msg_len =
			garbage_length + sizeof(struct garbage_filter_header) + sizeof(struct local_para);
		/* copy gf header */
		memcpy(skb_put(req->skb, sizeof(struct garbage_filter_header)),
		       &gf_header, sizeof(struct garbage_filter_header));
		/* copy gf items */
		if (reg)
			memcpy(skb_put(req->skb, garbage_length), gf_port_list, garbage_length);
		else
			memcpy(skb_put(req->skb, garbage_length), gf_port_list_unreg, garbage_length);
		CCCI_INF_MSG(md->index, IPC, "garbage filer data length %d/%d\n", garbage_length, actual_count);
		ccci_mem_dump(md->index, req->skb->data, req->skb->len);
		/* send packet */
		ret = port_ipc_write_check_id(port, req);
		if (ret < 0)
			goto err_out;
		else
			ccci_h->reserved = ret; /* Unity ID */
		ret = ccci_port_send_request(port, req);
		if (ret)
			goto err_out;
		else
			return actual_count - sizeof(struct ccci_header);
err_out:
		ccci_free_req(req);
		return ret;
	} else {
		return -CCCI_ERR_ALLOCATE_MEMORY_FAIL;
	}
}
  384. static int port_ipc_kernel_write(ipc_ilm_t *in_ilm)
  385. {
  386. u32 task_id;
  387. int count, actual_count, ret;
  388. struct ccci_port *port;
  389. struct ccci_header *ccci_h;
  390. struct ccci_ipc_ilm *ilm;
  391. struct ccci_request *req;
  392. /* src module id check */
  393. task_id = in_ilm->src_mod_id & (~AP_UNIFY_ID_FLAG);
  394. if (task_id >= ARRAY_SIZE(ipc_task_ctrl)) {
  395. CCCI_ERR_MSG(-1, IPC, "invalid task ID %x\n", in_ilm->src_mod_id);
  396. return -1;
  397. }
  398. if (in_ilm->local_para_ptr == NULL) {
  399. CCCI_ERR_MSG(-1, IPC, "invalid ILM local parameter pointer %p for task %d\n", in_ilm, task_id);
  400. return -2;
  401. }
  402. port = ipc_task_ctrl[task_id]->port;
  403. if (port->modem->md_state != READY)
  404. return -ENODEV;
  405. count = sizeof(struct ccci_ipc_ilm) + in_ilm->local_para_ptr->msg_len;
  406. if (count > CCCI_MTU) {
  407. CCCI_ERR_MSG(port->modem->index, IPC, "reject packet(size=%d ), lager than MTU on %s\n", count,
  408. port->name);
  409. return -ENOMEM;
  410. }
  411. CCCI_DBG_MSG(port->modem->index, IPC, "write on %s for %d\n", port->name, in_ilm->local_para_ptr->msg_len);
  412. actual_count = count + sizeof(struct ccci_header);
  413. req = ccci_alloc_req(OUT, actual_count, 1, 1);
  414. if (req) {
  415. req->policy = RECYCLE;
  416. /* ccci header */
  417. ccci_h = (struct ccci_header *)skb_put(req->skb, sizeof(struct ccci_header));
  418. ccci_h->data[0] = 0;
  419. ccci_h->data[1] = actual_count;
  420. ccci_h->channel = port->tx_ch;
  421. ccci_h->reserved = 0;
  422. /* copy ilm */
  423. ilm = (struct ccci_ipc_ilm *)skb_put(req->skb, sizeof(struct ccci_ipc_ilm));
  424. ilm->src_mod_id = in_ilm->src_mod_id;
  425. ilm->dest_mod_id = in_ilm->dest_mod_id;
  426. ilm->sap_id = in_ilm->sap_id;
  427. ilm->msg_id = in_ilm->msg_id;
  428. ilm->local_para_ptr = 1; /* to let MD treat it as != NULL */
  429. ilm->peer_buff_ptr = 0;
  430. /* copy data */
  431. count = in_ilm->local_para_ptr->msg_len;
  432. memcpy(skb_put(req->skb, count), in_ilm->local_para_ptr, count);
  433. /* send packet */
  434. ret = port_ipc_write_check_id(port, req);
  435. if (ret < 0)
  436. goto err_out;
  437. else
  438. ccci_h->reserved = ret; /* Unity ID */
  439. ret = ccci_port_send_request(port, req);
  440. if (ret)
  441. goto err_out;
  442. else
  443. return actual_count - sizeof(struct ccci_header);
  444. err_out:
  445. ccci_free_req(req);
  446. return ret;
  447. } else {
  448. return -EBUSY;
  449. }
  450. }
/*
 * Rx callback for kernel IPC ports: move the request from the HIF queue
 * onto the port's Rx list and wake the port thread. When the list is at
 * its threshold, either drop (PORT_F_ALLOW_DROP) or push back.
 * Returns 0 on success, -CCCI_ERR_PORT_RX_FULL to push back, or
 * -CCCI_ERR_DROP_PACKET after discarding.
 */
static int port_ipc_kernel_recv_req(struct ccci_port *port, struct ccci_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&port->rx_req_lock, flags);
	CCCI_DBG_MSG(port->modem->index, IPC, "recv on %s, len=%d\n", port->name, port->rx_length);
	if (port->rx_length < port->rx_length_th) {
		port->flags &= ~PORT_F_RX_FULLED;
		port->rx_length++;
		list_del(&req->entry); /* dequeue from queue's list */
		list_add_tail(&req->entry, &port->rx_req_list);
		spin_unlock_irqrestore(&port->rx_req_lock, flags);
		wake_lock_timeout(&port->rx_wakelock, HZ);
		wake_up_all(&port->rx_wq);
		return 0;
	}
	port->flags |= PORT_F_RX_FULLED;
	spin_unlock_irqrestore(&port->rx_req_lock, flags);
	if (port->flags & PORT_F_ALLOW_DROP /* || !(port->flags&PORT_F_RX_EXCLUSIVE) */) {
		CCCI_INF_MSG(port->modem->index, IPC, "port %s Rx full, drop packet\n", port->name);
		goto drop;
	} else {
		/* caller keeps ownership and may retry later */
		return -CCCI_ERR_PORT_RX_FULL;
	}
drop:
	/* drop this packet */
	CCCI_INF_MSG(port->modem->index, IPC, "drop on %s, len=%d\n", port->name, port->rx_length);
	list_del(&req->entry);
	req->policy = RECYCLE;
	ccci_free_req(req);
	return -CCCI_ERR_DROP_PACKET;
}
  482. static int port_ipc_kernel_thread(void *arg)
  483. {
  484. struct ccci_port *port = arg;
  485. struct ccci_request *req;
  486. struct ccci_header *ccci_h;
  487. unsigned long flags;
  488. int ret;
  489. struct ccci_ipc_ilm *ilm;
  490. ipc_ilm_t out_ilm;
  491. CCCI_DBG_MSG(port->modem->index, IPC, "port %s's thread running\n", port->name);
  492. while (1) {
  493. if (list_empty(&port->rx_req_list)) {
  494. ret = wait_event_interruptible(port->rx_wq, !list_empty(&port->rx_req_list));
  495. if (ret == -ERESTARTSYS)
  496. continue; /* FIXME */
  497. }
  498. if (kthread_should_stop())
  499. break;
  500. CCCI_DBG_MSG(port->modem->index, IPC, "read on %s\n", port->name);
  501. /* 1. dequeue */
  502. spin_lock_irqsave(&port->rx_req_lock, flags);
  503. req = list_first_entry(&port->rx_req_list, struct ccci_request, entry);
  504. list_del(&req->entry);
  505. if (--(port->rx_length) == 0)
  506. ccci_port_ask_more_request(port);
  507. spin_unlock_irqrestore(&port->rx_req_lock, flags);
  508. /* 2. process the request */
  509. /* ccci header */
  510. ccci_h = (struct ccci_header *)req->skb->data;
  511. skb_pull(req->skb, sizeof(struct ccci_header));
  512. ilm = (struct ccci_ipc_ilm *)(req->skb->data);
  513. /* copy ilm */
  514. out_ilm.src_mod_id = ilm->src_mod_id;
  515. out_ilm.dest_mod_id = ccci_h->reserved;
  516. out_ilm.sap_id = ilm->sap_id;
  517. out_ilm.msg_id = ilm->msg_id;
  518. /* data pointer */
  519. skb_pull(req->skb, sizeof(struct ccci_ipc_ilm));
  520. out_ilm.local_para_ptr = (struct local_para *)(req->skb->data);
  521. out_ilm.peer_buff_ptr = 0;
  522. #ifdef FEATURE_CONN_MD_EXP_EN
  523. mtk_conn_md_bridge_send_msg(&out_ilm);
  524. #endif
  525. port->rx_length--;
  526. CCCI_DBG_MSG(port->modem->index, IPC, "read done on %s l=%d\n", port->name,
  527. out_ilm.local_para_ptr->msg_len);
  528. req->policy = RECYCLE;
  529. ccci_free_req(req);
  530. }
  531. return 0;
  532. }
  533. static int port_ipc_kernel_init(struct ccci_port *port)
  534. {
  535. struct ccci_ipc_ctrl *ipc_ctrl;
  536. CCCI_DBG_MSG(port->modem->index, IPC, "IPC kernel port %s is initializing\n", port->name);
  537. port->private_data = kthread_run(port_ipc_kernel_thread, port, "%s", port->name);
  538. port->rx_length_th = MAX_QUEUE_LENGTH;
  539. port_ipc_init(port);
  540. ipc_ctrl = (struct ccci_ipc_ctrl *)port->private_data;
  541. if (ipc_ctrl->task_id == AP_IPC_WMT) {
  542. #ifdef FEATURE_CONN_MD_EXP_EN
  543. CONN_MD_BRIDGE_OPS ccci_ipc_conn_ops = {.rx_cb = port_ipc_kernel_write };
  544. mtk_conn_md_bridge_reg(MD_MOD_EL1, &ccci_ipc_conn_ops);
  545. #endif
  546. }
  547. return 0;
  548. }
/* Port operations for kernel-space IPC ports (Rx handled by a kthread,
 * matching and MD-state handling shared with the char-dev variant). */
struct ccci_port_ops ipc_kern_port_ops = {
	.init = &port_ipc_kernel_init,
	.recv_request = &port_ipc_kernel_recv_req,
	.req_match = &port_ipc_req_match,
	.md_state_notice = &port_ipc_md_state_notice,
};
  555. int send_new_time_to_md(int tz)
  556. {
  557. ipc_ilm_t in_ilm;
  558. char local_param[sizeof(local_para_struct) + 16];
  559. unsigned int timeinfo[4];
  560. struct timeval tv = { 0 };
  561. do_gettimeofday(&tv);
  562. timeinfo[0] = tv.tv_sec;
  563. timeinfo[1] = sizeof(tv.tv_sec) > 4 ? tv.tv_sec >> 32 : 0;
  564. timeinfo[2] = tz;
  565. timeinfo[3] = sys_tz.tz_dsttime;
  566. in_ilm.src_mod_id = AP_MOD_CCCIIPC;
  567. in_ilm.dest_mod_id = MD_MOD_CCCIIPC;
  568. in_ilm.sap_id = 0;
  569. in_ilm.msg_id = IPC_MSG_ID_CCCIIPC_CLIB_TIME_REQ;
  570. in_ilm.local_para_ptr = (local_para_struct *)&local_param[0];
  571. /* msg_len not only contain local_para_ptr->data, but also contain 4 Bytes header itself */
  572. in_ilm.local_para_ptr->msg_len = 20;
  573. memcpy(in_ilm.local_para_ptr->data, timeinfo, 16);
  574. CCCI_INF_MSG(-1, IPC, "Update time(R): [sec=0x%lx][timezone=0x%08x][des=0x%08x]\n", tv.tv_sec,
  575. sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
  576. CCCI_INF_MSG(-1, IPC, "Update time(A): [L:0x%08x][H:0x%08x][0x%08x][0x%08x]\n", timeinfo[0], timeinfo[1],
  577. timeinfo[2], timeinfo[3]);
  578. if (port_ipc_kernel_write(&in_ilm) < 0) {
  579. CCCI_INF_MSG(-1, IPC, "Update fail\n");
  580. return -1;
  581. }
  582. CCCI_INF_MSG(-1, IPC, "Update success\n");
  583. return 0;
  584. }