/* ccci_rpc_main.c */
  1. /*****************************************************************************
  2. *
  3. * Filename:
  4. * ---------
* ccci_rpc_main.c
  6. *
  7. * Project:
  8. * --------
  9. * YuSu
  10. *
  11. * Description:
  12. * ------------
  13. * MT6516 CCCI RPC
  14. *
  15. * Author:
  16. * -------
  17. *
  18. *
  19. ****************************************************************************/
  20. #include <linux/sched.h>
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/device.h>
  24. #include <linux/cdev.h>
  25. #include <linux/kfifo.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/delay.h>
  29. #include <linux/uaccess.h>
  30. #include <linux/timer.h>
  31. #include <linux/delay.h>
  32. #include <linux/semaphore.h>
  33. #include <linux/version.h>
  34. #include <ccci.h>
  35. /*********************************************************************************
  36. * RPC Daemon section
  37. *********************************************************************************/
#if defined(CONFIG_MTK_TC1_FEATURE)
/* Message exchanged with the user-space RPC daemon via ioctl:
 * 'length' is the payload length inside the shared-memory instance,
 * 'index' is the shared-memory instance index it refers to. */
struct rpc_stream_msg_t {
	unsigned length;
	unsigned index;
};
/* Queue a buffer index for the user-space daemon and wake its reader. */
static void rpc_daemon_notify(int md_id, unsigned int buff_index);
/* op_id nibble marking a request that must be served by the daemon. */
#define RPC_CCCI_TC1_CMD 0x00003000
#define CCCI_RPC_IOC_MAGIC 'R'
#define CCCI_RPC_IOCTL_GET_INDEX _IO(CCCI_RPC_IOC_MAGIC, 1)
#define CCCI_RPC_IOCTL_SEND _IOR(CCCI_RPC_IOC_MAGIC, 2, unsigned int)
#endif /* CONFIG_MTK_TC1_FEATURE */
/* Per-modem RPC control block. */
struct rpc_ctl_block_t {
	spinlock_t rpc_fifo_lock;	/* protects rpc_fifo */
	struct RPC_BUF *rpc_buf_vir;	/* shared-memory base, kernel view */
	unsigned int rpc_buf_phy;	/* shared-memory base, physical address */
	unsigned int rpc_buf_len;	/* total shared-memory length */
	struct kfifo rpc_fifo;		/* buffer indexes queued by RX callback */
	struct work_struct rpc_work;	/* request-processing work item */
	int m_md_id;			/* modem id that owns this block */
	int rpc_smem_instance_size;	/* bytes per shared-memory instance */
	int rpc_max_buf_size;		/* max payload bytes per instance */
	int rpc_ch_num;			/* number of instances */
#if defined(CONFIG_MTK_TC1_FEATURE)
	spinlock_t rpc_daemon_fifo_lock;	/* protects rpc_daemon_fifo */
	struct kfifo rpc_daemon_fifo;	/* indexes handed to the user-space daemon */
	atomic_t rpcd_response_done;	/* set when the daemon has replied */
	wait_queue_head_t rpcd_send_waitq;	/* daemon blocks here for requests */
	wait_queue_head_t rpcd_response_waitq;	/* worker blocks here for replies */
	struct cdev rpc_cdev;		/* daemon character device */
	dev_t rpc_dev_num;		/* device number of rpc_cdev */
	atomic_t md_is_ready;		/* cleared while the modem is resetting */
	struct MD_CALL_BACK_QUEUE md_status_update_call_back;	/* MD event hook */
#ifdef _DEBUG_RPCD
	atomic_t rpcd_resp_indx;	/* debug: daemon round-trip counter */
#endif
#endif
};
/* One control block per modem, indexed by md_id. */
static struct rpc_ctl_block_t *rpc_ctl_block[MAX_MD_NUM];
  76. static int get_pkt_info(int md_id, unsigned int *pktnum, struct RPC_PKT *pkt_info,
  77. char *pdata)
  78. {
  79. unsigned int pkt_num = *((unsigned int *)pdata);
  80. unsigned int idx = 0;
  81. unsigned int i = 0;
  82. struct rpc_ctl_block_t *ctl_b = rpc_ctl_block[md_id];
  83. CCCI_RPC_MSG(md_id, "package number = 0x%08X\n", pkt_num);
  84. if (pkt_num > IPC_RPC_MAX_ARG_NUM)
  85. return -1;
  86. idx = sizeof(unsigned int);
  87. for (i = 0; i < pkt_num; i++) {
  88. pkt_info[i].len = *((unsigned int *)(pdata + idx));
  89. idx += sizeof(unsigned int);
  90. pkt_info[i].buf = (pdata + idx);
  91. CCCI_RPC_MSG(md_id, "pak[%d]: vir = 0x%08X, len = 0x%08X\n", i,
  92. (unsigned int)pkt_info[i].buf, pkt_info[i].len);
  93. /* 4 byte alignment */
  94. idx += ((pkt_info[i].len + 3) >> 2) << 2;
  95. }
  96. if (idx > ctl_b->rpc_max_buf_size) {
  97. CCCI_MSG_INF(md_id, "rpc",
  98. "over flow, pdata = %p, idx = 0x%08X, max = %p\n",
  99. pdata, idx, pdata + ctl_b->rpc_max_buf_size);
  100. return -1;
  101. }
  102. *pktnum = pkt_num;
  103. return 0;
  104. }
/*
 * Pack pkt_src[0..pkt_num-1] into shared-memory instance 'buf_idx' as
 * [pkt_num][len][payload (4B-aligned)]... and notify the modem over
 * CCCI_RPC_TX that the response is ready.
 * Returns 0 on success or a negative CCCI error code / send result.
 */
static int rpc_write(int md_id, int buf_idx, struct RPC_PKT *pkt_src,
		     unsigned int pkt_num)
{
	int ret = 0;
	struct ccci_msg_t msg;
	struct RPC_BUF *rpc_buf_tmp = NULL;
	unsigned char *pdata = NULL;
	unsigned int data_len = 0;
	unsigned int i = 0;
	unsigned int AlignLength = 0;
	struct rpc_ctl_block_t *ctl_b = rpc_ctl_block[md_id];

	/* Instances are rpc_smem_instance_size bytes apart (header plus a
	 * rpc_max_buf_size payload), not sizeof(struct RPC_BUF). */
	/* rpc_buf_tmp = ctl_b->rpc_buf_vir + buf_idx; */
	rpc_buf_tmp =
	    (struct RPC_BUF *) ((unsigned int)(ctl_b->rpc_buf_vir) +
				ctl_b->rpc_smem_instance_size * buf_idx);
	/* Flag the original op_id as a response so the modem can match it. */
	rpc_buf_tmp->op_id = IPC_RPC_API_RESP_ID | rpc_buf_tmp->op_id;
	pdata = rpc_buf_tmp->buf;
	*((unsigned int *)pdata) = pkt_num;
	pdata += sizeof(unsigned int);
	data_len += sizeof(unsigned int);
	for (i = 0; i < pkt_num; i++) {
		if ((data_len + 2 * sizeof(unsigned int) + pkt_src[i].len) >
		    ctl_b->rpc_max_buf_size) {
			CCCI_MSG_INF(md_id, "rpc", "Stream buffer full!!\n");
			ret = -CCCI_ERR_LARGE_THAN_BUF_SIZE;
			goto _Exit;
		}
		*((unsigned int *)pdata) = pkt_src[i].len;
		pdata += sizeof(unsigned int);
		data_len += sizeof(unsigned int);
		/* 4 byte aligned */
		AlignLength = ((pkt_src[i].len + 3) >> 2) << 2;
		data_len += AlignLength;
		/* Helpers may build the reply in place inside the shared
		 * buffer; skip the copy in that case. */
		if (pdata != pkt_src[i].buf)
			memcpy(pdata, pkt_src[i].buf, pkt_src[i].len);
		else
			CCCI_RPC_MSG(md_id, "same addr, no copy\n");
		pdata += AlignLength;
	}
	/* data0 = physical address of this instance from the modem's view. */
	/* msg.data0 = ctl_b->rpc_buf_phy + (sizeof(struct RPC_BUF) * buf_idx); */
	msg.data0 =
	    (unsigned int)(ctl_b->rpc_buf_phy) - get_md2_ap_phy_addr_fixed() +
	    ctl_b->rpc_smem_instance_size * buf_idx;
	/* NOTE(review): the extra 4 presumably covers the op_id word that
	 * precedes buf in struct RPC_BUF -- confirm against the layout. */
	msg.data1 = data_len + 4;
	msg.reserved = buf_idx;
	msg.channel = CCCI_RPC_TX;
	CCCI_RPC_MSG(md_id, "Write, %08X, %08X, %08X, %08X\n",
		     msg.data0, msg.data1, msg.channel, msg.reserved);
	/* wait memory updated */
	mb();
	ret = ccci_message_send(md_id, &msg, 1);
	if (ret != sizeof(struct ccci_msg_t)) {
		CCCI_MSG_INF(md_id, "rpc", "fail send msg <%d>!!!\n", ret);
		return ret;
	}
	ret = 0;
_Exit:
	return ret;
}
  164. static void ccci_rpc_work(struct work_struct *work)
  165. {
  166. int pkt_num = 0;
  167. int ret_val = 0;
  168. unsigned int buf_idx = 0;
  169. struct RPC_PKT pkt[IPC_RPC_MAX_ARG_NUM] = { {0}, };
  170. struct RPC_BUF *rpc_buf_tmp = NULL;
  171. unsigned int tmp_data[4];
  172. struct rpc_ctl_block_t *ctl_b = container_of(work, struct rpc_ctl_block_t, rpc_work);
  173. int md_id = ctl_b->m_md_id;
  174. #ifdef _DEBUG_RPCD
  175. int resp_inx;
  176. #endif
  177. CCCI_RPC_MSG(md_id, "ccci_rpc_work++\n");
  178. if (ctl_b->rpc_buf_vir == NULL) {
  179. CCCI_MSG_INF(md_id, "rpc", "invalid rpc_buf_vir!!\n");
  180. return;
  181. }
  182. while (kfifo_out(&ctl_b->rpc_fifo, &buf_idx, sizeof(unsigned int))) {
  183. if (buf_idx < 0 || buf_idx > ctl_b->rpc_ch_num) {
  184. CCCI_MSG_INF(md_id, "rpc", "invalid idx %d\n", buf_idx);
  185. ret_val = FS_PARAM_ERROR; /* !!!!! Make more meaningful */
  186. pkt[pkt_num].len = sizeof(unsigned int);
  187. pkt[pkt_num++].buf = (void *)&ret_val;
  188. goto _Next;
  189. }
  190. pkt_num = 0;
  191. memset(pkt, 0x00, sizeof(struct RPC_PKT) * IPC_RPC_MAX_ARG_NUM);
  192. /* rpc_buf_tmp = ctl_b->rpc_buf_vir + buf_idx; */
  193. rpc_buf_tmp =
  194. (struct RPC_BUF *) ((unsigned int)(ctl_b->rpc_buf_vir) +
  195. ctl_b->rpc_smem_instance_size * buf_idx);
  196. if (get_pkt_info(md_id, &pkt_num, pkt, rpc_buf_tmp->buf) < 0) {
  197. CCCI_MSG_INF(md_id, "rpc", "Fail to get packet info\n");
  198. ret_val = FS_PARAM_ERROR; /* !!!!! Make more meaningful */
  199. pkt[pkt_num].len = sizeof(unsigned int);
  200. pkt[pkt_num++].buf = (void *)&ret_val;
  201. goto _Next;
  202. }
  203. #if defined(CONFIG_MTK_TC1_FEATURE)
  204. if ((rpc_buf_tmp->op_id & 0x0000F000) == RPC_CCCI_TC1_CMD) {
  205. #ifdef _DEBUG_RPCD
  206. resp_inx = atomic_read(&ctl_b->rpcd_resp_indx);
  207. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc",
  208. "op_id=0x%X, Wait RPCD [Resp=%d]\n",
  209. rpc_buf_tmp->op_id, resp_inx);
  210. #endif
  211. rpc_daemon_notify(md_id, buf_idx);
  212. wait_event_interruptible(ctl_b->rpcd_response_waitq,
  213. atomic_read
  214. (&ctl_b->rpcd_response_done)
  215. == 1);
  216. atomic_set(&ctl_b->rpcd_response_done, 0);
  217. #ifdef _DEBUG_RPCD
  218. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc",
  219. "Done RPCD CMD [Resp=%d]\n", resp_inx);
  220. #endif
  221. continue;
  222. }
  223. #endif
  224. CCCI_RPC_MSG(md_id, "call ccci_rpc_work_helper()\n");
  225. ccci_rpc_work_helper(md_id, &pkt_num, pkt, rpc_buf_tmp, tmp_data);
  226. _Next:
  227. if (rpc_write(md_id, buf_idx, pkt, pkt_num) != 0) {
  228. CCCI_MSG_INF(md_id, "rpc",
  229. "fail to write packet!!\r\n");
  230. return;
  231. }
  232. }
  233. CCCI_RPC_MSG(md_id, "ccci_rpc_work--\n");
  234. }
  235. static void ccci_rpc_callback(void *private)
  236. {
  237. struct logic_channel_info_t *ch_info = (struct logic_channel_info_t *) private;
  238. struct ccci_msg_t msg;
  239. struct rpc_ctl_block_t *ctl_b = (struct rpc_ctl_block_t *) ch_info->m_owner;
  240. int md_id = ctl_b->m_md_id;
  241. while (get_logic_ch_data(ch_info, &msg)) {
  242. CCCI_RPC_MSG(md_id,
  243. "ccci_rpc_callback, %08X, %08X, %08X, %08X\n",
  244. msg.data0, msg.data1, msg.channel, msg.reserved);
  245. spin_lock_bh(&ctl_b->rpc_fifo_lock);
  246. kfifo_in(&ctl_b->rpc_fifo, &msg.reserved, sizeof(unsigned int));
  247. spin_unlock_bh(&ctl_b->rpc_fifo_lock);
  248. }
  249. schedule_work(&ctl_b->rpc_work);
  250. CCCI_RPC_MSG(md_id, "ccci_rpc_callback --\n");
  251. }
#if defined(CONFIG_MTK_TC1_FEATURE)
/* Hand a shared-memory buffer index to the user-space RPC daemon: push it
 * into the daemon fifo and wake any reader blocked in
 * rpc_get_share_mem_index(). */
void rpc_daemon_notify(int md_id, unsigned int buff_index)
{
	struct rpc_ctl_block_t *ctl_b = rpc_ctl_block[md_id];
#ifdef _DEBUG_RPCD
	CCCI_RPC_MSG(md_id, "[CCCI_RPC] rpc_daemon_notify(%d) [Resp=%d]\n",
		     buff_index, atomic_read(&ctl_b->rpcd_resp_indx));
#endif
	spin_lock_bh(&ctl_b->rpc_daemon_fifo_lock);
	kfifo_in(&ctl_b->rpc_daemon_fifo, &buff_index, sizeof(unsigned int));
	spin_unlock_bh(&ctl_b->rpc_daemon_fifo_lock);
	wake_up_interruptible(&ctl_b->rpcd_send_waitq);
}
  265. static int rpc_get_share_mem_index(struct file *file)
  266. {
  267. int ret;
  268. struct rpc_ctl_block_t *ctl_b = (struct rpc_ctl_block_t *) file->private_data;
  269. if (ctl_b == NULL) {
  270. CCCI_ERR_MSG(ctl_b->m_md_id, "rpc_get_share_mem_index:ctl_b == NULL\n");
  271. return -EFAULT;
  272. }
  273. #ifdef _DEBUG_RPCD
  274. atomic_inc(&ctl_b->rpcd_resp_indx);
  275. CCCI_RPC_MSG(ctl_b->m_md_id, "get index start [Resp=%d]\n",
  276. atomic_read(&ctl_b->rpcd_resp_indx));
  277. #endif
  278. if (wait_event_interruptible
  279. (ctl_b->rpcd_send_waitq,
  280. kfifo_len(&ctl_b->rpc_daemon_fifo) != 0) != 0) {
  281. CCCI_MSG_INF(ctl_b->m_md_id, "rpc",
  282. "return rpc_get_share_mem_index():ERESTARTSYS\n");
  283. return -ERESTARTSYS;
  284. }
  285. if (kfifo_out
  286. (&ctl_b->rpc_daemon_fifo, (unsigned int *)&ret,
  287. sizeof(int)) != sizeof(int)) {
  288. CCCI_MSG_INF(ctl_b->m_md_id, "rpc",
  289. "Unable to get new request from fifo\n");
  290. return -EFAULT;
  291. }
  292. #ifdef _DEBUG_RPCD
  293. CCCI_RPC_MSG(ctl_b->m_md_id, "get index end [Resp=%d]\n",
  294. atomic_read(&ctl_b->rpcd_resp_indx));
  295. #endif
  296. return ret;
  297. }
/* Signal ccci_rpc_work() that the outstanding TC1 request is finished
 * (daemon replied, or a modem reset aborted it) so it resumes draining. */
static void wakeup_rpc_work(struct rpc_ctl_block_t *ctl_b)
{
	CCCI_RPC_MSG(ctl_b->m_md_id, "wakeup rpc_work\n");
	atomic_set(&ctl_b->rpcd_response_done, 1);
	wake_up_interruptible(&ctl_b->rpcd_response_waitq);
}
/* Drop all queued work on modem reset: empty both the worker fifo and the
 * daemon fifo, each under its own lock. */
static void ccci_rpc_resetfifo(struct rpc_ctl_block_t *ctl_b)
{
	CCCI_MSG("(%d) ccci_rpc_resetfifo\n", ctl_b->m_md_id);
	spin_lock_bh(&ctl_b->rpc_fifo_lock);
	kfifo_reset(&ctl_b->rpc_fifo);
	spin_unlock_bh(&ctl_b->rpc_fifo_lock);
	spin_lock_bh(&ctl_b->rpc_daemon_fifo_lock);
	kfifo_reset(&ctl_b->rpc_daemon_fifo);
	spin_unlock_bh(&ctl_b->rpc_daemon_fifo_lock);
}
  314. static void rpc_md_ev_callback(struct MD_CALL_BACK_QUEUE *queue, unsigned long data)
  315. {
  316. struct rpc_ctl_block_t *ctl_b =
  317. container_of(queue, struct rpc_ctl_block_t, md_status_update_call_back);
  318. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc", "rpc_md_ev_callback++\n");
  319. switch (data) {
  320. case CCCI_MD_RESET:
  321. CCCI_MSG_INF(ctl_b->m_md_id, "rpc", "MD reset call chain !\n");
  322. atomic_set(&ctl_b->md_is_ready, 0);
  323. ccci_rpc_resetfifo(ctl_b);
  324. wakeup_rpc_work(ctl_b);
  325. break;
  326. case CCCI_MD_BOOTUP:
  327. atomic_set(&ctl_b->rpcd_response_done, 0);
  328. atomic_set(&ctl_b->md_is_ready, 1);
  329. CCCI_MSG_INF(ctl_b->m_md_id, "rpc",
  330. "MD boot up successfully.\n");
  331. break;
  332. }
  333. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc", "rpc_md_ev_callback--\n");
  334. }
  335. static int rpc_daemon_send_helper(struct file *file, unsigned long arg)
  336. {
  337. void __user *argp;
  338. struct ccci_msg_t msg;
  339. struct rpc_stream_msg_t message;
  340. int md_id;
  341. int ret = 0;
  342. struct rpc_ctl_block_t *ctl_b = (struct rpc_ctl_block_t *) file->private_data; /* rpc_ctl_block[0]; */
  343. #ifdef _DEBUG_RPCD
  344. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc", "Send RPCD -> MD [Resp=%d]\n",
  345. atomic_read(&ctl_b->rpcd_resp_indx));
  346. #endif
  347. if (ctl_b == NULL) {
  348. ret = -EFAULT;
  349. goto _send_return;
  350. }
  351. md_id = ctl_b->m_md_id;
  352. argp = (void __user *)arg;
  353. if (atomic_read(&ctl_b->md_is_ready) == 0) {
  354. CCCI_MSG_INF(ctl_b->m_md_id, "rpc",
  355. "rpc_daemon_send_helper-- by MD_RESET!\n");
  356. ret = -EINTR;
  357. goto _send_return;
  358. }
  359. if (copy_from_user((void *)&message, argp, sizeof(struct rpc_stream_msg_t))) {
  360. ret = -EFAULT;
  361. goto _send_return;
  362. }
  363. msg.data0 =
  364. (unsigned int)(ctl_b->rpc_buf_phy) - get_md2_ap_phy_addr_fixed() +
  365. ctl_b->rpc_smem_instance_size * message.index;
  366. msg.data1 = message.length + 4;
  367. msg.reserved = message.index;
  368. msg.channel = CCCI_RPC_TX;
  369. CCCI_RPC_MSG(md_id, "Write, %08X, %08X, %08X, %08X\n",
  370. msg.data0, msg.data1, msg.channel, msg.reserved);
  371. /* wait memory updated*/
  372. mb();
  373. ret = ccci_message_send(md_id, &msg, 1);
  374. if (ret != sizeof(struct ccci_msg_t)) {
  375. CCCI_MSG_INF(md_id, "rpc", "fail send msg <%d>!!!\n", ret);
  376. goto _send_return;
  377. }
  378. _send_return:
  379. CCCI_DBG_MSG(ctl_b->m_md_id, "rpc", "rpc_daemon_send_helper--(%d)\n",
  380. ret);
  381. wakeup_rpc_work(ctl_b);
  382. return ret;
  383. }
/*
 * Map RPC shared memory into the daemon's address space, uncached.
 * NOTE(review): 'len' is derived from rpc_smem_instance_size (a single
 * instance), not rpc_buf_len -- presumably the daemon maps one instance
 * at a time via vm_pgoff; confirm against the user-space caller.
 */
static int rpc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long off, start, len;
	struct rpc_ctl_block_t *ctl_b = (struct rpc_ctl_block_t *) file->private_data; /* rpc_ctl_block[0]; */

	/* Reject offsets that would overflow when shifted to bytes. */
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	off = vma->vm_pgoff << PAGE_SHIFT;
	start = (unsigned long)ctl_b->rpc_buf_phy;
	len = PAGE_ALIGN((start & ~PAGE_MASK) + ctl_b->rpc_smem_instance_size);
	if ((vma->vm_end - vma->vm_start + off) > len)
		return -EINVAL;
	off += start & PAGE_MASK;
	vma->vm_pgoff = off >> PAGE_SHIFT;
	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
	/* Memory shared with the modem must not be cached by the CPU. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
  402. static long rpc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  403. {
  404. int ret;
  405. switch (cmd) {
  406. case CCCI_RPC_IOCTL_GET_INDEX:
  407. ret = rpc_get_share_mem_index(file);
  408. break;
  409. case CCCI_RPC_IOCTL_SEND:
  410. ret = rpc_daemon_send_helper(file, arg);
  411. break;
  412. default:
  413. ret = -ENOIOCTLCMD;
  414. break;
  415. }
  416. return ret;
  417. }
  418. static int rpc_open(struct inode *inode, struct file *file)
  419. {
  420. int major = imajor(inode);
  421. int md_id;
  422. md_id = get_md_id_by_dev_major(major);
  423. file->private_data = (void *)rpc_ctl_block[md_id];
  424. return 0;
  425. }
/* release(): nothing to tear down per-open; private_data points at a
 * global control block owned elsewhere. */
static int rpc_release(struct inode *inode, struct file *file)
{
	return 0;
}
/* File operations for the ccci_rpc character device (TC1 daemon). */
static const struct file_operations rpc_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = rpc_ioctl,
	.open = rpc_open,
	.mmap = rpc_mmap,
	.release = rpc_release,
};
  437. static int rpc_device_init(struct rpc_ctl_block_t *ctl_b)
  438. {
  439. int ret;
  440. int major, minor;
  441. char name[16];
  442. int md_id = ctl_b->m_md_id;
  443. CCCI_MSG_INF(md_id, "rpc", "rpc_device_init++\n");
  444. ctl_b->md_status_update_call_back.call = rpc_md_ev_callback;
  445. ctl_b->md_status_update_call_back.next = NULL,
  446. CCCI_MSG_INF(md_id, "rpc", "register md_db rpc_md_ev_callback\n");
  447. md_register_call_chain(md_id, &ctl_b->md_status_update_call_back);
  448. #ifdef _DEBUG_RPCD
  449. atomic_set(&ctl_b->rpcd_resp_indx, 0);
  450. #endif
  451. atomic_set(&ctl_b->md_is_ready, 1);
  452. atomic_set(&ctl_b->rpcd_response_done, 0);
  453. init_waitqueue_head(&ctl_b->rpcd_response_waitq);
  454. init_waitqueue_head(&ctl_b->rpcd_send_waitq);
  455. ret = get_dev_id_by_md_id(md_id, "rpc", &major, &minor);
  456. if (ret < 0) {
  457. CCCI_MSG_INF(md_id, "rpc", "get rpc dev id fail: %d\n", ret);
  458. goto _ret;
  459. }
  460. spin_lock_init(&ctl_b->rpc_daemon_fifo_lock);
  461. ret =
  462. kfifo_alloc(&ctl_b->rpc_daemon_fifo, sizeof(unsigned int) * 8,
  463. GFP_KERNEL);
  464. if (ret) {
  465. CCCI_MSG_INF(md_id, "rpc", "unable to create daemon fifo\n");
  466. goto _ret;
  467. }
  468. if (md_id)
  469. snprintf(name, 16, "ccci%d_rpc", md_id + 1);
  470. else
  471. strcpy(name, "ccci_rpc");
  472. ctl_b->rpc_dev_num = MKDEV(major, minor); /* Using FS major, sub id is 1, not 0 */
  473. ret = register_chrdev_region(ctl_b->rpc_dev_num, 1, name);
  474. if (ret) {
  475. CCCI_MSG_INF(md_id, "rpc",
  476. "ccci_rpc: Register character device failed\n");
  477. goto _ret_kfifo_free;
  478. }
  479. cdev_init(&ctl_b->rpc_cdev, &rpc_fops);
  480. ctl_b->rpc_cdev.owner = THIS_MODULE;
  481. ctl_b->rpc_cdev.ops = &rpc_fops;
  482. ret = cdev_add(&ctl_b->rpc_cdev, ctl_b->rpc_dev_num, 1);
  483. if (ret) {
  484. CCCI_MSG_INF(md_id, "rpc",
  485. "ccci_rpc: Char device add failed\n");
  486. unregister_chrdev_region(ctl_b->rpc_dev_num, 1);
  487. goto _ret_kfifo_free;
  488. }
  489. goto _ret;
  490. _ret_kfifo_free:
  491. kfifo_free(&ctl_b->rpc_daemon_fifo);
  492. _ret:
  493. CCCI_MSG_INF(md_id, "rpc", "rpc_device_init--\n");
  494. return ret;
  495. }
  496. void rpc_device_deinit(int md_id)
  497. {
  498. struct rpc_ctl_block_t *ctl_b = rpc_ctl_block[md_id];
  499. kfifo_free(&ctl_b->rpc_daemon_fifo);
  500. cdev_del(&ctl_b->rpc_cdev);
  501. unregister_chrdev_region(ctl_b->rpc_dev_num, 1);
  502. }
  503. #endif /* CONFIG_MTK_TC1_FEATURE */
  504. int __init ccci_rpc_init(int md_id)
  505. {
  506. int ret;
  507. struct rpc_ctl_block_t *ctl_b;
  508. struct rpc_cfg_inf_t rpc_cfg;
  509. int rpc_buf_vir, rpc_buf_phy, rpc_buf_len;
  510. /* Allocate fs ctrl struct memory */
  511. ctl_b = kmalloc(sizeof(struct rpc_ctl_block_t), GFP_KERNEL);
  512. if (ctl_b == NULL)
  513. return -CCCI_ERR_GET_MEM_FAIL;
  514. memset(ctl_b, 0, sizeof(struct rpc_ctl_block_t));
  515. rpc_ctl_block[md_id] = ctl_b;
  516. /* Get rpc config information */
  517. ccci_get_sub_module_cfg(md_id, "rpc", (char *)&rpc_cfg, sizeof(struct rpc_cfg_inf_t));
  518. ccci_rpc_base_req(md_id, &rpc_buf_vir, &rpc_buf_phy, &rpc_buf_len);
  519. ctl_b->rpc_buf_vir = (struct RPC_BUF *) rpc_buf_vir;
  520. ctl_b->rpc_buf_phy = (unsigned int)rpc_buf_phy;
  521. ctl_b->rpc_buf_len = rpc_buf_len;
  522. ctl_b->rpc_max_buf_size = rpc_cfg.rpc_max_buf_size;
  523. ctl_b->rpc_ch_num = rpc_cfg.rpc_ch_num;
  524. ctl_b->rpc_smem_instance_size =
  525. sizeof(struct RPC_BUF) + ctl_b->rpc_max_buf_size;
  526. /* Note!!!!! we should check cofigure mistake */
  527. /* Init ctl_b */
  528. ctl_b->m_md_id = md_id;
  529. spin_lock_init(&ctl_b->rpc_fifo_lock);
  530. ret =
  531. kfifo_alloc(&ctl_b->rpc_fifo, sizeof(unsigned) * ctl_b->rpc_ch_num,
  532. GFP_KERNEL);
  533. if (ret < 0) {
  534. CCCI_MSG_INF(md_id, "rpc", "Unable to create fifo\n");
  535. goto _KFIFO_ALLOC_FAIL;
  536. }
  537. INIT_WORK(&ctl_b->rpc_work, ccci_rpc_work);
  538. /* modem related channel registration. */
  539. CCCI_RPC_MSG(md_id,
  540. "rpc_buf_vir=0x%p, rpc_buf_phy=0x%08X, rpc_buf_len=0x%08X\n",
  541. ctl_b->rpc_buf_vir, ctl_b->rpc_buf_phy,
  542. ctl_b->rpc_buf_len);
  543. register_to_logic_ch(md_id, CCCI_RPC_RX, ccci_rpc_callback, ctl_b);
  544. #if defined(CONFIG_MTK_TC1_FEATURE)
  545. register_to_logic_ch(md_id, CCCI_RPC_TX, ccci_rpc_callback, ctl_b);
  546. ret = rpc_device_init(ctl_b);
  547. if (0 != ret)
  548. goto _KFIFO_ALLOC_FAIL;
  549. #endif
  550. return ret;
  551. _KFIFO_ALLOC_FAIL:
  552. kfree(ctl_b);
  553. rpc_ctl_block[md_id] = NULL;
  554. return ret;
  555. }
/* Per-modem RPC teardown: release the daemon device (TC1), free the
 * request fifo, detach from the logic channels and drop the ctrl block. */
void __exit ccci_rpc_exit(int md_id)
{
	struct rpc_ctl_block_t *ctl_b;

	CCCI_RPC_MSG(md_id, "ccci_rpc_exit\n");
	ctl_b = rpc_ctl_block[md_id];
	/* Init may have failed or never run for this modem. */
	if (ctl_b == NULL)
		return;
#if defined(CONFIG_MTK_TC1_FEATURE)
	rpc_device_deinit(md_id);
#endif
	kfifo_free(&ctl_b->rpc_fifo);
	un_register_to_logic_ch(md_id, CCCI_RPC_RX);
#if defined(CONFIG_MTK_TC1_FEATURE)
	un_register_to_logic_ch(md_id, CCCI_RPC_TX);
#endif
	kfree(ctl_b);
	rpc_ctl_block[md_id] = NULL;
}