/*****************************************************************************
 *
 * Filename:
 * ---------
 *   ccci_ipc.c
 *
 *
 * Author:
 * -------
 *
 ****************************************************************************/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <ccci.h>
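/*
 * Naming convention used below: "local" ids are task ids on the AP or MD
 * side, while "unify" ids are the shared external-queue ids carried in
 * CCCI messages. Unify ids owned by the AP side carry AP_UNIFY_ID_FLAG.
 */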
#define local_AP_id_2_unify_id(id) local_xx_id_2_unify_id(id, 1)
#define local_MD_id_2_unify_id(id) local_xx_id_2_unify_id(id, 0)
#define unify_AP_id_2_local_id(id) unify_xx_id_2_local_id(id, 1)
#define unify_MD_id_2_local_id(id) unify_xx_id_2_local_id(id, 0)
struct ipc_ctl_block_t {
        int m_md_id;
        spinlock_t ccci_ipc_wr_lock;
        spinlock_t ccci_ipc_rd_lock;
        CCCI_IPC_MEM *ipc_mem;
        unsigned int ccci_ipc_smem_base_phy;
        int ccci_ipc_smem_size;
        unsigned int ccci_ipc_wr_buffer_phy;
        unsigned int ccci_ipc_rd_buffer_phy;
        struct cdev ccci_ipc_cdev;
        wait_queue_head_t poll_md_queue_head;
        int md_is_ready;
        IPC_TASK ipc_task[MAX_NUM_IPC_TASKS];
        struct MD_CALL_BACK_QUEUE md_status_update_call_back;
        int major;
        int start_minor;
};
static struct ipc_ctl_block_t *ipc_ctl_block[MAX_MD_NUM];
static void release_recv_item(CCCI_RECV_ITEM *item);
static void ipc_call_back_func(struct MD_CALL_BACK_QUEUE *, unsigned long);
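/*
 * Reset the shared IPC memory: both ring buffers (buff_wr for AP->MD,
 * buff_rd for MD->AP) are emptied, and every per-task ilm slot is
 * poisoned with -1/NULL so stale data is never mistaken for a message.
 */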
static void ipc_smem_init(CCCI_IPC_MEM *ipc_mem)
{
        int i;

        ipc_mem->buffer.buff_wr.size = CCCI_IPC_BUFFER_SIZE;
        ipc_mem->buffer.buff_wr.rx_offset = 0;
        ipc_mem->buffer.buff_wr.tx_offset = 0;
        ipc_mem->buffer.buff_rd.size = CCCI_IPC_BUFFER_SIZE;
        ipc_mem->buffer.buff_rd.rx_offset = 0;
        ipc_mem->buffer.buff_rd.tx_offset = 0;
        for (i = 0; i < MAX_NUM_IPC_TASKS; i++) {
                (ipc_mem->ilm + i)->src_mod_id = -1UL;
                (ipc_mem->ilm + i)->dest_mod_id = -1UL;
                (ipc_mem->ilm + i)->sap_id = -1UL;
                (ipc_mem->ilm + i)->msg_id = -1UL;
                (ipc_mem->ilm + i)->local_para_ptr = NULL;
                (ipc_mem->ilm + i)->peer_buff_ptr = NULL;
        }
}
int ccci_ipc_ipo_h_restore(int md_id)
{
        struct ipc_ctl_block_t *ctl_b;

        ctl_b = ipc_ctl_block[md_id];
        ipc_smem_init(ctl_b->ipc_mem);
        return 0;
}
static void ipc_call_back_func(struct MD_CALL_BACK_QUEUE *queue,
                               unsigned long data)
{
        IPC_TASK *tsk;
        int i;
        CCCI_RECV_ITEM *item, *n;
        struct ipc_ctl_block_t *ctl_b =
                container_of(queue, struct ipc_ctl_block_t,
                             md_status_update_call_back);
        unsigned long flags;

        switch (data) {
        case CCCI_MD_EXCEPTION:
                ctl_b->md_is_ready = 0;
                CCCI_DBG_MSG(ctl_b->m_md_id, "ipc",
                             "MD exception call chain!\n");
                break;
        case CCCI_MD_RESET:
                /* if (ctl_b->md_is_ready) */
                {
                        ctl_b->md_is_ready = 0;
                        CCCI_DBG_MSG(ctl_b->m_md_id, "ipc",
                                     "MD reset call chain!\n");
                        for (i = 0; i < MAX_NUM_IPC_TASKS; i++) {
                                tsk = ctl_b->ipc_task + i;
                                spin_lock_irqsave(&tsk->lock, flags);
                                list_for_each_entry_safe(item, n,
                                                         &tsk->recv_list,
                                                         list) {
                                        release_recv_item(item);
                                }
                                spin_unlock_irqrestore(&tsk->lock, flags);
                                /* __wake_up(&tsk->write_wait_queue, TASK_NORMAL, 0, (void *)POLLERR); */
                                /* __wake_up(&tsk->read_wait_queue, TASK_NORMAL, 0, (void *)POLLERR); */
                        }
                        spin_lock_irqsave(&ctl_b->ccci_ipc_wr_lock, flags);
                        ctl_b->ipc_mem->buffer.buff_wr.tx_offset = 0;
                        ctl_b->ipc_mem->buffer.buff_wr.rx_offset = 0;
                        spin_unlock_irqrestore(&ctl_b->ccci_ipc_wr_lock, flags);
                        spin_lock_irqsave(&ctl_b->ccci_ipc_rd_lock, flags);
                        ctl_b->ipc_mem->buffer.buff_rd.tx_offset = 0;
                        ctl_b->ipc_mem->buffer.buff_rd.rx_offset = 0;
                        spin_unlock_irqrestore(&ctl_b->ccci_ipc_rd_lock, flags);
                }
                break;
        case CCCI_MD_BOOTUP:
                ctl_b->md_is_ready = 1;
                wake_up_all(&ctl_b->poll_md_queue_head);
                CCCI_IPC_MSG(ctl_b->m_md_id, "MD boot up successfully.\n");
                break;
        }
}
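/*
 * ipc_msgsvc_maptbl is built with an X-macro style include: with
 * __IPC_ID_TABLE defined, ccci_ipc_task_ID.h expands to the initializer
 * entries (task_id <-> extq_id pairs) that populate this table.
 */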
static IPC_MSGSVC_TASKMAP_T ipc_msgsvc_maptbl[] = {
#define __IPC_ID_TABLE
#include "ccci_ipc_task_ID.h"
#undef __IPC_ID_TABLE
};
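/*
 * Called on a TX ack for unify id "to_id": among the tasks waiting on
 * that id, pick the one with the oldest pending write (smallest
 * w_jiffies), clear its CCCI_TASK_PENDING bit and wake its writer. Two
 * identical timestamps indicate a bookkeeping error and are logged.
 */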
void find_task_to_clear(IPC_TASK task_table[], unsigned int to_id)
{
        IPC_TASK *task = NULL;
        int i, tmp = 0;
        struct ipc_ctl_block_t *ctl_b =
                container_of(task_table, struct ipc_ctl_block_t, ipc_task[0]);

        for (i = 0; i < MAX_NUM_IPC_TASKS; i++) {
                if (task_table[i].to_id == to_id) {
                        CCCI_DBG_MSG(ctl_b->m_md_id, "ipc",
                                     "%s: task->to_id(%d:%d)\n", __func__,
                                     i, task_table[i].to_id);
                        if (task == NULL) {
                                task = ctl_b->ipc_task + i;
                                tmp = i;
                                continue;
                        }
                        if (time_after(task->w_jiffies, task_table[i].w_jiffies)) {
                                task = task_table + i;
                                CCCI_DBG_MSG(ctl_b->m_md_id, "ipc",
                                             "%s: select task->to_id(%d:%d)\n",
                                             __func__, i, task_table[i].to_id);
                        } else if (task->w_jiffies == task_table[i].w_jiffies) {
                                CCCI_DBG_MSG(ctl_b->m_md_id, "ipc",
                                             "[Error]Wrong time stamp(%ld, %ld), select task->to_id(%d:%d)\n",
                                             task->w_jiffies,
                                             task_table[i].w_jiffies, tmp,
                                             task->to_id);
                        }
                }
        }
        if (task == NULL) {
                CCCI_MSG_INF(ctl_b->m_md_id, "ipc",
                             "Wrong MD ID(%d) to clear for next recv.\n",
                             to_id);
                return;
        }
        CCCI_IPC_MSG(ctl_b->m_md_id, "wake up task:%d\n",
                     (int)(task - ctl_b->ipc_task));
        clear_bit(CCCI_TASK_PENDING, &task->flag);
        wake_up_poll(&task->write_wait_queue, POLLOUT);
}
static IPC_MSGSVC_TASKMAP_T *local_xx_id_2_unify_id(uint32 local_id, int AP)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ipc_msgsvc_maptbl); i++) {
                if (ipc_msgsvc_maptbl[i].task_id == local_id &&
                    (AP ? (ipc_msgsvc_maptbl[i].extq_id & AP_UNIFY_ID_FLAG) :
                          !(ipc_msgsvc_maptbl[i].extq_id & AP_UNIFY_ID_FLAG)))
                        return ipc_msgsvc_maptbl + i;
        }
        return NULL;
}
static IPC_MSGSVC_TASKMAP_T *unify_xx_id_2_local_id(uint32 unify_id, int AP)
{
        int i;

        if (!(AP ? (unify_id & AP_UNIFY_ID_FLAG) :
                   !(unify_id & AP_UNIFY_ID_FLAG)))
                return NULL;
        for (i = 0; i < ARRAY_SIZE(ipc_msgsvc_maptbl); i++) {
                if (ipc_msgsvc_maptbl[i].extq_id == unify_id)
                        return ipc_msgsvc_maptbl + i;
        }
        return NULL;
}
static int ccci_ipc_write_stream(int md_id, int channel, int addr, int len,
                                 uint32 reserved)
{
        struct ccci_msg_t msg;

        msg.addr = addr;
        msg.len = len;
        msg.channel = channel;
        msg.reserved = reserved;
        CCCI_IPC_MSG(md_id, "write to task:%d addr:%#x len:%d.\n", reserved,
                     addr, len);
        return ccci_message_send(md_id, &msg, 1);
}
static int ccci_ipc_ack(int md_id, int channel, int id, uint32 reserved)
{
        struct ccci_msg_t msg;

        msg.magic = 0xFFFFFFFF;
        msg.id = id;
        msg.channel = channel;
        msg.reserved = reserved;
        return ccci_message_send(md_id, &msg, 1);
}
static void release_recv_item(CCCI_RECV_ITEM *item)
{
        if (item) {
                if (!list_empty(&item->list))
                        list_del_init(&item->list);
                kfree(item->data);
                kfree(item);
        }
}
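/*
 * The ring-buffer index math below masks offsets with (size - 1), which
 * assumes CCCI_IPC_BUFFER_SIZE is a power of two. Worked example with
 * size = 16, read = 14, write = 4: data_size = 16 - (14 - 4) = 6 bytes,
 * copied as a 2-byte tail chunk (offsets 14..15) followed by a 4-byte
 * head chunk (offsets 0..3).
 */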
void *read_from_ring_buffer(int md_id, ipc_ilm_t *ilm, BUFF *buff_rd,
                            int *len)
{
        int size;
        int write;
        int read;
        int data_size;
        uint8 *data;
        void *ret = NULL;
        int over = 0;
        int copy = 0;
        int real_size = 0;
        unsigned long flag;
        struct ipc_ctl_block_t *ctl_b = ipc_ctl_block[md_id];

        spin_lock_irqsave(&ctl_b->ccci_ipc_rd_lock, flag);
        size = buff_rd->size;
        write = buff_rd->tx_offset;
        read = buff_rd->rx_offset;
        data_size =
                (write - read) >= 0 ? (write - read) : (size - (read - write));
        if (data_size == 0) {
                CCCI_IPC_MSG(md_id, "data_size=0, read(%d)", read);
        } else if (data_size < 0) {
                CCCI_MSG_INF(md_id, "ipc", "[Error]wrong data_size: %d",
                             data_size);
                goto out;       /* drop the lock before returning NULL */
        }
        CCCI_IPC_MSG(md_id, "tx_offset=%d, rx_offset=%d\n", write, read);
        data = kmalloc(data_size + sizeof(ipc_ilm_t), GFP_ATOMIC);
        if (data == NULL) {
                CCCI_MSG_INF(md_id, "ipc", "kmalloc for read ilm fail!\n");
                ret = NULL;
                goto out;
        }
        *((ipc_ilm_t *)data) = *ilm;
        ilm = (ipc_ilm_t *)data;
        data += sizeof(ipc_ilm_t);
        if (write < read)
                over = size - read;
        if (over) {
                if (data_size < over)
                        over = data_size;
                memcpy(data, buff_rd->buffer + read, over);
                copy += over;
                read = (read + over) & (size - 1);
        }
        if (copy < data_size)
                memcpy(data + copy, buff_rd->buffer + read, data_size - copy);
        real_size +=
                (ilm->local_para_ptr) ? ((local_para_struct *)data)->msg_len : 0;
        data += real_size;
        real_size +=
                (ilm->peer_buff_ptr) ? ((peer_buff_struct *)data)->pdu_len : 0;
        buff_rd->rx_offset += real_size;
        buff_rd->rx_offset &= size - 1;
        ret = ilm;
        *len = real_size + sizeof(ipc_ilm_t);
        if (real_size > data_size)
                CCCI_MSG_INF(md_id, "ipc",
                             "[Error]wrong real_size(%d)>data_size(%d)",
                             real_size, data_size);
out:
        spin_unlock_irqrestore(&ctl_b->ccci_ipc_rd_lock, flag);
        CCCI_IPC_MSG(md_id, "recv real_size=%08x data_size=%08x\n", real_size,
                     data_size);
        return ret;
}
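/*
 * "addr" in a CCCI_IPC_RX message is an MD-side physical address into
 * the shared IPC memory; it is rebased against ccci_ipc_smem_base_phy
 * (adjusted by the fixed MD->AP physical offset) on top of the ipc_mem
 * virtual mapping before it is dereferenced as an ipc_ilm_t.
 */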
static void recv_item(int md_id, unsigned int addr, unsigned int len,
                      IPC_TASK *task, BUFF *buff_rd)
{
        struct ipc_ctl_block_t *ctl_b = ipc_ctl_block[md_id];
        ipc_ilm_t *ilm =
                (ipc_ilm_t *)((uint32)ctl_b->ipc_mem +
                              (addr - ctl_b->ccci_ipc_smem_base_phy +
                               get_md2_ap_phy_addr_fixed()));
        CCCI_RECV_ITEM *item;
        unsigned long flags;

        if (len != sizeof(ipc_ilm_t))
                CCCI_MSG_INF(md_id, "ipc",
                             "[Error]Wrong msg len: sizeof(ipc_ilm_t)=%zu,len=%d\n",
                             sizeof(ipc_ilm_t), len);
        CCCI_IPC_MSG(md_id,
                     "Recv item Physical_Addr:%x Virtual_Addr:%p Len:%d.\n",
                     addr, ilm, len);
        if (addr >
            ctl_b->ccci_ipc_smem_base_phy - get_md2_ap_phy_addr_fixed() +
            offset_of(CCCI_IPC_MEM,
                      ilm_md) + sizeof(ipc_ilm_t) * MAX_NUM_IPC_TASKS_MD) {
                CCCI_MSG_INF(md_id, "ipc",
                             "[Error]Wrong physical address(%x)\n", addr);
                return;
        }
        item = kmalloc(sizeof(CCCI_RECV_ITEM), GFP_ATOMIC);
        if (item == NULL) {
                CCCI_MSG_INF(md_id, "ipc", "kmalloc for recv_item fail!\n");
                goto out;
        }
        if (ilm->local_para_ptr) {
                if ((uint32)ilm->local_para_ptr <
                    (uint32)ctl_b->ccci_ipc_rd_buffer_phy
                    || (uint32)ilm->local_para_ptr >=
                    (uint32)ctl_b->ccci_ipc_rd_buffer_phy +
                    CCCI_IPC_BUFFER_SIZE)
                        CCCI_MSG_INF(md_id, "ipc",
                                     "[Error]wrong ilm->local_para_ptr address(%p)",
                                     ilm->local_para_ptr);
        }
        if (ilm->peer_buff_ptr) {
                if ((uint32)ilm->peer_buff_ptr <
                    (uint32)ctl_b->ccci_ipc_rd_buffer_phy
                    || (uint32)ilm->peer_buff_ptr >=
                    (uint32)ctl_b->ccci_ipc_rd_buffer_phy +
                    CCCI_IPC_BUFFER_SIZE)
                        CCCI_MSG_INF(md_id, "ipc",
                                     "[Error]wrong ilm->peer_buff_ptr address(%p)",
                                     ilm->peer_buff_ptr);
        }
        CCCI_IPC_MSG(md_id,
                     "recv ilm->local_para_ptr(%p), ilm->peer_buff_ptr(%p)\n",
                     ilm->local_para_ptr, ilm->peer_buff_ptr);
        INIT_LIST_HEAD(&item->list);
        item->data =
                (uint8 *)read_from_ring_buffer(md_id, ilm, buff_rd, &item->len);
        if (item->data == NULL) {
                CCCI_MSG_INF(md_id, "ipc", "read ipc rx data fail\n");
                goto out1;
        }
        spin_lock_irqsave(&task->lock, flags);
        list_add_tail(&item->list, &task->recv_list);
        spin_unlock_irqrestore(&task->lock, flags);
        kill_fasync(&task->fasync, SIGIO, POLL_IN);
        wake_up_poll(&task->read_wait_queue, POLLIN);
        goto out;
out1:
        kfree(item);
out:
        return;
}
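/*
 * Classic one-slot-open ring buffer: read == write means empty, so at
 * most size - 1 bytes can ever be stored. A write that needs more than
 * "free" bytes fails with -E2BIG rather than overwriting unread data.
 */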
static int write_to_ring_buffer(int md_id, uint8 *data, int count,
                                IPC_TASK *task, BUFF *ipc_buffer)
{
        int ret = 0;
        int free;
        int write, read, over, copy;
        int size;
        int write_begin;
        unsigned long flags;
        ipc_ilm_t *ilm = task->ilm_p;
        local_para_struct *local_para =
                ilm->local_para_ptr ? (local_para_struct *)data : NULL;
        peer_buff_struct *peer_buff =
                ilm->peer_buff_ptr ? (peer_buff_struct *)((uint32)data +
                                                          (local_para ?
                                                           local_para->msg_len :
                                                           0)) : NULL;
        struct ipc_ctl_block_t *ctl_b = ipc_ctl_block[md_id];

        CCCI_IPC_MSG(md_id,
                     "local_para_struct addr=%p peer_buff_struct addr=%p\n",
                     local_para, peer_buff);
        if ((local_para ? local_para->msg_len : 0) +
            (peer_buff ? peer_buff->pdu_len : 0) != count) {
                CCCI_MSG_INF(md_id, "ipc",
                             "[Error]Count is not equal(%x != %x)!\n",
                             (local_para ? local_para->msg_len : 0) +
                             (peer_buff ? peer_buff->pdu_len : 0), count);
                return -EINVAL;
        }
        if ((local_para ? local_para->ref_count != 1 : 0)
            || (peer_buff ? peer_buff->ref_count != 1 : 0)) {
                CCCI_MSG_INF(md_id, "ipc", "[Error]ref count != 1.\n");
                return -EINVAL;
        }
        spin_lock_irqsave(&ctl_b->ccci_ipc_wr_lock, flags);
        write_begin = write = ipc_buffer->tx_offset;
        read = ipc_buffer->rx_offset;
        size = ipc_buffer->size;
        copy = 0;
        if (read < write) {
                free = size - (write - read);
                over = size - write;
        } else if (read == write) {
                free = size - 1;
                over = size - write;
        } else {
                free = read - write - 1;
                over = 0;
        }
        if (count > free) {
                CCCI_MSG_INF(md_id, "ipc",
                             "[Error]memory isn't enough, data_len(%d)>free_len(%d, %d, %d)\n",
                             count, free, write, read);
                ret = -E2BIG;
                goto out;
        }
        if (over) {
                if (count < over)
                        over = count;
                memcpy(ipc_buffer->buffer + write, data, over);
                copy += over;
                write = (write + over) & (size - 1);
                data += copy;
        }
        if (copy < count)
                memcpy(ipc_buffer->buffer + write, data, count - copy);
        mb();   /* make sure the payload is in shared memory before tx_offset moves */
        ipc_buffer->tx_offset += count;
        ipc_buffer->tx_offset &= size - 1;
        ret = count;
        ilm->local_para_ptr =
                local_para ? (local_para_struct *)(ctl_b->ccci_ipc_wr_buffer_phy +
                                                   write_begin) : NULL;
        ilm->peer_buff_ptr =
                peer_buff ? (peer_buff_struct *)(ctl_b->ccci_ipc_wr_buffer_phy +
                                                 ((write_begin +
                                                   (local_para ?
                                                    local_para->msg_len : 0)) &
                                                  (size - 1))) : NULL;
out:
        spin_unlock_irqrestore(&ctl_b->ccci_ipc_wr_lock, flags);
        return ret;
}
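/*
 * Logical-channel callback shared by CCCI_IPC_RX and CCCI_IPC_TX_ACK:
 * RX messages become recv items queued to the destination task and are
 * acknowledged with IPC_MSGSVC_RVC_DONE on CCCI_IPC_RX_ACK; TX acks
 * clear the pending bit of the task waiting on the acked unify id.
 */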
static void ccci_ipc_callback(void *private)
{
        IPC_TASK *task;
        IPC_MSGSVC_TASKMAP_T *id_map;
        struct logic_channel_info_t *ch_info =
                (struct logic_channel_info_t *)private;
        struct ccci_msg_t msg;
        struct ipc_ctl_block_t *ctl_b =
                (struct ipc_ctl_block_t *)ch_info->m_owner;
        int md_id = ctl_b->m_md_id;

        while (get_logic_ch_data(ch_info, &msg)) {
                if (msg.channel == CCCI_IPC_RX_ACK
                    || msg.channel == CCCI_IPC_TX) {
                        CCCI_MSG_INF(md_id, "ipc",
                                     "[Error]invalid ipc rx channel(%d)!\n",
                                     msg.channel);
                }
                if (msg.channel == CCCI_IPC_RX) {
                        CCCI_IPC_MSG(md_id, "CCCI_IPC_RX:Unify AP id(%x)\n",
                                     msg.reserved);
                        id_map = unify_AP_id_2_local_id(msg.reserved);
                        if (id_map == NULL) {
                                CCCI_MSG_INF(md_id, "ipc",
                                             "[Error]Wrong Unify AP id(%x)@RX\n",
                                             msg.reserved);
                                return;
                        }
                        task = ctl_b->ipc_task + id_map->task_id;
                        recv_item(md_id, msg.addr, msg.len, task,
                                  &ctl_b->ipc_mem->buffer.buff_rd);
                        ccci_ipc_ack(md_id, CCCI_IPC_RX_ACK,
                                     IPC_MSGSVC_RVC_DONE, msg.reserved);
                }
                if (msg.channel == CCCI_IPC_TX_ACK) {
                        CCCI_IPC_MSG(md_id,
                                     "CCCI_IPC_TX_ACK: Unify MD ID(%x)\n",
                                     msg.reserved);
                        id_map = unify_MD_id_2_local_id(msg.reserved);
                        if (id_map == NULL) {
                                CCCI_MSG_INF(md_id, "ipc",
                                             "[Error]Wrong MD Unify id(%d)@Tx ack.\n",
                                             msg.reserved);
                                return;
                        }
                        find_task_to_clear(ctl_b->ipc_task, id_map->task_id);
                        if (msg.id != IPC_MSGSVC_RVC_DONE)
                                CCCI_MSG_INF(md_id, "ipc",
                                             "[Error]Not write mailbox id: %d\n",
                                             msg.id);
                }
        }
}
static void ipc_task_init(int md_id, IPC_TASK *task, ipc_ilm_t *ilm)
{
        struct ipc_ctl_block_t *ctl_b = ipc_ctl_block[md_id];

        spin_lock_init(&task->lock);
        task->flag = 0;
        atomic_set(&task->user, 0);
        task->w_jiffies = -1UL;
        task->fasync = NULL;
        task->ilm_p = ilm;
        task->time_out = -1;
        task->ilm_phy_addr =
                ctl_b->ccci_ipc_smem_base_phy - get_md2_ap_phy_addr_fixed()
                + offset_of(CCCI_IPC_MEM, ilm)
                + (uint32)ilm - (uint32)(ctl_b->ipc_mem->ilm);
        task->to_id = -1;
        init_waitqueue_head(&task->read_wait_queue);
        init_waitqueue_head(&task->write_wait_queue);
        INIT_LIST_HEAD(&task->recv_list);
        task->owner = ipc_ctl_block[md_id];
}
static int ccci_ipc_open(struct inode *inode, struct file *file)
{
        int md_id;
        int major;
        int index;
        struct ipc_ctl_block_t *ctl_b;

        major = imajor(inode);
        md_id = get_md_id_by_dev_major(major);
        if (md_id < 0) {
                CCCI_MSG("IPC open fail: invalid major id:%d\n", major);
                return -ENODEV;
        }
        ctl_b = ipc_ctl_block[md_id];
        index = iminor(inode) - ctl_b->start_minor;
        if (index >= MAX_NUM_IPC_TASKS) {
                CCCI_MSG_INF(md_id, "ipc", "[Error]Wrong minor num %d.\n",
                             index);
                return -EINVAL;
        }
        CCCI_DBG_MSG(md_id, "ipc", "%s: register task%d\n", __func__, index);
        nonseekable_open(inode, file);
        file->private_data = ctl_b->ipc_task + index;
        atomic_inc(&((ctl_b->ipc_task + index)->user));
        return 0;
}
static ssize_t ccci_ipc_read(struct file *file, char *buf, size_t count,
                             loff_t *ppos)
{
        int ret = 0;
        IPC_TASK *task = file->private_data;
        CCCI_RECV_ITEM *recv_data;
        struct ipc_ctl_block_t *ctl_b;
        unsigned long flags;

        ctl_b = (struct ipc_ctl_block_t *)task->owner;
retry:
        spin_lock_irqsave(&task->lock, flags);
        if (ctl_b->md_is_ready == 0) {
                ret = -EIO;
                goto out_unlock;
        }
        if (list_empty(&task->recv_list)) {
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                spin_unlock_irqrestore(&task->lock, flags);
                ret = wait_event_interruptible(task->read_wait_queue,
                                               !list_empty(&task->recv_list));
                if (ret == -ERESTARTSYS) {
                        CCCI_IPC_MSG(ctl_b->m_md_id,
                                     "Interrupt read sys_call: task:%s pid:%d tgid:%d SIGPEND:%#llx GROUP_SIGPEND:%#llx.\n",
                                     current->comm, current->pid, current->tgid,
                                     *(unsigned long long *)current->pending.signal.sig,
                                     *(unsigned long long *)current->signal->shared_pending.signal.sig);
                        ret = -EINTR;
                        goto out;
                }
                goto retry;
        }
        recv_data = container_of(task->recv_list.next, CCCI_RECV_ITEM, list);
        if (recv_data->len > count) {
                CCCI_MSG_INF(ctl_b->m_md_id, "ipc",
                             "[Error]Recv buff is too small(count=%zu data_len=%d)!\n",
                             count, recv_data->len);
                ret = -E2BIG;
                goto out_unlock;
        }
        list_del_init(&recv_data->list);
        spin_unlock_irqrestore(&task->lock, flags);
        if (copy_to_user(buf, recv_data->data, recv_data->len)) {
                ret = -EFAULT;
                release_recv_item(recv_data);
                goto out;
        }
        ret = recv_data->len;
        release_recv_item(recv_data);
        goto out;
out_unlock:
        spin_unlock_irqrestore(&task->lock, flags);
out:
        return ret;
}
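/*
 * Write path: the user buffer starts with an ipc_ilm_t header, followed
 * by an optional local_para_struct and/or peer_buff_struct payload. The
 * payload is copied into the shared write ring, the ilm's pointers are
 * patched to the payload's physical addresses (see write_to_ring_buffer),
 * and the fixed-size ilm is then handed to the modem via CCCI_IPC_TX.
 */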
static ssize_t ccci_ipc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        int ret = 0;
        IPC_TASK *task = file->private_data;
        struct ipc_ctl_block_t *ctl_b;
        IPC_MSGSVC_TASKMAP_T *id_map;
        ipc_ilm_t *ilm = NULL;
        int md_id;

        ctl_b = (struct ipc_ctl_block_t *)task->owner;
        md_id = ctl_b->m_md_id;
        if (count < sizeof(ipc_ilm_t)) {
                CCCI_MSG_INF(md_id, "ipc",
                             "%s: [Error]Write len(%zu) < sizeof(ipc_ilm_t)\n",
                             __func__, count);
                ret = -EINVAL;
                goto out;
        }
        ilm = kmalloc(count, GFP_KERNEL);
        if (ilm == NULL) {
                CCCI_MSG_INF(md_id, "ipc", "%s: kmalloc fail!\n", __func__);
                ret = -ENOMEM;
                goto out;
        }
        if (copy_from_user(ilm, buf, count)) {
                CCCI_MSG_INF(md_id, "ipc", "%s: copy_from_user fail!\n",
                             __func__);
                ret = -EFAULT;
                goto out_free;
        }
        id_map = local_MD_id_2_unify_id(ilm->dest_mod_id);
        if (id_map == NULL) {
                CCCI_MSG_INF(md_id, "ipc",
                             "%s: [Error]Invalid Dest MD id (%d)\n",
                             __func__, ilm->dest_mod_id);
                ret = -EINVAL;
                goto out_free;
        }
        if (test_and_set_bit(CCCI_TASK_PENDING, &task->flag)) {
                CCCI_IPC_MSG(md_id, "write is busy. Task ID=%d.\n",
                             (int)(task - ctl_b->ipc_task));
                if (file->f_flags & O_NONBLOCK) {
                        ret = -EBUSY;
                        goto out_free;
                } else if (wait_event_interruptible_exclusive
                           (task->write_wait_queue,
                            !test_and_set_bit(CCCI_TASK_PENDING, &task->flag)
                            || ctl_b->md_is_ready == 0) == -ERESTARTSYS) {
                        ret = -EINTR;
                        goto out_free;
                }
        }
        spin_lock_irq(&task->lock);
        if (ctl_b->md_is_ready == 0) {
                ret = -EIO;
                spin_unlock_irq(&task->lock);
                goto out_free;
        }
        spin_unlock_irq(&task->lock);
        task->w_jiffies = get_jiffies_64();
        *task->ilm_p = *ilm;
        task->to_id = ilm->dest_mod_id;
        task->ilm_p->src_mod_id = task - ctl_b->ipc_task;
        CCCI_DBG_MSG(md_id, "ipc", "%s: src=%d, dst=%d, data_len=%zu\n",
                     __func__, task->ilm_p->src_mod_id, task->to_id, count);
        if (count > sizeof(ipc_ilm_t)) {
                if (write_to_ring_buffer(md_id, (uint8 *)(ilm + 1),
                                         count - sizeof(ipc_ilm_t), task,
                                         &ctl_b->ipc_mem->buffer.buff_wr) !=
                    count - sizeof(ipc_ilm_t)) {
                        CCCI_MSG_INF(md_id, "ipc",
                                     "[Error]write_to_ring_buffer fail!\n");
                        clear_bit(CCCI_TASK_PENDING, &task->flag);
                        ret = -EAGAIN;
                        goto out_free;
                }
        }
        ret = ccci_ipc_write_stream(md_id, CCCI_IPC_TX, task->ilm_phy_addr,
                                    sizeof(ipc_ilm_t), id_map->extq_id);
        if (ret != sizeof(struct ccci_msg_t)) {
                CCCI_MSG_INF(md_id, "ipc",
                             "%s: ccci_ipc_write_stream fail: %d\n",
                             __func__, ret);
                clear_bit(CCCI_TASK_PENDING, &task->flag);
                ret = -EAGAIN;
                goto out_free;
        }
out_free:
        kfree(ilm);
out:
        return ret == sizeof(struct ccci_msg_t) ? count : ret;
}
static long ccci_ipc_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
{
        IPC_TASK *task = file->private_data;
        CCCI_RECV_ITEM *item, *n;
        unsigned long flags;
        long ret = 0;
        struct ipc_ctl_block_t *ctl_b;

        ctl_b = (struct ipc_ctl_block_t *)task->owner;
        switch (cmd) {
        case CCCI_IPC_RESET_RECV:
                spin_lock_irqsave(&task->lock, flags);
                list_for_each_entry_safe(item, n, &task->recv_list, list) {
                        release_recv_item(item);
                }
                spin_unlock_irqrestore(&task->lock, flags);
                ret = 0;
                break;
        case CCCI_IPC_RESET_SEND:
                clear_bit(CCCI_TASK_PENDING, &task->flag);
                wake_up(&task->write_wait_queue);
                break;
        case CCCI_IPC_WAIT_MD_READY:
                if (ctl_b->md_is_ready == 0) {
                        ret = wait_event_interruptible(ctl_b->poll_md_queue_head,
                                                       ctl_b->md_is_ready);
                        if (ret == -ERESTARTSYS) {
                                CCCI_MSG_INF(ctl_b->m_md_id, "ipc",
                                             "Got signal @ WAIT_MD_READY\n");
                                ret = -EINTR;
                        }
                }
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
static int ccci_ipc_release(struct inode *inode, struct file *file)
{
        CCCI_RECV_ITEM *item, *n;
        IPC_TASK *task = file->private_data;
        unsigned long flags;

        if (atomic_dec_and_test(&task->user)) {
                spin_lock_irqsave(&task->lock, flags);
                list_for_each_entry_safe(item, n, &task->recv_list, list) {
                        release_recv_item(item);
                }
                spin_unlock_irqrestore(&task->lock, flags);
        }
        clear_bit(CCCI_TASK_PENDING, &task->flag);
        CCCI_DBG_MSG(0, "ipc", "%s\n", __func__);
        return 0;
}
static int ccci_ipc_fasync(int fd, struct file *file, int on)
{
        IPC_TASK *task = file->private_data;

        return fasync_helper(fd, file, on, &task->fasync);
}
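/*
 * Poll semantics: POLLIN|POLLRDNORM when a recv item is queued,
 * POLLOUT|POLLWRNORM while no write is pending. While the modem is down
 * no event is reported (the POLLERR report is deliberately commented
 * out), so callers keep waiting until the next boot-up.
 */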
static uint32 ccci_ipc_poll(struct file *file, poll_table *wait)
{
        IPC_TASK *task = file->private_data;
        int ret = 0;
        struct ipc_ctl_block_t *ctl_b;

        ctl_b = (struct ipc_ctl_block_t *)task->owner;
        poll_wait(file, &task->read_wait_queue, wait);
        poll_wait(file, &task->write_wait_queue, wait);
        spin_lock_irq(&task->lock);
        if (ctl_b->md_is_ready == 0) {
                /* ret |= POLLERR; */
                goto out;
        }
        if (!list_empty(&task->recv_list))
                ret |= POLLIN | POLLRDNORM;
        if (!test_bit(CCCI_TASK_PENDING, &task->flag))
                ret |= POLLOUT | POLLWRNORM;
out:
        spin_unlock_irq(&task->lock);
        return ret;
}
static const struct file_operations ccci_ipc_fops = {
        .owner = THIS_MODULE,
        .open = ccci_ipc_open,
        .read = ccci_ipc_read,
        .write = ccci_ipc_write,
        .release = ccci_ipc_release,
        .unlocked_ioctl = ccci_ipc_ioctl,
        .fasync = ccci_ipc_fasync,
        .poll = ccci_ipc_poll,
};
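/*
 * Usage sketch (hypothetical userspace client; the node name and payload
 * layout here are illustrative only, they are not defined in this file):
 *
 *   int fd = open("/dev/ccci_ipc0", O_RDWR);     // one node per IPC task
 *   struct { ipc_ilm_t ilm; char payload[64]; } msg;
 *   // fill msg.ilm.dest_mod_id and payload, then:
 *   write(fd, &msg, sizeof(msg));                // ilm header + payload
 *   read(fd, &msg, sizeof(msg));                 // blocks until an item
 *                                                // arrives (see poll)
 */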
int __init ccci_ipc_init(int md_id)
{
        int ret = 0;
        int i = 0;
        int major, minor;
        char buf[16];
        struct ipc_ctl_block_t *ctl_b;

        ret = get_dev_id_by_md_id(md_id, "ipc", &major, &minor);
        if (ret < 0) {
                CCCI_MSG("ccci_ipc_init: get md device number failed(%d)\n",
                         ret);
                return ret;
        }
        /* Allocate ipc ctrl struct memory */
        ctl_b = kzalloc(sizeof(struct ipc_ctl_block_t), GFP_KERNEL);
        if (ctl_b == NULL)
                return -CCCI_ERR_GET_MEM_FAIL;
        ipc_ctl_block[md_id] = ctl_b;
        spin_lock_init(&ctl_b->ccci_ipc_wr_lock);
        spin_lock_init(&ctl_b->ccci_ipc_rd_lock);
        init_waitqueue_head(&ctl_b->poll_md_queue_head);
        ctl_b->md_status_update_call_back.call = ipc_call_back_func;
        ctl_b->md_status_update_call_back.next = NULL;
        ctl_b->m_md_id = md_id;
        ctl_b->major = major;
        ctl_b->start_minor = minor;
        ccci_ipc_base_req(md_id, (int *)(&ctl_b->ipc_mem),
                          &ctl_b->ccci_ipc_smem_base_phy,
                          &ctl_b->ccci_ipc_smem_size);
        ctl_b->ccci_ipc_wr_buffer_phy =
                ctl_b->ccci_ipc_smem_base_phy - get_md2_ap_phy_addr_fixed()
                + offset_of(CCCI_IPC_MEM, buffer.buff_wr.buffer);
        ctl_b->ccci_ipc_rd_buffer_phy =
                ctl_b->ccci_ipc_smem_base_phy - get_md2_ap_phy_addr_fixed()
                + offset_of(CCCI_IPC_MEM, buffer.buff_rd.buffer);
        /* CCCI_MSG_INF(md_id, "ipc", "ccci_ipc_wr_buffer_phy: %#x, ccci_ipc_buffer_phy_rd: %#x.\n", */
        /* ctl_b->ccci_ipc_wr_buffer_phy, ctl_b->ccci_ipc_rd_buffer_phy); */
        ipc_smem_init(ctl_b->ipc_mem);
        for (i = 0; i < MAX_NUM_IPC_TASKS; i++) {
                ipc_task_init(md_id, ctl_b->ipc_task + i,
                              ctl_b->ipc_mem->ilm + i);
        }
        snprintf(buf, 16, "CCCI_IPC_DEV%d", md_id);
        if (register_chrdev_region(MKDEV(major, minor), MAX_NUM_IPC_TASKS, buf)
            != 0) {
                CCCI_MSG_INF(md_id, "ipc", "Register CCCI_IPC_DEV failed!\n");
                ret = -1;
                goto _IPC_MAPPING_FAIL;
        }
        cdev_init(&ctl_b->ccci_ipc_cdev, &ccci_ipc_fops);
        ctl_b->ccci_ipc_cdev.owner = THIS_MODULE;
        ret = cdev_add(&ctl_b->ccci_ipc_cdev, MKDEV(major, minor),
                       MAX_NUM_IPC_TASKS);
        if (ret < 0) {
                CCCI_MSG_INF(md_id, "ipc", "cdev_add failed!\n");
                goto _CHR_DEV_ADD_FAIL;
        }
        if (register_to_logic_ch(md_id, CCCI_IPC_RX, ccci_ipc_callback, ctl_b)
            || register_to_logic_ch(md_id, CCCI_IPC_TX_ACK, ccci_ipc_callback,
                                    ctl_b)) {
                CCCI_MSG_INF(md_id, "ipc", "ccci_ipc_register failed!\n");
                ret = -1;
                goto _REG_LOGIC_CH_FAIL;
        }
        md_register_call_chain(md_id, &ctl_b->md_status_update_call_back);
        goto out;
_REG_LOGIC_CH_FAIL:
        un_register_to_logic_ch(md_id, CCCI_IPC_RX);
        un_register_to_logic_ch(md_id, CCCI_IPC_TX_ACK);
_CHR_DEV_ADD_FAIL:
        cdev_del(&ctl_b->ccci_ipc_cdev);
        unregister_chrdev_region(MKDEV(major, minor), MAX_NUM_IPC_TASKS);
_IPC_MAPPING_FAIL:
        kfree(ctl_b);
        ipc_ctl_block[md_id] = NULL;
out:
        return ret;
}
void __exit ccci_ipc_exit(int md_id)
{
        int i;
        struct ipc_ctl_block_t *ctl_b = ipc_ctl_block[md_id];

        if (ctl_b == NULL)
                return;
        for (i = 0; i < MAX_NUM_IPC_TASKS; i++) {
                if (atomic_read(&ctl_b->ipc_task[i].user)) {
                        CCCI_MSG_INF(md_id, "ipc",
                                     "BUG for taskID %d module exit count.\n",
                                     i);
                }
        }
        cdev_del(&ctl_b->ccci_ipc_cdev);
        unregister_chrdev_region(MKDEV(ctl_b->major, ctl_b->start_minor),
                                 MAX_NUM_IPC_TASKS);
        md_unregister_call_chain(md_id, &ctl_b->md_status_update_call_back);
        un_register_to_logic_ch(md_id, CCCI_IPC_RX);
        un_register_to_logic_ch(md_id, CCCI_IPC_TX_ACK);
        kfree(ctl_b);
        ipc_ctl_block[md_id] = NULL;
}