/* ccci_hw.c - CCIF standard-v1 hardware access layer for the CCCI driver */
  1. #include <linux/module.h>
  2. #include <linux/init.h>
  3. #include <linux/interrupt.h>
  4. #include <linux/sched.h>
  5. #include <linux/delay.h>
  6. #include <ccci_common.h>
  7. #include <ccci.h>
  8. #define CCIF_DEBUG /* ccif issue debug */
  9. static int __ccif_v1_en_intr(struct ccif_t *ccif)
  10. {
  11. unsigned long flag;
  12. CCCI_FUNC_ENTRY(ccif->m_md_id);
  13. spin_lock_irqsave(&ccif->m_lock, flag);
  14. if (ccif->m_irq_dis_cnt) {
  15. enable_irq(ccif->m_irq_id);
  16. ccif->m_irq_dis_cnt--;
  17. }
  18. spin_unlock_irqrestore(&ccif->m_lock, flag);
  19. return 0;
  20. }
  21. static void __ccif_v1_dis_intr(struct ccif_t *ccif)
  22. {
  23. unsigned long flag;
  24. CCCI_FUNC_ENTRY(ccif->m_md_id);
  25. spin_lock_irqsave(&ccif->m_lock, flag);
  26. if (ccif->m_irq_dis_cnt == 0) {
  27. disable_irq(ccif->m_irq_id);
  28. ccif->m_irq_dis_cnt++;
  29. }
  30. spin_unlock_irqrestore(&ccif->m_lock, flag);
  31. }
  32. static int __ccif_v1_dump_reg(struct ccif_t *ccif, unsigned int buf[], int len)
  33. {
  34. int i, j;
  35. unsigned int *curr_ccif_smem_addr;
  36. curr_ccif_smem_addr = (unsigned int *)CCIF_TXCHDATA(ccif->m_reg_base);
  37. CCCI_DBG_MSG(ccif->m_md_id, "cci", "[CCCI REG_INFO]\n");
  38. CCCI_DBG_MSG(ccif->m_md_id, "cci",
  39. "CON(%lx)=%08X, BUSY(%lx)=%08x, START(%lx)=%08x, MRCHNUM(%lx)=%08x\n",
  40. CCIF_CON(ccif->m_reg_base),
  41. ccci_read32(CCIF_CON(ccif->m_reg_base)),
  42. CCIF_BUSY(ccif->m_reg_base),
  43. ccci_read32(CCIF_BUSY(ccif->m_reg_base)),
  44. CCIF_START(ccif->m_reg_base),
  45. ccci_read32(CCIF_START(ccif->m_reg_base)),
  46. MD_CCIF_RCHNUM(ccif->m_md_reg_base),
  47. ccci_read32(MD_CCIF_RCHNUM(ccif->m_md_reg_base)));
  48. CCCI_DBG_MSG(ccif->m_md_id, "cci",
  49. "MCON(%lx)=%08X, MBUSY(%lx)=%08x, MSTART(%lx)=%08x, RCHNUM(%lx)=%08x\n",
  50. MD_CCIF_CON(ccif->m_md_reg_base),
  51. ccci_read32(MD_CCIF_CON(ccif->m_md_reg_base)),
  52. MD_CCIF_BUSY(ccif->m_md_reg_base),
  53. ccci_read32(MD_CCIF_BUSY(ccif->m_md_reg_base)),
  54. MD_CCIF_START(ccif->m_md_reg_base),
  55. ccci_read32(MD_CCIF_START(ccif->m_md_reg_base)),
  56. CCIF_RCHNUM(ccif->m_reg_base),
  57. ccci_read32(CCIF_RCHNUM(ccif->m_reg_base)));
  58. for (i = 0; i < 16; i++) {
  59. CCCI_DBG_MSG(ccif->m_md_id, "cci",
  60. "%08X: %08X %08X %08X %08X\n",
  61. (unsigned int)curr_ccif_smem_addr,
  62. curr_ccif_smem_addr[0], curr_ccif_smem_addr[1],
  63. curr_ccif_smem_addr[2], curr_ccif_smem_addr[3]);
  64. curr_ccif_smem_addr += 4;
  65. }
  66. if (buf == NULL || len < (4 * 16 + 8))
  67. /* Only dump by log */
  68. return 0;
  69. j = 0;
  70. buf[j++] = ccci_read32(CCIF_CON(ccif->m_reg_base));
  71. buf[j++] = ccci_read32(CCIF_BUSY(ccif->m_reg_base));
  72. buf[j++] = ccci_read32(CCIF_START(ccif->m_reg_base));
  73. buf[j++] = ccci_read32(MD_CCIF_RCHNUM(ccif->m_reg_base));
  74. buf[j++] = ccci_read32(MD_CCIF_CON(ccif->m_reg_base));
  75. buf[j++] = ccci_read32(MD_CCIF_BUSY(ccif->m_reg_base));
  76. buf[j++] = ccci_read32(MD_CCIF_START(ccif->m_reg_base));
  77. buf[j++] = ccci_read32(CCIF_RCHNUM(ccif->m_reg_base));
  78. curr_ccif_smem_addr =
  79. (unsigned int *)CCIF_TXCHDATA(ccif->m_reg_base);
  80. for (i = 0; i < 4 * 16; i++)
  81. buf[j++] = curr_ccif_smem_addr[i];
  82. return j;
  83. }
  84. static int __ccif_v1_read_phy_ch_data(struct ccif_t *ccif, int ch, unsigned int buf[])
  85. {
  86. struct ccif_msg_t *rx_msg = (struct ccif_msg_t *) (CCIF_RXCHDATA(ccif->m_reg_base));
  87. buf[0] = rx_msg[ch].data[0];
  88. buf[1] = rx_msg[ch].data[1];
  89. buf[2] = rx_msg[ch].channel;
  90. buf[3] = rx_msg[ch].reserved;
  91. return sizeof(struct ccif_msg_t);
  92. }
  93. static int __ccif_v1_write_phy_ch_data(struct ccif_t *ccif, unsigned int buf[],
  94. int retry_en)
  95. {
  96. int ret = 0;
  97. unsigned int busy;
  98. unsigned long flag;
  99. unsigned int retry_count = 200;
  100. unsigned int ch;
  101. struct ccif_msg_t *tx_msg;
  102. int md_id = ccif->m_md_id;
  103. CCCI_FUNC_ENTRY(md_id);
  104. if (retry_en == 0)
  105. retry_count = 1;
  106. do {
  107. spin_lock_irqsave(&ccif->m_lock, flag);
  108. busy = ccci_read32(CCIF_BUSY(ccif->m_reg_base));
  109. ch = ccif->m_tx_idx;
  110. if (busy & (1 << ch)) {
  111. ret = -CCCI_ERR_CCIF_NO_PHYSICAL_CHANNEL;
  112. if (busy != 0xff) {
  113. CCCI_DBG_MSG(md_id, "cci",
  114. "Wrong Busy value: 0x%X\n", busy);
  115. }
  116. spin_unlock_irqrestore(&ccif->m_lock, flag);
  117. udelay(1);
  118. retry_count--;
  119. } else {
  120. ccci_write32(CCIF_BUSY(ccif->m_reg_base), 1 << ch);
  121. ccif->m_tx_idx++;
  122. ccif->m_tx_idx &= (CCIF_STD_V1_MAX_CH_NUM - 1);
  123. /* spin_unlock_irqrestore(&ccif->m_lock,flag); */
  124. tx_msg =
  125. (struct ccif_msg_t *) (CCIF_TXCHDATA(ccif->m_reg_base));
  126. ccci_write32(&(tx_msg[ch].data[0]), buf[0]);
  127. ccci_write32(&(tx_msg[ch].data[1]), buf[1]);
  128. ccci_write32(&(tx_msg[ch].channel), buf[2]);
  129. ccci_write32(&(tx_msg[ch].reserved), buf[3]);
  130. /* mb(); */
  131. ccci_write32(CCIF_TCHNUM(ccif->m_reg_base), ch);
  132. spin_unlock_irqrestore(&ccif->m_lock, flag);
  133. ret = sizeof(struct ccif_msg_t);
  134. break;
  135. }
  136. } while (retry_count > 0);
  137. if (lg_ch_tx_debug_enable[md_id] & (1 << buf[2]))
  138. CCCI_MSG_INF(md_id, "cci",
  139. "[TX]: %08X, %08X, %02d, %08X (%02d)\n", buf[0],
  140. buf[1], buf[2], buf[3], ch);
  141. return ret;
  142. }
  143. static int __ccif_v1_get_rx_ch(struct ccif_t *ccif)
  144. {
  145. while (CCIF_CON_ARB != ccci_read32(CCIF_CON(ccif->m_reg_base)))
  146. ccci_write32(CCIF_CON(ccif->m_reg_base), CCIF_CON_ARB);
  147. return ccci_read32(CCIF_RCHNUM(ccif->m_reg_base));
  148. }
  149. static int __ccif_v1_get_busy_state(struct ccif_t *ccif)
  150. {
  151. return ccci_read32(CCIF_BUSY(ccif->m_reg_base));
  152. }
  153. static void __ccif_v1_set_busy_state(struct ccif_t *ccif, unsigned int val)
  154. {
  155. /* *CCIF_BUSY(ccif->m_reg_base) = val; */
  156. ccci_write32(CCIF_BUSY(ccif->m_reg_base), val);
  157. }
  158. static int __ccif_v1_ack(struct ccif_t *ccif, int ch)
  159. {
  160. /* *CCIF_ACK(ccif->m_reg_base) = (1 << ch); */
  161. ccci_write32(CCIF_ACK(ccif->m_reg_base), (1 << ch));
  162. return 0;
  163. }
  164. static int __ccif_v1_clear_sram(struct ccif_t *ccif)
  165. {
  166. int i;
  167. unsigned int *ccif_tx_addr;
  168. ccif_tx_addr = (unsigned int *)CCIF_TXCHDATA(ccif->m_reg_base);
  169. for (i = 0; i < 4 * 16; i++)
  170. ccci_write32(&ccif_tx_addr[i], 0);
  171. return 0;
  172. }
  173. static int __ccif_v1_write_runtime_data(struct ccif_t *ccif, unsigned int buf[],
  174. int len)
  175. {
  176. int i;
  177. unsigned int *curr_ccif_smem_addr;
  178. curr_ccif_smem_addr = (unsigned int *)(ccif->m_reg_base +
  179. CCIF_STD_V1_RUN_TIME_DATA_OFFSET);
  180. if (len > CCIF_STD_V1_RUM_TIME_MEM_MAX_LEN)
  181. return -CCCI_ERR_CCIF_INVALID_RUNTIME_LEN;
  182. for (i = 0; i < len; i++)
  183. ccci_write32(&curr_ccif_smem_addr[i], buf[i]);
  184. /* __ccif_v1_dump_reg(ccif, NULL, 0); */
  185. return 0;
  186. }
  187. static int __ccif_v1_reset(struct ccif_t *ccif)
  188. {
  189. ccci_write32(CCIF_CON(ccif->m_reg_base), 1);
  190. ccif->m_rx_idx = 0;
  191. ccif->m_tx_idx = 0;
  192. /* ACK MD all channel */
  193. ccci_write32(CCIF_ACK(ccif->m_reg_base), 0xFF);
  194. __ccif_v1_clear_sram(ccif);
  195. return 0;
  196. }
  197. /* Note: This is a common function */
  198. static irqreturn_t __ccif_irq_handler(int irq, void *data)
  199. {
  200. int ret;
  201. struct ccif_t *ccif = (struct ccif_t *) data;
  202. ret = ccif->ccif_intr_handler(ccif);
  203. if (ret) {
  204. CCCI_MSG_INF(ccif->m_md_id, "cci",
  205. "ccif_irq_handler fail: %d!\n", ret);
  206. }
  207. return IRQ_HANDLED;
  208. }
  209. static int __ccif_v1_reg_intr(struct ccif_t *ccif)
  210. {
  211. int ret;
  212. unsigned long flags;
  213. spin_lock_irqsave(&ccif->m_lock, flags);
  214. ccif->m_irq_dis_cnt = 0;
  215. spin_unlock_irqrestore(&ccif->m_lock, flags);
  216. ret =
  217. request_irq(ccif->m_irq_id, __ccif_irq_handler, IRQF_TRIGGER_LOW,
  218. "CCIF", ccif);
  219. return ret;
  220. }
  221. static int __ccif_v1_init(struct ccif_t *ccif)
  222. {
  223. /* *CCIF_CON(ccif->m_reg_base) = 1; */
  224. ccci_write32(CCIF_CON(ccif->m_reg_base), CCIF_CON_ARB);
  225. ccif->m_rx_idx = 0;
  226. ccif->m_tx_idx = 0;
  227. /* ACK MD all channel */
  228. /* *CCIF_ACK(ccif->m_reg_base) = 0xFF; */
  229. ccci_write32(CCIF_ACK(ccif->m_reg_base), 0xFF);
  230. __ccif_v1_clear_sram(ccif);
  231. return 0;
  232. }
  233. static int __ccif_v1_de_init(struct ccif_t *ccif)
  234. {
  235. /* Disable ccif irq, no need for there is kernel waring of free already-free irq when free_irq */
  236. /* ccif->ccif_dis_intr(ccif); */
  237. /* Check if TOP half is running */
  238. /* while (test_bit(CCIF_TOP_HALF_RUNNING, &ccif->m_status))
  239. yield(); */
  240. WARN_ON(spin_is_locked(&ccif->m_lock));
  241. /* Un-register irq */
  242. free_irq(ccif->m_irq_id, ccif);
  243. /* Free memory */
  244. kfree(ccif);
  245. return 0;
  246. }
  247. static int __ccif_v1_register_call_back(struct ccif_t *ccif,
  248. int (*push_func)(struct ccif_msg_t *, void *),
  249. void (*notify_func)(void *))
  250. {
  251. if (!test_and_set_bit(CCIF_CALL_BACK_FUNC_LOCKED, &ccif->m_status)) {
  252. ccif->push_msg = push_func;
  253. ccif->notify_push_done = notify_func;
  254. return 0;
  255. }
  256. CCCI_DBG_MSG(ccif->m_md_id, "cci",
  257. "[Error]ccif call back func has registered!\n");
  258. return CCCI_ERR_CCIF_CALL_BACK_HAS_REGISTERED;
  259. }
  260. static int __ccif_v1_register_isr_notify(struct ccif_t *ccif,
  261. void (*notify_func)(int))
  262. {
  263. if (!test_and_set_bit(CCIF_ISR_INFO_CALL_BACK_LOCKED, &ccif->m_status)) {
  264. ccif->isr_notify = notify_func;
  265. return 0;
  266. }
  267. CCCI_DBG_MSG(ccif->m_md_id, "cci",
  268. "[Error]ccif isr call back func has registered\n");
  269. return CCCI_ERR_CCIF_CALL_BACK_HAS_REGISTERED;
  270. }
/*
 * CCIF v1 interrupt service routine.
 *
 * Drains the RX channels: repeatedly reads the RX channel bitmap
 * (ccif_get_rx_ch), and for each set bit — scanned starting from the
 * saved m_rx_idx so channels are consumed in FIFO order — reads the
 * 4-word message, pushes it to the upper layer via push_msg, and ACKs
 * the channel. The outer loop re-reads the bitmap up to
 * CCIF_INTR_MAX_RE_ENTER_CNT times to pick up channels that became
 * ready while draining. Always returns 0.
 */
static int __ccif_v1_intr_handler(struct ccif_t *ccif)
{
	struct ccif_msg_t phy_ch_data;
	int re_enter_cnt = 0;
	int r_ch_val;		/* current RX channel bitmap */
	int i;
	int rx_ch;		/* channel index being scanned */
	int md_id = ccif->m_md_id;
	int reg_err = 0;	/* set when a bogus channel id is seen (CCIF_DEBUG) */
	unsigned int msg[4];
	CCCI_FUNC_ENTRY(md_id);
	set_bit(CCIF_TOP_HALF_RUNNING, &ccif->m_status);
	/* CCCI_DBG_MSG(md_id, "cci", "ISR\n"); */
	if (ccif->isr_notify)
		ccif->isr_notify(md_id);
	/* Resume the scan where the previous ISR invocation stopped */
	rx_ch = ccif->m_rx_idx;
	while ((r_ch_val = ccif->ccif_get_rx_ch(ccif))
	       && (re_enter_cnt < CCIF_INTR_MAX_RE_ENTER_CNT)) {
		for (i = 0; i < CCIF_STD_V1_MAX_CH_NUM; i++) {
			if (r_ch_val & (1 << rx_ch)) {
				/* We suppose always read success */
				ccif->ccif_read_phy_ch_data(ccif, rx_ch,
							    (unsigned int *)
							    &phy_ch_data);
#ifdef CCIF_DEBUG
				/* Sanity check: a channel id outside the logical
				 * range means the CCIF registers are corrupted;
				 * dump state once and flag for a forced assert
				 * message after the drain loop. */
				if (phy_ch_data.channel >= CCCI_MAX_CH_NUM) {
					if (!reg_err) {
						reg_err = 1;
						__ccif_v1_dump_reg(ccif, NULL,
								   0);
						CCCI_MSG_INF(md_id, "cci",
							     "[CCIF Register Error]RX: %08X, %08X, %02d, %08X (%02d)\n",
							     phy_ch_data.data
							     [0],
							     phy_ch_data.data
							     [1],
							     phy_ch_data.channel,
							     phy_ch_data.reserved,
							     rx_ch);
					}
				}
#endif
				if ((lg_ch_rx_debug_enable[md_id] &
				     ENABLE_ALL_RX_LOG)
				    || (lg_ch_rx_debug_enable[md_id] &
					(1 << phy_ch_data.channel))) {
					CCCI_DBG_MSG(md_id, "cci",
						     "[RX]: %08X, %08X, %02d, %08X (%02d)\n",
						     phy_ch_data.data[0],
						     phy_ch_data.data[1],
						     phy_ch_data.channel,
						     phy_ch_data.reserved,
						     rx_ch);
				}
				/* push ccif message to up layer */
				if (unlikely(ccif->push_msg == NULL)) {
					CCCI_DBG_MSG(md_id, "cci",
						     "push_msg func not registered:0x%08x, 0x%08x, %02d, 0x%08x\n",
						     phy_ch_data.data[0],
						     phy_ch_data.data[1],
						     phy_ch_data.channel,
						     phy_ch_data.reserved);
				} else {
					if (ccif->push_msg(&phy_ch_data,
							   ccif->
							   m_logic_ctl_block) !=
					    sizeof(struct ccif_msg_t))
						CCCI_DBG_MSG(md_id, "cci", "push data fail(ch%d)\n",
							     phy_ch_data.channel);
				}
				/* Ack modem side ccif */
				ccci_write32(CCIF_ACK(ccif->m_reg_base),
					     (1 << rx_ch));
				r_ch_val &= ~(1 << rx_ch);
			} else {
				if (r_ch_val != 0) {
					/* We suppose rx channel usage should be fifo mode */
					CCCI_DBG_MSG(md_id, "cci",
						     "rx channel error(rx>%02x : %d<curr)\n",
						     r_ch_val, rx_ch);
					__ccif_v1_dump_reg(ccif, NULL, 0);
				} else {
					break;
				}
			}
			/* Advance and wrap the channel index */
			++rx_ch;
			rx_ch = rx_ch & (CCIF_STD_V1_MAX_CH_NUM - 1);
		}
		re_enter_cnt++;
	}
	if ((re_enter_cnt >= CCIF_INTR_MAX_RE_ENTER_CNT) && (r_ch_val != 0)) {
		CCCI_DBG_MSG(md_id, "cci", "too much message to process\n");
		__ccif_v1_dump_reg(ccif, NULL, 0);
	}
	/* Store latest rx channel index */
	ccif->m_rx_idx = rx_ch;
	/* Notify uplayer begin to process data */
	if (unlikely(ccif->notify_push_done == NULL))
		CCCI_DBG_MSG(md_id, "cci", "notify_push_done not registered!\n");
	else
		ccif->notify_push_done(ccif->m_logic_ctl_block);
	clear_bit(CCIF_TOP_HALF_RUNNING, &ccif->m_status);
#ifdef CCIF_DEBUG
	/* On register corruption, send a force-assert message to the modem
	 * (single attempt, no retry — we are still in interrupt context). */
	if (reg_err) {
		reg_err = 0;
		msg[0] = 0xFFFFFFFF;
		msg[1] = 0x5B5B5B5B;
		msg[2] = CCCI_FORCE_ASSERT_CH;
		msg[3] = 0xB5B5B5B5;
		__ccif_v1_write_phy_ch_data(ccif, msg, 0);
	}
#endif
	return 0;
}
  385. struct ccif_t *ccif_create_instance(struct ccif_hw_info_t *info, void *ctl_b, int md_id)
  386. {
  387. struct ccif_t *ccif;
  388. if (info == NULL) {
  389. CCCI_MSG_INF(md_id, "cci", "[error]ccif hw info is null\n");
  390. return NULL;
  391. }
  392. ccif = kmalloc(sizeof(struct ccif_t), GFP_KERNEL);
  393. if (ccif == NULL) {
  394. CCCI_MSG_INF(md_id, "cci",
  395. "[error]allocate memory for ccif structure fail\n");
  396. return NULL;
  397. }
  398. if (info->md_id != md_id) {
  399. CCCI_MSG_INF(md_id, "cci",
  400. "[error]ccif_instance_md_id is mis-match to hw_info_md_id: (%d, %d)\n",
  401. md_id, info->md_id);
  402. return NULL;
  403. }
  404. switch (info->type) {
  405. case CCIF_STD_V1:
  406. ccif->m_ccif_type = info->type;
  407. ccif->m_irq_id = info->irq_id;
  408. ccif->m_reg_base = info->reg_base;
  409. ccif->m_md_reg_base = info->md_reg_base;
  410. ccif->m_irq_attr = info->irq_attr;
  411. ccif->m_status = 0;
  412. ccif->m_rx_idx = 0;
  413. ccif->m_md_id = md_id; /* info->md_id; */
  414. spin_lock_init(&ccif->m_lock);
  415. ccif->register_call_back_func = __ccif_v1_register_call_back;
  416. ccif->register_isr_notify_func = __ccif_v1_register_isr_notify;
  417. ccif->ccif_init = __ccif_v1_init;
  418. ccif->ccif_de_init = __ccif_v1_de_init;
  419. ccif->ccif_register_intr = __ccif_v1_reg_intr;
  420. ccif->ccif_en_intr = __ccif_v1_en_intr;
  421. ccif->ccif_dis_intr = __ccif_v1_dis_intr;
  422. ccif->ccif_dump_reg = __ccif_v1_dump_reg;
  423. ccif->ccif_read_phy_ch_data = __ccif_v1_read_phy_ch_data;
  424. ccif->ccif_write_phy_ch_data = __ccif_v1_write_phy_ch_data;
  425. ccif->ccif_get_rx_ch = __ccif_v1_get_rx_ch;
  426. ccif->ccif_get_busy_state = __ccif_v1_get_busy_state;
  427. ccif->ccif_set_busy_state = __ccif_v1_set_busy_state;
  428. ccif->ccif_ack_phy_ch = __ccif_v1_ack;
  429. ccif->ccif_clear_sram = __ccif_v1_clear_sram;
  430. ccif->ccif_write_runtime_data = __ccif_v1_write_runtime_data;
  431. ccif->ccif_intr_handler = __ccif_v1_intr_handler;
  432. ccif->ccif_reset = __ccif_v1_reset;
  433. ccif->m_logic_ctl_block = ctl_b;
  434. ccif->m_irq_dis_cnt = 0;
  435. return ccif;
  436. case CCIF_VIR:
  437. default:
  438. CCCI_MSG_INF(md_id, "cci", "%s: [error]invalid ccif type(%d)\n",
  439. __func__, info->type);
  440. kfree(ccif);
  441. return NULL;
  442. }
  443. }