/*
 * This is a CCIF modem driver for MT6595.
 *
 * V0.1: Xiao Wang <xiao.wang@mediatek.com>
 */
#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/platform_device.h>
#include <mach/mt_boot.h>
#include <mt-plat/mt_ccci_common.h>
#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#endif
#include "ccci_config.h"
#include "ccci_core.h"
#include "ccci_bm.h"
#include "ccci_platform.h"
#include "modem_ccif.h"
#include "ccif_platform.h"
#if defined(ENABLE_32K_CLK_LESS)
#include <mt-plat/mtk_rtc.h>
#endif
#define TAG "cif"
#define BOOT_TIMER_ON 10
#define NET_RX_QUEUE_MASK 0x38
#define NAPI_QUEUE_MASK NET_RX_QUEUE_MASK	/* Rx, only Rx-exclusive port can enable NAPI */
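/*
 * Queues in NET_RX_QUEUE_MASK hand bare skbs straight to the port layer;
 * once a modem exception has reached EX_INIT_DONE they fall back to
 * ccci_request wrappers like the other queues.
 */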
#define IS_PASS_SKB(md, qno) \
	((md->md_state != EXCEPTION || md->ex_stage != EX_INIT_DONE) && ((1 << qno) & NET_RX_QUEUE_MASK))
#define RX_BUDGET 16
#define RINGQ_BASE (8)
#define RINGQ_SRAM (7)
#define RINGQ_EXP_BASE (0)
#define CCIF_CH_NUM 16
#define CCIF_MD_SMEM_RESERVE 0x200000
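/*
 * CCIF channel map: CCIF_CH_NUM (16) hardware channels in total.
 * Channels 0..6 (RINGQ_EXP_BASE) carry exception-handshake and control
 * events, channel 7 (RINGQ_SRAM) signals a message in the CCIF SRAM,
 * and channels 8..15 (RINGQ_BASE) signal the eight ring queues that
 * live in shared memory.
 */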
/* AP to MD */
#define H2D_EXCEPTION_ACK (RINGQ_EXP_BASE+1)
#define H2D_EXCEPTION_CLEARQ_ACK (RINGQ_EXP_BASE+2)
#define H2D_FORCE_MD_ASSERT (RINGQ_EXP_BASE+3)
#define H2D_SRAM (RINGQ_SRAM)
#define H2D_RINGQ0 (RINGQ_BASE+0)
#define H2D_RINGQ1 (RINGQ_BASE+1)
#define H2D_RINGQ2 (RINGQ_BASE+2)
#define H2D_RINGQ3 (RINGQ_BASE+3)
#define H2D_RINGQ4 (RINGQ_BASE+4)
#define H2D_RINGQ5 (RINGQ_BASE+5)
#define H2D_RINGQ6 (RINGQ_BASE+6)
#define H2D_RINGQ7 (RINGQ_BASE+7)
/* MD to AP */
#define D2H_EXCEPTION_INIT (RINGQ_EXP_BASE+1)
#define D2H_EXCEPTION_INIT_DONE (RINGQ_EXP_BASE+2)
#define D2H_EXCEPTION_CLEARQ_DONE (RINGQ_EXP_BASE+3)
#define D2H_EXCEPTION_ALLQ_RESET (RINGQ_EXP_BASE+4)
#define AP_MD_SEQ_ERROR (RINGQ_EXP_BASE+6)
#define D2H_SRAM (RINGQ_SRAM)
#define D2H_RINGQ0 (RINGQ_BASE+0)
#define D2H_RINGQ1 (RINGQ_BASE+1)
#define D2H_RINGQ2 (RINGQ_BASE+2)
#define D2H_RINGQ3 (RINGQ_BASE+3)
#define D2H_RINGQ4 (RINGQ_BASE+4)
#define D2H_RINGQ5 (RINGQ_BASE+5)
#define D2H_RINGQ6 (RINGQ_BASE+6)
#define D2H_RINGQ7 (RINGQ_BASE+7)
/* ccif share memory setting */
static int rx_queue_buffer_size[QUEUE_NUM] = {
	32 * 1024, 100 * 1024, 100 * 1024, 100 * 1024,
	16 * 1024, 16 * 1024, 16 * 1024, 16 * 1024,
};
static int tx_queue_buffer_size[QUEUE_NUM] = {
	32 * 1024, 100 * 1024, 16 * 1024, 100 * 1024,
	16 * 1024, 16 * 1024, 16 * 1024, 16 * 1024,
};
static void md_ccif_dump(unsigned char *title, struct ccci_modem *md)
{
	int idx;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "md_ccif_dump: %s\n", title);
	CCCI_INF_MSG(md->index, TAG, "AP_CON(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_CON,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_CON));
	CCCI_INF_MSG(md->index, TAG, "AP_BUSY(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_BUSY,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_BUSY));
	CCCI_INF_MSG(md->index, TAG, "AP_START(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_START,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_START));
	CCCI_INF_MSG(md->index, TAG, "AP_TCHNUM(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_TCHNUM,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_TCHNUM));
	CCCI_INF_MSG(md->index, TAG, "AP_RCHNUM(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_RCHNUM,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_RCHNUM));
	CCCI_INF_MSG(md->index, TAG, "AP_ACK(%p)=%d\n", md_ctrl->ccif_ap_base + APCCIF_ACK,
		     ccif_read32(md_ctrl->ccif_ap_base, APCCIF_ACK));
	CCCI_INF_MSG(md->index, TAG, "MD_CON(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_CON,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_CON));
	CCCI_INF_MSG(md->index, TAG, "MD_BUSY(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_BUSY,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_BUSY));
	CCCI_INF_MSG(md->index, TAG, "MD_START(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_START,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_START));
	CCCI_INF_MSG(md->index, TAG, "MD_TCHNUM(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_TCHNUM,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_TCHNUM));
	CCCI_INF_MSG(md->index, TAG, "MD_RCHNUM(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_RCHNUM,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_RCHNUM));
	CCCI_INF_MSG(md->index, TAG, "MD_ACK(%p)=%d\n", md_ctrl->ccif_md_base + APCCIF_ACK,
		     ccif_read32(md_ctrl->ccif_md_base, APCCIF_ACK));
	for (idx = 0; idx < md_ctrl->sram_size / sizeof(u32); idx += 4) {
		CCCI_INF_MSG(md->index, TAG, "CHDATA(%p): %08X %08X %08X %08X\n",
			     md_ctrl->ccif_ap_base + APCCIF_CHDATA + idx * sizeof(u32),
			     ccif_read32(md_ctrl->ccif_ap_base + APCCIF_CHDATA, (idx + 0) * sizeof(u32)),
			     ccif_read32(md_ctrl->ccif_ap_base + APCCIF_CHDATA, (idx + 1) * sizeof(u32)),
			     ccif_read32(md_ctrl->ccif_ap_base + APCCIF_CHDATA, (idx + 2) * sizeof(u32)),
			     ccif_read32(md_ctrl->ccif_ap_base + APCCIF_CHDATA, (idx + 3) * sizeof(u32)));
	}
}
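/*
 * Control messages that cannot use the shared-memory ring buffers
 * (e.g. during boot or the exception handshake) are carried in the
 * CCIF SRAM instead. This worker copies one ccci_header out of the
 * SRAM downlink area and dispatches it to the port layer, retrying
 * for a short while if the port cannot accept it yet.
 */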
static void md_ccif_sram_rx_work(struct work_struct *work)
{
	struct md_ccif_ctrl *md_ctrl = container_of(work, struct md_ccif_ctrl, ccif_sram_work);
	struct ccci_modem *md = md_ctrl->rxq[0].modem;
	struct ccci_header *dl_pkg = &md_ctrl->ccif_sram_layout->dl_header;
	struct ccci_header *ccci_h;
	struct ccci_request *new_req = NULL;
	int pkg_size, ret = 0, retry_cnt = 0;

	/* md_ccif_dump("md_ccif_sram_rx_work", md); */
	pkg_size = sizeof(struct ccci_header);
	new_req = ccci_alloc_req(IN, pkg_size, 1, 0);
	if (new_req == NULL || new_req->skb == NULL) {
		CCCI_ERR_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_alloc_req pkg_size=%d failed\n", pkg_size);
		return;
	}
	INIT_LIST_HEAD(&new_req->entry);	/* as port will run list_del */
	skb_put(new_req->skb, pkg_size);
	ccci_h = (struct ccci_header *)new_req->skb->data;
	ccci_h->data[0] = ccif_read32(&dl_pkg->data[0], 0);
	ccci_h->data[1] = ccif_read32(&dl_pkg->data[1], 0);
	/* ccci_h->channel = ccif_read32(&dl_pkg->channel, 0); */
	*(((u32 *) ccci_h) + 2) = ccif_read32((((u32 *) dl_pkg) + 2), 0);
	ccci_h->reserved = ccif_read32(&dl_pkg->reserved, 0);
	if (atomic_cmpxchg(&md->wakeup_src, 1, 0) == 1)
		CCCI_INF_MSG(md->index, TAG, "CCIF_MD wakeup source:(SRX_IDX/%d)\n", *(((u32 *) ccci_h) + 2));
RETRY:
	ret = ccci_port_recv_request(md, new_req, new_req->skb);
	CCCI_INF_MSG(md->index, TAG, "Rx msg %x %x %x %x ret=%d\n", ccci_h->data[0], ccci_h->data[1],
		     *(((u32 *) ccci_h) + 2), ccci_h->reserved, ret);
	if (ret >= 0 || ret == -CCCI_ERR_DROP_PACKET) {
		CCCI_INF_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_port_recv_request ret=%d\n", ret);
		/* the request now belongs to the port layer */
	} else {
		if (retry_cnt < 20) {
			CCCI_ERR_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_port_recv_request ret=%d,retry=%d\n",
				     ret, retry_cnt);
			udelay(5);
			retry_cnt++;
			goto RETRY;
		}
		list_del(&new_req->entry);
		ccci_free_req(new_req);
		CCCI_INF_MSG(md->index, TAG, "md_ccif_sram_rx_work:ccci_port_recv_request ret=%d\n", ret);
	}
}
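/*
 * Rx path for the ring queues: for each readable packet, allocate an
 * skb (or a ccci_request), copy the payload out of the shared-memory
 * ring buffer, dispatch it to the port layer, and only then advance
 * the read pointer. Returns -EAGAIN when the port pushed back or data
 * is still pending, so the caller knows to reschedule.
 */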
/* this function may be called from both workqueue and softirq (NAPI) */
static int ccif_rx_collect(struct md_ccif_queue *queue, int budget, int blocking, int *result)
{
	struct ccci_modem *md = queue->modem;
	struct ccci_ringbuf *rx_buf = queue->ringbuf;
	struct ccci_request *new_req = NULL;
	unsigned char *data_ptr;
	int ret = 0, count = 0, pkg_size;
	unsigned long flags;
	int qno = queue->index;
	struct ccci_header *ccci_h = NULL;
	struct sk_buff *skb;

	spin_lock_irqsave(&queue->rx_lock, flags);
	if (queue->rx_on_going != 0) {
		CCCI_DBG_MSG(md->index, TAG, "Q%d rx is on-going(%d)1\n", queue->index, queue->rx_on_going);
		*result = 0;
		spin_unlock_irqrestore(&queue->rx_lock, flags);
		return -1;
	}
	queue->rx_on_going = 1;
	spin_unlock_irqrestore(&queue->rx_lock, flags);
	while (1) {
		pkg_size = ccci_ringbuf_readable(md->index, rx_buf);
		if (pkg_size < 0) {
			CCCI_DBG_MSG(md->index, TAG, "Q%d Rx:rbf readable ret=%d\n", queue->index, pkg_size);
			BUG_ON(pkg_size != -CCCI_RINGBUF_EMPTY);
			ret = 0;
			goto OUT;
		}
		if (IS_PASS_SKB(md, qno)) {
			new_req = NULL;	/* net queues pass a bare skb, no request wrapper */
			skb = ccci_alloc_skb(pkg_size, 0, blocking);
			if (skb == NULL) {
				ret = -ENOMEM;
				goto OUT;
			}
		} else {
			new_req = ccci_alloc_req(IN, pkg_size, blocking, 0);
			if (new_req == NULL || new_req->skb == NULL) {
				CCCI_ERR_MSG(md->index, TAG, "Q%d Rx:ccci_alloc_skb pkg_size=%d failed,count=%d\n",
					     queue->index, pkg_size, count);
				ret = -ENOMEM;
				goto OUT;
			}
			INIT_LIST_HEAD(&new_req->entry);	/* as port will run list_del */
			skb = new_req->skb;
		}
		data_ptr = (unsigned char *)skb_put(skb, pkg_size);
		/* copy data into skb */
		ret = ccci_ringbuf_read(md->index, rx_buf, data_ptr, pkg_size);
		BUG_ON(ret < 0);
		ccci_h = (struct ccci_header *)skb->data;
		if (atomic_cmpxchg(&md->wakeup_src, 1, 0) == 1)
			CCCI_INF_MSG(md->index, TAG, "CCIF_MD wakeup source:(%d/%d)\n", queue->index,
				     *(((u32 *) ccci_h) + 2));
		CCCI_DBG_MSG(md->index, TAG, "Q%d Rx msg %x %x %x %x\n", queue->index, ccci_h->data[0], ccci_h->data[1],
			     *(((u32 *) ccci_h) + 2), ccci_h->reserved);
		ret = ccci_port_recv_request(md, new_req, skb);
		if (ret >= 0 || ret == -CCCI_ERR_DROP_PACKET) {
			count++;
			if (queue->debug_id) {
				CCCI_INF_MSG(md->index, TAG, "Q%d Rx recv req ret=%d\n", queue->index, ret);
				queue->debug_id = 0;
			}
			ccci_ringbuf_move_rpointer(md->index, rx_buf, pkg_size);
			ret = 0;
			/* the request now belongs to the port layer */
		} else {
			/* leave the packet in shared memory and wait for CCCI to receive it */
			if (IS_PASS_SKB(md, qno)) {
				dev_kfree_skb_any(skb);
			} else {
				list_del(&new_req->entry);
				ccci_free_req(new_req);
			}
			if (queue->debug_id == 0) {
				queue->debug_id = 1;
				CCCI_ERR_MSG(md->index, TAG, "Q%d Rx recv req ret=%d\n", queue->index, ret);
			}
			ret = -EAGAIN;
			goto OUT;
		}
		if (count > budget)
			goto OUT;
	}
OUT:
	*result = count;
	CCCI_DBG_MSG(md->index, TAG, "Q%d rx %d pkg,ret=%d\n", queue->index, count, ret);
	spin_lock_irqsave(&queue->rx_lock, flags);
	if (ret != -EAGAIN) {
		pkg_size = ccci_ringbuf_readable(md->index, rx_buf);
		if (pkg_size > 0)
			ret = -EAGAIN;
	}
	queue->rx_on_going = 0;
	spin_unlock_irqrestore(&queue->rx_lock, flags);
	return ret;
}
static void ccif_rx_work(struct work_struct *work)
{
	int result = 0, ret = 0;
	struct md_ccif_queue *queue = container_of(work, struct md_ccif_queue, qwork);

	ret = ccif_rx_collect(queue, queue->budget, 1, &result);
	if (ret == -EAGAIN)
		queue_work(queue->worker, &queue->qwork);
}
static irqreturn_t md_cd_wdt_isr(int irq, void *data)
{
	struct ccci_modem *md = (struct ccci_modem *)data;
	int ret = 0;
#ifdef ENABLE_MD_WDT_DBG
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	unsigned int state;
#endif

	CCCI_INF_MSG(md->index, TAG, "MD WDT IRQ\n");
	/* 1. disable MD WDT */
#ifdef ENABLE_MD_WDT_DBG
	state = ccif_read32(md_ctrl->md_rgu_base, WDT_MD_STA);
	ccif_write32(md_ctrl->md_rgu_base, WDT_MD_MODE, WDT_MD_MODE_KEY);
	CCCI_INF_MSG(md->index, TAG, "WDT IRQ disabled for debug, state=%X\n", state);
#endif
	if (*((int *)(md->mem_layout.smem_region_vir + CCCI_SMEM_OFFSET_EPON)) == 0xBAEBAE10) {
		/* 3. reset */
		ret = md->ops->reset(md);
		CCCI_INF_MSG(md->index, TAG, "reset MD after WDT %d\n", ret);
		/* 4. send message, only reset MD on non-eng load */
		ccci_send_virtual_md_msg(md, CCCI_MONITOR_CH, CCCI_MD_MSG_RESET, 0);
	} else {
		ccci_md_exception_notify(md, MD_WDT);
	}
	return IRQ_HANDLED;
}
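/*
 * Kick one CCIF channel towards the MD: mark it busy, then write the
 * channel number to TCHNUM, which raises the interrupt on the MD side.
 * If the channel is still busy from a previous kick, the event is
 * skipped; for ring queues the payload already sits in shared memory,
 * so the pending interrupt covers it.
 */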
static int md_ccif_send(struct ccci_modem *md, int channel_id)
{
	int busy = 0;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	busy = ccif_read32(md_ctrl->ccif_ap_base, APCCIF_BUSY);
	if (busy & (1 << channel_id)) {
		CCCI_DBG_MSG(md->index, TAG, "CCIF channel %d busy\n", channel_id);
	} else {
		ccif_write32(md_ctrl->ccif_ap_base, APCCIF_BUSY, 1 << channel_id);
		ccif_write32(md_ctrl->ccif_ap_base, APCCIF_TCHNUM, channel_id);
		CCCI_DBG_MSG(md->index, TAG, "CCIF start=0x%x\n", ccif_read32(md_ctrl->ccif_ap_base, APCCIF_START));
	}
	return 0;
}
static void md_ccif_sram_reset(struct ccci_modem *md)
{
	int idx = 0;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "md_ccif_sram_reset\n");
	for (idx = 0; idx < md_ctrl->sram_size / sizeof(u32); idx += 1)
		ccif_write32(md_ctrl->ccif_ap_base + APCCIF_CHDATA, idx * sizeof(u32), 0);
}
static void md_ccif_queue_dump(struct ccci_modem *md)
{
	int idx;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "Dump CCIF Queue Control\n");
	for (idx = 0; idx < QUEUE_NUM; idx++) {
		CCCI_INF_MSG(md->index, TAG, "Q%d TX: w=%d, r=%d, len=%d\n", idx,
			     md_ctrl->txq[idx].ringbuf->tx_control.write, md_ctrl->txq[idx].ringbuf->tx_control.read,
			     md_ctrl->txq[idx].ringbuf->tx_control.length);
		CCCI_INF_MSG(md->index, TAG, "Q%d RX: w=%d, r=%d, len=%d\n", idx,
			     md_ctrl->rxq[idx].ringbuf->rx_control.write, md_ctrl->rxq[idx].ringbuf->rx_control.read,
			     md_ctrl->rxq[idx].ringbuf->rx_control.length);
	}
}
static void md_ccif_reset_queue(struct ccci_modem *md)
{
	int i;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "md_ccif_reset_queue\n");
	for (i = 0; i < QUEUE_NUM; ++i) {
		ccci_ringbuf_reset(md->index, md_ctrl->rxq[i].ringbuf, 0);
		ccci_ringbuf_reset(md->index, md_ctrl->txq[i].ringbuf, 1);
	}
}
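/*
 * MD exception handshake, driven by the D2H_EXCEPTION_* channels:
 * INIT (ack it) -> INIT_DONE -> CLEARQ_DONE (dump and reset all ring
 * queues, then ack) -> ALLQ_RESET. Every stage is also forwarded to
 * the CCCI core via ccci_md_exception_notify().
 */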
static void md_ccif_exception(struct ccci_modem *md, HIF_EX_STAGE stage)
{
	CCCI_INF_MSG(md->index, TAG, "MD exception HIF %d\n", stage);
	switch (stage) {
	case HIF_EX_INIT:
		ccci_md_exception_notify(md, EX_INIT);
		/* Rx dispatch does NOT depend on the queue index in the port structure, so it can still find the right port */
		md_ccif_send(md, H2D_EXCEPTION_ACK);
		break;
	case HIF_EX_INIT_DONE:
		ccci_md_exception_notify(md, EX_DHL_DL_RDY);
		break;
	case HIF_EX_CLEARQ_DONE:
		md_ccif_queue_dump(md);
		md_ccif_reset_queue(md);
		md_ccif_send(md, H2D_EXCEPTION_CLEARQ_ACK);
		break;
	case HIF_EX_ALLQ_RESET:
		ccci_md_exception_notify(md, EX_INIT_DONE);
		break;
	default:
		break;
	}
}
static void md_ccif_irq_tasklet(unsigned long data)
{
	struct ccci_modem *md = (struct ccci_modem *)data;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	int i;

	CCCI_DBG_MSG(md->index, TAG, "ccif_irq_tasklet1: ch %ld\n", md_ctrl->channel_id);
	while (md_ctrl->channel_id != 0) {
		if (md_ctrl->channel_id & (1 << D2H_EXCEPTION_INIT)) {
			clear_bit(D2H_EXCEPTION_INIT, &md_ctrl->channel_id);
			md_ccif_exception(md, HIF_EX_INIT);
		}
		if (md_ctrl->channel_id & (1 << D2H_EXCEPTION_INIT_DONE)) {
			clear_bit(D2H_EXCEPTION_INIT_DONE, &md_ctrl->channel_id);
			md_ccif_exception(md, HIF_EX_INIT_DONE);
		}
		if (md_ctrl->channel_id & (1 << D2H_EXCEPTION_CLEARQ_DONE)) {
			clear_bit(D2H_EXCEPTION_CLEARQ_DONE, &md_ctrl->channel_id);
			md_ccif_exception(md, HIF_EX_CLEARQ_DONE);
		}
		if (md_ctrl->channel_id & (1 << D2H_EXCEPTION_ALLQ_RESET)) {
			clear_bit(D2H_EXCEPTION_ALLQ_RESET, &md_ctrl->channel_id);
			md_ccif_exception(md, HIF_EX_ALLQ_RESET);
		}
		if (md_ctrl->channel_id & (1 << AP_MD_SEQ_ERROR)) {
			clear_bit(AP_MD_SEQ_ERROR, &md_ctrl->channel_id);
			CCCI_ERR_MSG(md->index, TAG, "MD check seq fail\n");
			md->ops->dump_info(md, DUMP_FLAG_CCIF, NULL, 0);
		}
		if (md_ctrl->channel_id & (1 << D2H_SRAM)) {
			clear_bit(D2H_SRAM, &md_ctrl->channel_id);
			schedule_work(&md_ctrl->ccif_sram_work);
		}
		for (i = 0; i < QUEUE_NUM; i++) {
			if (md_ctrl->channel_id & (1 << (i + D2H_RINGQ0))) {
				clear_bit(i + D2H_RINGQ0, &md_ctrl->channel_id);
				if (md_ctrl->rxq[i].rx_on_going != 0) {
					CCCI_DBG_MSG(md->index, TAG, "Q%d rx is on-going(%d)2\n",
						     md_ctrl->rxq[i].index, md_ctrl->rxq[i].rx_on_going);
					return;
				}
				if (md->md_state != EXCEPTION && (md->capability & MODEM_CAP_NAPI)
				    && md_ctrl->rxq[i].napi_port
				    && ((1 << md_ctrl->rxq[i].napi_port->rxq_index) & NAPI_QUEUE_MASK)) {
					md_ctrl->rxq[i].napi_port->ops->md_state_notice(md_ctrl->rxq[i].napi_port,
											RX_IRQ);
				} else {
					queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].qwork);
				}
			}
		}
		CCCI_DBG_MSG(md->index, TAG, "ccif_irq_tasklet2: ch %ld\n", md_ctrl->channel_id);
	}
}
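/*
 * Top half: latch the raised channel bits into md_ctrl->channel_id and
 * ack the hardware immediately (otherwise the IRQ would re-assert),
 * then defer the real work to the hi-prio tasklet above.
 */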
static irqreturn_t md_ccif_isr(int irq, void *data)
{
	struct ccci_modem *md = (struct ccci_modem *)data;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	unsigned int ch_id;

	/* disable_irq_nosync(md_ctrl->ccif_irq_id); */
	/* must ack first, otherwise IRQ will rush in */
	ch_id = ccif_read32(md_ctrl->ccif_ap_base, APCCIF_RCHNUM);
	md_ctrl->channel_id |= ch_id;
	ccif_write32(md_ctrl->ccif_ap_base, APCCIF_ACK, ch_id);
	/* enable_irq(md_ctrl->ccif_irq_id); */
	CCCI_DBG_MSG(md->index, TAG, "MD CCIF IRQ %ld\n", md_ctrl->channel_id);
	tasklet_hi_schedule(&md_ctrl->ccif_irq_task);
	return IRQ_HANDLED;
}
static int md_ccif_op_broadcast_state(struct ccci_modem *md, MD_STATE state)
{
	int i;
	struct ccci_port *port;

	/* only for those states which are updated by port_kernel.c */
	switch (state) {
	case BOOT_FAIL:
		return 0;
	case RX_IRQ:
		CCCI_ERR_MSG(md->index, TAG, "%ps broadcast RX_IRQ to ports!\n", __builtin_return_address(0));
		return 0;
	default:
		break;
	}
	if (md->md_state == state)	/* must check, as we broadcast EXCEPTION in both MD_EX and EX_INIT */
		return 1;
	md->md_state = state;
	for (i = 0; i < md->port_number; i++) {
		port = md->ports + i;
		if (port->ops->md_state_notice)
			port->ops->md_state_notice(port, state);
	}
	return 0;
}
static inline void md_ccif_queue_struct_init(struct md_ccif_queue *queue, struct ccci_modem *md,
					     DIRECTION dir, unsigned char index)
{
	queue->dir = dir;
	queue->index = index;
	queue->modem = md;
	queue->napi_port = NULL;
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->rx_lock);
	spin_lock_init(&queue->tx_lock);
	queue->rx_on_going = 0;
	queue->debug_id = 0;
	queue->budget = RX_BUDGET;
}
static int md_ccif_op_init(struct ccci_modem *md)
{
	int i;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	struct ccci_port *port;

	CCCI_INF_MSG(md->index, TAG, "CCIF modem is initializing\n");
	/* init queue */
	for (i = 0; i < QUEUE_NUM; i++) {
		md_ccif_queue_struct_init(&md_ctrl->txq[i], md, OUT, i);
		md_ccif_queue_struct_init(&md_ctrl->rxq[i], md, IN, i);
	}
	/* init port */
	for (i = 0; i < md->port_number; i++) {
		port = md->ports + i;
		ccci_port_struct_init(port, md);
		port->ops->init(port);
		if ((port->flags & PORT_F_RX_EXCLUSIVE) && (md->capability & MODEM_CAP_NAPI)
		    && ((1 << port->rxq_index) & NAPI_QUEUE_MASK)) {
			md_ctrl->rxq[port->rxq_index].napi_port = port;
			CCCI_INF_MSG(md->index, TAG, "queue%d add NAPI port %s\n", port->rxq_index, port->name);
		}
	}
	ccci_setup_channel_mapping(md);
	/* update state */
	md->md_state = GATED;
	return 0;
}
static int md_ccif_op_start(struct ccci_modem *md)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	char img_err_str[IMG_ERR_STR_LEN];
	int ret = 0;

	/* 0. init security, as security depends on dummy_char, which is ready very late */
	ccci_init_security();
	md_ccif_sram_reset(md);
	md_ccif_reset_queue(md);
	ccci_reset_seq_num(md);
	CCCI_INF_MSG(md->index, TAG, "CCIF modem is starting\n");
	/* 1. load modem image */
	if (md->config.setting & MD_SETTING_FIRST_BOOT || md->config.setting & MD_SETTING_RELOAD) {
		ccci_clear_md_region_protection(md);
		ret = ccci_load_firmware(md->index, &md->img_info[IMG_MD], img_err_str, md->post_fix);
		if (ret < 0) {
			CCCI_ERR_MSG(md->index, TAG, "load firmware fail, %s\n", img_err_str);
			goto out;
		}
		ret = 0;	/* load_std_firmware returns MD image size */
		md->config.setting &= ~MD_SETTING_RELOAD;
	}
	/* 2. enable MPU */
	ccci_set_mem_access_protection(md);
	/* 3. power on modem, do NOT touch MD register before this */
	ret = md_ccif_power_on(md);
	if (ret) {
		CCCI_ERR_MSG(md->index, TAG, "power on MD fail %d\n", ret);
		goto out;
	}
	/* 4. update mutex */
	atomic_set(&md_ctrl->reset_on_going, 0);
	/* 5. start timer */
	mod_timer(&md->bootup_timer, jiffies + BOOT_TIMER_ON * HZ);
	/* 6. let modem go */
	md->ops->broadcast_state(md, BOOTING);
	md_ccif_let_md_go(md);
	enable_irq(md_ctrl->md_wdt_irq_id);
out:
	CCCI_INF_MSG(md->index, TAG, "ccif modem started %d\n", ret);
	/* used for throttling feature - start */
	ccci_modem_boot_count[md->index]++;
	/* used for throttling feature - end */
	return ret;
}
static int md_ccif_op_stop(struct ccci_modem *md, unsigned int timeout)
{
	int ret = 0;
	int idx = 0;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(md->index, TAG, "ccif modem is powering off, timeout=%d\n", timeout);
	ret = md_ccif_power_off(md, timeout);
	CCCI_INF_MSG(md->index, TAG, "ccif modem power off done, %d\n", ret);
	for (idx = 0; idx < QUEUE_NUM; idx++)
		flush_work(&md_ctrl->rxq[idx].qwork);
	CCCI_INF_MSG(md->index, TAG, "ccif flush_work done, %d\n", ret);
	md_ccif_reset_queue(md);
	md->ops->broadcast_state(md, GATED);
	return 0;
}
static int md_ccif_op_reset(struct ccci_modem *md)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	/* 1. mutex check */
	if (atomic_inc_return(&md_ctrl->reset_on_going) > 1) {
		CCCI_INF_MSG(md->index, TAG, "One reset flow is on-going\n");
		return -CCCI_ERR_MD_IN_RESET;
	}
	CCCI_INF_MSG(md->index, TAG, "ccif modem is resetting\n");
	/* 2. disable IRQ (use nosync) */
	disable_irq_nosync(md_ctrl->md_wdt_irq_id);
	md->ops->broadcast_state(md, RESET);	/* to block char's write operation */
	del_timer(&md->bootup_timer);
	md->boot_stage = MD_BOOT_STAGE_0;
	return 0;
}
static int md_ccif_op_write_room(struct ccci_modem *md, unsigned char qno)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	if (qno == 0xFF)
		return -CCCI_ERR_INVALID_QUEUE_INDEX;
	return ccci_ringbuf_writeable(md->index, md_ctrl->txq[qno].ringbuf, 0);
}
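/*
 * Tx path: under the queue's tx_lock, check ring-buffer space, bump
 * the CCCI sequence number, copy the whole skb into the ring buffer,
 * free the skb/request, then kick the queue's CCIF channel. Blocking
 * callers busy-wait on a full ring; others get -EBUSY.
 */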
static int md_ccif_op_send_request(struct ccci_modem *md, unsigned char qno,
				   struct ccci_request *req, struct sk_buff *skb)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	struct md_ccif_queue *queue = NULL;
	int ret;
	struct ccci_header *ccci_h;
	unsigned long flags;

	if (qno == 0xFF)
		return -CCCI_ERR_INVALID_QUEUE_INDEX;
	queue = &md_ctrl->txq[qno];
	if (req != NULL)
		skb = req->skb;
retry:
	/* use irqsave, as the network stack takes this lock in softirq context; a plain lock risks deadlock */
	spin_lock_irqsave(&queue->tx_lock, flags);
	ccci_h = (struct ccci_header *)skb->data;
	if (ccci_ringbuf_writeable(md->index, queue->ringbuf, skb->len) > 0) {
		ccci_inc_tx_seq_num(md, ccci_h);
		/* copy skb to ringbuf */
		ret = ccci_ringbuf_write(md->index, queue->ringbuf, skb->data, skb->len);
		if (ret != skb->len)
			CCCI_ERR_MSG(md->index, TAG, "TX:ERR rbf write: ret(%d)!=req(%d)\n", ret, skb->len);
		/* ccci_h = (struct ccci_header *)req->skb->data; */
		/* if (ccci_h->channel == CCCI_CCMNI1_TX) { */
		/*	short *ipid = (short *)(req->skb->data+sizeof(struct ccci_header)+4); */
		/*	int *valid = (int *)(req->skb->data+sizeof(struct ccci_header)+36); */
		/*	CCCI_INF_MSG(md->index, TAG, "tx %p len=%d ipid=%x, valid=%x\n",
			req->skb->data, req->skb->len, *ipid, *valid); */
		/* } */
		/* free request */
		if (req == NULL)
			dev_kfree_skb_any(skb);
		else
			ccci_free_req(req);
		/* send ccif request */
		md_ccif_send(md, queue->ccif_ch);
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		if (queue->debug_id == 1) {
			CCCI_INF_MSG(md->index, TAG, "TX:OK on q%d,txw=%d,txr=%d,rxw=%d,rxr=%d\n", qno,
				     queue->ringbuf->tx_control.write, queue->ringbuf->tx_control.read,
				     queue->ringbuf->rx_control.write, queue->ringbuf->rx_control.read);
			queue->debug_id = 0;
		}
	} else {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		if (queue->debug_id == 0) {
			CCCI_INF_MSG(md->index, TAG, "TX:busy on q%d,txw=%d,txr=%d,rxw=%d,rxr=%d\n", qno,
				     queue->ringbuf->tx_control.write, queue->ringbuf->tx_control.read,
				     queue->ringbuf->rx_control.write, queue->ringbuf->rx_control.read);
			queue->debug_id = 1;
		}
		if (req != NULL && req->blocking) {
			udelay(5);
			/* TODO: add time out check */
			CCCI_INF_MSG(md->index, TAG, "TODO: add time out check busy on q%d\n", qno);
			goto retry;
		} else {
			return -EBUSY;
		}
	}
	return 0;
}
static int md_ccif_op_give_more(struct ccci_modem *md, unsigned char qno)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	if (qno == 0xFF)
		return -CCCI_ERR_INVALID_QUEUE_INDEX;
	queue_work(md_ctrl->rxq[qno].worker, &md_ctrl->rxq[qno].qwork);
	return 0;
}
static int md_ccif_op_napi_poll(struct ccci_modem *md, unsigned char qno, struct napi_struct *napi, int budget)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	int ret, result = 0;

	if (qno == 0xFF)
		return -CCCI_ERR_INVALID_QUEUE_INDEX;
	if (md_ctrl->rxq[qno].rx_on_going != 0) {
		CCCI_DBG_MSG(md->index, TAG, "Q%d rx is on-going(%d)3\n", md_ctrl->rxq[qno].index,
			     md_ctrl->rxq[qno].rx_on_going);
		return 0;
	}
	budget = budget < md_ctrl->rxq[qno].budget ? budget : md_ctrl->rxq[qno].budget;
	ret = ccif_rx_collect(&md_ctrl->rxq[qno], budget, 0, &result);
	if (ret == 0 && result == 0)
		napi_complete(napi);
	return ret;
}
static struct ccci_port *md_ccif_op_get_port_by_minor(struct ccci_modem *md, int minor)
{
	int i;
	struct ccci_port *port;

	for (i = 0; i < md->port_number; i++) {
		port = md->ports + i;
		if (port->minor == minor)
			return port;
	}
	return NULL;
}
static struct ccci_port *md_ccif_op_get_port_by_channel(struct ccci_modem *md, CCCI_CH ch)
{
	int i;
	struct ccci_port *port;

	for (i = 0; i < md->port_number; i++) {
		port = md->ports + i;
		if (port->rx_ch == ch || port->tx_ch == ch)
			return port;
	}
	return NULL;
}
static void dump_runtime_data(struct ccci_modem *md, struct modem_runtime *runtime)
{
	char ctmp[12];
	int *p;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	p = (int *)ctmp;
	*p = ccif_read32(&runtime->Prefix, 0);
	p++;
	*p = ccif_read32(&runtime->Platform_L, 0);
	p++;
	*p = ccif_read32(&runtime->Platform_H, 0);
	BUG_ON(sizeof(struct modem_runtime) > md_ctrl->sram_size);
	CCCI_INF_MSG(md->index, TAG, "Prefix %c%c%c%c\n", ctmp[0], ctmp[1], ctmp[2], ctmp[3]);
	CCCI_INF_MSG(md->index, TAG, "Platform_L %c%c%c%c\n", ctmp[4], ctmp[5], ctmp[6], ctmp[7]);
	CCCI_INF_MSG(md->index, TAG, "Platform_H %c%c%c%c\n", ctmp[8], ctmp[9], ctmp[10], ctmp[11]);
	CCCI_INF_MSG(md->index, TAG, "DriverVersion 0x%x\n", ccif_read32(&runtime->DriverVersion, 0));
	CCCI_INF_MSG(md->index, TAG, "BootChannel %d\n", ccif_read32(&runtime->BootChannel, 0));
	CCCI_INF_MSG(md->index, TAG, "BootingStartID(Mode) 0x%x\n", ccif_read32(&runtime->BootingStartID, 0));
	CCCI_INF_MSG(md->index, TAG, "BootAttributes %d\n", ccif_read32(&runtime->BootAttributes, 0));
	CCCI_INF_MSG(md->index, TAG, "BootReadyID %d\n", ccif_read32(&runtime->BootReadyID, 0));
	CCCI_INF_MSG(md->index, TAG, "ExceShareMemBase 0x%x\n", ccif_read32(&runtime->ExceShareMemBase, 0));
	CCCI_INF_MSG(md->index, TAG, "ExceShareMemSize 0x%x\n", ccif_read32(&runtime->ExceShareMemSize, 0));
	CCCI_INF_MSG(md->index, TAG, "CCIFShareMemBase 0x%x\n", ccif_read32(&runtime->CCIFShareMemBase, 0));
	CCCI_INF_MSG(md->index, TAG, "CCIFShareMemSize 0x%x\n", ccif_read32(&runtime->CCIFShareMemSize, 0));
	CCCI_INF_MSG(md->index, TAG, "TotalShareMemBase 0x%x\n", ccif_read32(&runtime->TotalShareMemBase, 0));
	CCCI_INF_MSG(md->index, TAG, "TotalShareMemSize 0x%x\n", ccif_read32(&runtime->TotalShareMemSize, 0));
	CCCI_INF_MSG(md->index, TAG, "CheckSum %d\n", ccif_read32(&runtime->CheckSum, 0));
	p = (int *)ctmp;
	*p = ccif_read32(&runtime->Postfix, 0);
	CCCI_INF_MSG(md->index, TAG, "Postfix %c%c%c%c\n", ctmp[0], ctmp[1], ctmp[2], ctmp[3]);
	CCCI_INF_MSG(md->index, TAG, "**********************************************\n");
	p = (int *)ctmp;
	*p = ccif_read32(&runtime->misc_prefix, 0);
	CCCI_INF_MSG(md->index, TAG, "Prefix %c%c%c%c\n", ctmp[0], ctmp[1], ctmp[2], ctmp[3]);
	CCCI_INF_MSG(md->index, TAG, "SupportMask 0x%x\n", ccif_read32(&runtime->support_mask, 0));
	CCCI_INF_MSG(md->index, TAG, "Index 0x%x\n", ccif_read32(&runtime->index, 0));
	CCCI_INF_MSG(md->index, TAG, "Next 0x%x\n", ccif_read32(&runtime->next, 0));
	CCCI_INF_MSG(md->index, TAG, "Feature0 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_0_val[0], 0),
		     ccif_read32(&runtime->feature_0_val[1], 0), ccif_read32(&runtime->feature_0_val[2], 0),
		     ccif_read32(&runtime->feature_0_val[3], 0));
	CCCI_INF_MSG(md->index, TAG, "Feature1 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_1_val[0], 0),
		     ccif_read32(&runtime->feature_1_val[1], 0), ccif_read32(&runtime->feature_1_val[2], 0),
		     ccif_read32(&runtime->feature_1_val[3], 0));
	CCCI_INF_MSG(md->index, TAG, "Feature2 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_2_val[0], 0),
		     ccif_read32(&runtime->feature_2_val[1], 0), ccif_read32(&runtime->feature_2_val[2], 0),
		     ccif_read32(&runtime->feature_2_val[3], 0));
	CCCI_INF_MSG(md->index, TAG, "Feature3 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_3_val[0], 0),
		     ccif_read32(&runtime->feature_3_val[1], 0), ccif_read32(&runtime->feature_3_val[2], 0),
		     ccif_read32(&runtime->feature_3_val[3], 0));
	CCCI_INF_MSG(md->index, TAG, "Feature4 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_4_val[0], 0),
		     ccif_read32(&runtime->feature_4_val[1], 0), ccif_read32(&runtime->feature_4_val[2], 0),
		     ccif_read32(&runtime->feature_4_val[3], 0));
	CCCI_INF_MSG(md->index, TAG, "Feature5 0x%x 0x%x 0x%x 0x%x\n", ccif_read32(&runtime->feature_5_val[0], 0),
		     ccif_read32(&runtime->feature_5_val[1], 0), ccif_read32(&runtime->feature_5_val[2], 0),
		     ccif_read32(&runtime->feature_5_val[3], 0));
	p = (int *)ctmp;
	*p = ccif_read32(&runtime->misc_postfix, 0);
	CCCI_INF_MSG(md->index, TAG, "Postfix %c%c%c%c\n", ctmp[0], ctmp[1], ctmp[2], ctmp[3]);
	CCCI_INF_MSG(md->index, TAG, "----------------------------------------------\n");
}
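/*
 * Boot handshake: the runtime configuration is written into the CCIF
 * SRAM (strings little-endian, the share-memory layout, and a feature
 * mask holding two bits per MISC_* feature), then announced to the MD
 * by kicking H2D_SRAM. dump_runtime_data() above logs what was written.
 */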
static int md_ccif_op_send_runtime_data(struct ccci_modem *md, unsigned int sbp_code)
{
	int packet_size = sizeof(struct ccci_header) + sizeof(struct modem_runtime);
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	struct ccci_header *ccci_h;
	struct modem_runtime *runtime;
	struct file *filp = NULL;
	LOGGING_MODE mdlog_flag = MODE_IDLE;
	int ret;
	char str[16];
	unsigned int random_seed = 0, tmp;
	char md_logger_cfg_file[32];

	snprintf(str, sizeof(str), "%s", AP_PLATFORM_INFO);
	ccci_h = (struct ccci_header *)&md_ctrl->ccif_sram_layout->up_header;
	runtime = (struct modem_runtime *)&md_ctrl->ccif_sram_layout->runtime_data;
	ccci_set_ap_region_protection(md);
	/* header */
	ccif_write32(&ccci_h->data[0], 0, 0x00);
	ccif_write32(&ccci_h->data[1], 0, packet_size);
	ccif_write32(&ccci_h->reserved, 0, MD_INIT_CHK_ID);
	/* ccif_write32(&ccci_h->channel, 0, CCCI_CONTROL_TX); */
	ccif_write32((u32 *) ccci_h + 2, 0, CCCI_CONTROL_TX);
	/* runtime data, little endian for string */
	ccif_write32(&runtime->Prefix, 0, 0x46494343);
	ccif_write32(&runtime->Postfix, 0, 0x46494343);
	ccif_write32(&runtime->Platform_L, 0, *((int *)str));
	ccif_write32(&runtime->Platform_H, 0, *((int *)&str[4]));
	ccif_write32(&runtime->BootChannel, 0, CCCI_CONTROL_RX);
	ccif_write32(&runtime->DriverVersion, 0, CCCI_DRIVER_VER);
	if (md->index == 0)
		snprintf(md_logger_cfg_file, 32, "%s", MD1_LOGGER_FILE_PATH);
	else
		snprintf(md_logger_cfg_file, 32, "%s", MD2_LOGGER_FILE_PATH);
	filp = filp_open(md_logger_cfg_file, O_RDONLY, 0777);
	if (!IS_ERR(filp)) {
		ret = kernel_read(filp, 0, (char *)&mdlog_flag, sizeof(int));
		if (ret != sizeof(int))
			mdlog_flag = MODE_IDLE;
	} else {
		CCCI_ERR_MSG(md->index, TAG, "open %s fail\n", md_logger_cfg_file);
		filp = NULL;
	}
	if (filp != NULL)
		filp_close(filp, NULL);
	if (is_meta_mode() || is_advanced_meta_mode())
		ccif_write32(&runtime->BootingStartID, 0, ((char)mdlog_flag << 8 | META_BOOT_ID));
	else
		ccif_write32(&runtime->BootingStartID, 0, ((char)mdlog_flag << 8 | NORMAL_BOOT_ID));
	/* share memory layout */
	ccif_write32(&runtime->ExceShareMemBase, 0,
		     md->smem_layout.ccci_exp_smem_base_phy - md->mem_layout.smem_offset_AP_to_MD);
	ccif_write32(&runtime->ExceShareMemSize, 0, md->smem_layout.ccci_exp_smem_size);
	ccif_write32(&runtime->CCIFShareMemBase, 0,
		     md->smem_layout.ccci_exp_smem_base_phy + md->smem_layout.ccci_exp_smem_size -
		     md->mem_layout.smem_offset_AP_to_MD);
	ccif_write32(&runtime->CCIFShareMemSize, 0, md_ctrl->total_smem_size);
	ccif_write32(&runtime->TotalShareMemBase, 0,
		     md->mem_layout.smem_region_phy - md->mem_layout.smem_offset_AP_to_MD);
	ccif_write32(&runtime->TotalShareMemSize, 0, md->mem_layout.smem_region_size);
	/* misc region, little endian for string */
	ccif_write32(&runtime->misc_prefix, 0, 0x4353494D);
	ccif_write32(&runtime->misc_postfix, 0, 0x4353494D);
	ccif_write32(&runtime->index, 0, 0x0);
	ccif_write32(&runtime->next, 0, 0x0);
#if defined(ENABLE_32K_CLK_LESS)
	if (crystal_exist_status()) {
		/* a 32k crystal is present, so the 32k-less feature is not needed */
		tmp = ccif_read32(&runtime->support_mask, 0);
		tmp &= ~(FEATURE_NOT_SUPPORT << (MISC_32K_LESS * 2));
		tmp |= (FEATURE_NOT_SUPPORT << (MISC_32K_LESS * 2));
		ccif_write32(&runtime->support_mask, 0, tmp);
	} else {
		tmp = ccif_read32(&runtime->support_mask, 0);
		tmp &= ~(FEATURE_SUPPORT << (MISC_32K_LESS * 2));
		tmp |= (FEATURE_SUPPORT << (MISC_32K_LESS * 2));
		ccif_write32(&runtime->support_mask, 0, tmp);
	}
#else
	tmp = ccif_read32(&runtime->support_mask, 0);
	tmp &= ~(FEATURE_NOT_SUPPORT << (MISC_32K_LESS * 2));
	tmp |= (FEATURE_NOT_SUPPORT << (MISC_32K_LESS * 2));
	ccif_write32(&runtime->support_mask, 0, tmp);
#endif
	/* random seed */
	get_random_bytes(&random_seed, sizeof(int));
	ccif_write32(&runtime->feature_2_val[0], 0, random_seed);
	tmp = ccif_read32(&runtime->support_mask, 0);
	tmp &= ~(FEATURE_SUPPORT << (MISC_RAND_SEED * 2));
	tmp |= (FEATURE_SUPPORT << (MISC_RAND_SEED * 2));
	ccif_write32(&runtime->support_mask, 0, tmp);
	/* MD2 SBP code */
	if (sbp_code > 0) {
		/* runtime->support_mask |= (FEATURE_SUPPORT << (MISC_MD_SBP_SETTING * 2)); */
		/* runtime->feature_4_val[0] = sbp_code; */
		ccif_write32(&runtime->feature_4_val[0], 0, sbp_code);
		tmp = ccif_read32(&runtime->support_mask, 0);
		tmp &= ~(FEATURE_SUPPORT << (MISC_MD_SBP_SETTING * 2));
		tmp |= (FEATURE_SUPPORT << (MISC_MD_SBP_SETTING * 2));
		ccif_write32(&runtime->support_mask, 0, tmp);
	}
	/* CCCI debug */
#if defined(FEATURE_SEQ_CHECK_EN) || defined(FEATURE_POLL_MD_EN)
	tmp = ccif_read32(&runtime->support_mask, 0);
	tmp &= ~(FEATURE_SUPPORT << (MISC_MD_SEQ_CHECK * 2));
	tmp |= (FEATURE_SUPPORT << (MISC_MD_SEQ_CHECK * 2));
	ccif_write32(&runtime->support_mask, 0, tmp);
	tmp = 0;
	ccif_write32(&runtime->feature_5_val[0], 0, tmp);
#ifdef FEATURE_SEQ_CHECK_EN
	tmp = ccif_read32(&runtime->feature_5_val[0], 0);
	tmp |= (1 << 0);
	ccif_write32(&runtime->feature_5_val[0], 0, tmp);
#endif
#ifdef FEATURE_POLL_MD_EN
	tmp = ccif_read32(&runtime->feature_5_val[0], 0);
	tmp |= (1 << 1);
	ccif_write32(&runtime->feature_5_val[0], 0, tmp);
#endif
#endif
	/* md_ccif_dump("send_runtime", md); */
	dump_runtime_data(md, runtime);
	ret = md_ccif_send(md, H2D_SRAM);
	return ret;
}
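/*
 * Three ways to force an MD assert: a regular CCCI control message on
 * Tx queue 0, a bare CCIF interrupt (H2D_FORCE_MD_ASSERT), or the
 * sequence-error channel (AP_MD_SEQ_ERROR).
 */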
static int md_ccif_op_force_assert(struct ccci_modem *md, MD_COMM_TYPE type)
{
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;

	CCCI_INF_MSG(md->index, TAG, "force assert MD using %d\n", type);
	switch (type) {
	case CCCI_MESSAGE:
		req = ccci_alloc_req(OUT, sizeof(struct ccci_header), 1, 1);
		if (req) {
			req->policy = RECYCLE;
			ccci_h = (struct ccci_header *)skb_put(req->skb, sizeof(struct ccci_header));
			ccci_h->data[0] = 0xFFFFFFFF;
			ccci_h->data[1] = 0x5A5A5A5A;
			/* ccci_h->channel = CCCI_FORCE_ASSERT_CH; */
			*(((u32 *) ccci_h) + 2) = CCCI_FORCE_ASSERT_CH;
			ccci_h->reserved = 0xA5A5A5A5;
			return md->ops->send_request(md, 0, req, req->skb);	/* hardcode to queue 0 */
		}
		return -CCCI_ERR_ALLOCATE_MEMORY_FAIL;
	case CCIF_INTERRUPT:
		md_ccif_send(md, H2D_FORCE_MD_ASSERT);
		break;
	case CCIF_INTR_SEQ:
		md_ccif_send(md, AP_MD_SEQ_ERROR);
		break;
	}
	return 0;
}
static int md_ccif_dump_info(struct ccci_modem *md, MODEM_DUMP_FLAG flag, void *buff, int length)
{
	if (flag & DUMP_FLAG_CCIF)
		md_ccif_dump("Dump CCIF SRAM\n", md);
	return 0;
}
static int md_ccif_ee_callback(struct ccci_modem *md, MODEM_EE_FLAG flag)
{
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	if (flag & EE_FLAG_ENABLE_WDT)
		enable_irq(md_ctrl->md_wdt_irq_id);
	if (flag & EE_FLAG_DISABLE_WDT)
		disable_irq_nosync(md_ctrl->md_wdt_irq_id);
	return 0;
}
static struct ccci_modem_ops md_ccif_ops = {
	.init = &md_ccif_op_init,
	.start = &md_ccif_op_start,
	.stop = &md_ccif_op_stop,
	.reset = &md_ccif_op_reset,
	.send_request = &md_ccif_op_send_request,
	.give_more = &md_ccif_op_give_more,
	.napi_poll = &md_ccif_op_napi_poll,
	.send_runtime_data = &md_ccif_op_send_runtime_data,
	.broadcast_state = &md_ccif_op_broadcast_state,
	.force_assert = &md_ccif_op_force_assert,
	.dump_info = &md_ccif_dump_info,
	.write_room = &md_ccif_op_write_room,
	.get_port_by_minor = &md_ccif_op_get_port_by_minor,
	.get_port_by_channel = &md_ccif_op_get_port_by_channel,
	.ee_callback = &md_ccif_ee_callback,
};
static void md_ccif_hw_init(struct ccci_modem *md)
{
	int idx, ret;
	struct md_ccif_ctrl *md_ctrl;
	struct md_hw_info *hw_info;

	md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	hw_info = md_ctrl->hw_info;
	/* Copy HW info */
	md_ctrl->ccif_ap_base = (void __iomem *)hw_info->ap_ccif_base;
	md_ctrl->ccif_md_base = (void __iomem *)hw_info->md_ccif_base;
	md_ctrl->ccif_irq_id = hw_info->ap_ccif_irq_id;
	md_ctrl->md_wdt_irq_id = hw_info->md_wdt_irq_id;
	md_ctrl->sram_size = hw_info->sram_size;
	md_ccif_io_remap_md_side_register(md);
	md_ctrl->ccif_sram_layout = (struct ccif_sram_layout *)(md_ctrl->ccif_ap_base + APCCIF_CHDATA);
	/* request IRQ */
	ret = request_irq(md_ctrl->md_wdt_irq_id, md_cd_wdt_isr, hw_info->md_wdt_irq_flags, "MD2_WDT", md);
	if (ret) {
		CCCI_ERR_MSG(md->index, TAG, "request MD_WDT IRQ(%d) error %d\n", md_ctrl->md_wdt_irq_id, ret);
		return;
	}
	disable_irq_nosync(md_ctrl->md_wdt_irq_id);	/* to balance the first start */
	ret = request_irq(md_ctrl->ccif_irq_id, md_ccif_isr, hw_info->ap_ccif_irq_flags, "CCIF1_AP", md);
	if (ret) {
		CCCI_ERR_MSG(md->index, TAG, "request CCIF1_AP IRQ(%d) error %d\n", md_ctrl->ccif_irq_id, ret);
		return;
	}
	/* init CCIF */
	ccif_write32(md_ctrl->ccif_ap_base, APCCIF_CON, 0x01);	/* arbitration */
	ccif_write32(md_ctrl->ccif_ap_base, APCCIF_ACK, 0xFFFF);
	for (idx = 0; idx < md_ctrl->sram_size / sizeof(u32); idx++)
		ccif_write32(md_ctrl->ccif_ap_base, APCCIF_CHDATA + idx * sizeof(u32), 0);
}
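/*
 * Shared-memory layout: the ring buffers start CCIF_MD_SMEM_RESERVE
 * (2 MB) into the region; each of the QUEUE_NUM queues gets one
 * ccci_ringbuf holding its control block plus its Rx and Tx data
 * areas, carved out back to back.
 */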
static int md_ccif_ring_buf_init(struct ccci_modem *md)
{
	int i = 0;
	unsigned char *buf;
	int bufsize = 0;
	struct md_ccif_ctrl *md_ctrl;
	struct ccci_ringbuf *ringbuf;

	md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	md_ctrl->total_smem_size = 0;
	buf = ((unsigned char *)md->mem_layout.smem_region_vir) + CCIF_MD_SMEM_RESERVE;
	for (i = 0; i < QUEUE_NUM; i++) {
		bufsize = CCCI_RINGBUF_CTL_LEN + rx_queue_buffer_size[i] + tx_queue_buffer_size[i];
		if (md_ctrl->total_smem_size + bufsize >
		    md->mem_layout.smem_region_size - md->smem_layout.ccci_exp_smem_size) {
			CCCI_ERR_MSG(md->index, TAG,
				     "share memory too small, please check the configuration, smem_size=%d, exception_smem=%d\n",
				     md->mem_layout.smem_region_size, md->smem_layout.ccci_exp_smem_size);
			return -1;
		}
		ringbuf = ccci_create_ringbuf(md->index, buf, bufsize, rx_queue_buffer_size[i], tx_queue_buffer_size[i]);
		if (ringbuf == NULL) {
			CCCI_ERR_MSG(md->index, TAG, "ccci_create_ringbuf %d failed\n", i);
			return -1;
		}
		/* rx */
		md_ctrl->rxq[i].ringbuf = ringbuf;
		md_ctrl->rxq[i].ccif_ch = D2H_RINGQ0 + i;
		md_ctrl->rxq[i].worker = alloc_workqueue("rx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, i);
		INIT_WORK(&md_ctrl->rxq[i].qwork, ccif_rx_work);
		/* tx */
		md_ctrl->txq[i].ringbuf = ringbuf;
		md_ctrl->txq[i].ccif_ch = H2D_RINGQ0 + i;
		buf += bufsize;
		md_ctrl->total_smem_size += bufsize;
	}
	return 0;
}
static int md_ccif_probe(struct platform_device *dev)
{
	struct ccci_modem *md;
	struct md_ccif_ctrl *md_ctrl;
	int md_id;
	struct ccci_dev_cfg dev_cfg;
	int ret;
	struct md_hw_info *md_hw;

	/* Allocate modem hardware info structure memory */
	md_hw = kzalloc(sizeof(struct md_hw_info), GFP_KERNEL);
	if (md_hw == NULL) {
		CCCI_INF_MSG(-1, TAG, "md_ccif_probe:alloc md hw mem fail\n");
		return -1;
	}
	ret = md_ccif_get_modem_hw_info(dev, &dev_cfg, md_hw);
	if (ret != 0) {
		CCCI_INF_MSG(-1, TAG, "md_ccif_probe:get hw info fail(%d)\n", ret);
		kfree(md_hw);
		md_hw = NULL;
		return -1;
	}
	/* Allocate md ctrl memory and do initialize */
	md = ccci_allocate_modem(sizeof(struct md_ccif_ctrl));
	if (md == NULL) {
		CCCI_INF_MSG(-1, TAG, "md_ccif_probe:alloc modem ctrl mem fail\n");
		kfree(md_hw);
		md_hw = NULL;
		return -1;
	}
	md->index = md_id = dev_cfg.index;
	md->major = dev_cfg.major;
	md->minor_base = dev_cfg.minor_base;
	md->capability = dev_cfg.capability;
	md->plat_dev = dev;
	CCCI_INF_MSG(md_id, TAG, "modem ccif module probe\n");
	/* init modem structure */
	md->ops = &md_ccif_ops;
	CCCI_INF_MSG(md_id, TAG, "md_ccif_probe:md_ccif=%p,md_ctrl=%p\n", md, md->private_data);
	md_ctrl = (struct md_ccif_ctrl *)md->private_data;
	md_ctrl->hw_info = md_hw;
	snprintf(md_ctrl->wakelock_name, sizeof(md_ctrl->wakelock_name), "md%d_ccif_trm", md_id + 1);
	wake_lock_init(&md_ctrl->trm_wake_lock, WAKE_LOCK_SUSPEND, md_ctrl->wakelock_name);
	tasklet_init(&md_ctrl->ccif_irq_task, md_ccif_irq_tasklet, (unsigned long)md);
	INIT_WORK(&md_ctrl->ccif_sram_work, md_ccif_sram_rx_work);
	md_ctrl->channel_id = 0;
	/* register modem */
	ccci_register_modem(md);
	md_ccif_hw_init(md);
	md_ccif_ring_buf_init(md);
	/* hook up to the device */
	dev->dev.platform_data = md;
	return 0;
}
int md_ccif_remove(struct platform_device *dev)
{
	return 0;
}
void md_ccif_shutdown(struct platform_device *dev)
{
}
int md_ccif_suspend(struct platform_device *dev, pm_message_t state)
{
	return 0;
}
int md_ccif_resume(struct platform_device *dev)
{
	struct ccci_modem *md = (struct ccci_modem *)dev->dev.platform_data;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(-1, TAG, "md_ccif_resume,md=0x%p,md_ctrl=0x%p\n", md, md_ctrl);
	ccif_write32(md_ctrl->ccif_ap_base, APCCIF_CON, 0x01);	/* arbitration */
	return 0;
}
int md_ccif_pm_suspend(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);

	BUG_ON(pdev == NULL);
	return md_ccif_suspend(pdev, PMSG_SUSPEND);
}
int md_ccif_pm_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);

	BUG_ON(pdev == NULL);
	return md_ccif_resume(pdev);
}
int md_ccif_pm_restore_noirq(struct device *device)
{
	int ret = 0;
	struct ccci_modem *md = (struct ccci_modem *)device->platform_data;
	struct md_ccif_ctrl *md_ctrl = (struct md_ccif_ctrl *)md->private_data;

	CCCI_INF_MSG(-1, TAG, "md_ccif_ipoh_restore,md=0x%p,md_ctrl=0x%p\n", md, md_ctrl);
	/* IPO-H */
	/* restore IRQ */
#ifdef FEATURE_PM_IPO_H
	irq_set_irq_type(md_ctrl->md_wdt_irq_id, IRQF_TRIGGER_FALLING);
#endif
	/* set flag for the next md_start */
	md->config.setting |= MD_SETTING_RELOAD;
	md->config.setting |= MD_SETTING_FIRST_BOOT;
	return ret;
}
#ifdef CONFIG_PM
static const struct dev_pm_ops md_ccif_pm_ops = {
	.suspend = md_ccif_pm_suspend,
	.resume = md_ccif_pm_resume,
	.freeze = md_ccif_pm_suspend,
	.thaw = md_ccif_pm_resume,
	.poweroff = md_ccif_pm_suspend,
	.restore = md_ccif_pm_resume,
	.restore_noirq = md_ccif_pm_restore_noirq,
};
#endif
static struct platform_driver modem_ccif_driver = {
	.driver = {
		   .name = "ccif_modem",
#ifdef CONFIG_PM
		   .pm = &md_ccif_pm_ops,
#endif
		   },
	.probe = md_ccif_probe,
	.remove = md_ccif_remove,
	.shutdown = md_ccif_shutdown,
	.suspend = md_ccif_suspend,
	.resume = md_ccif_resume,
};
#ifdef CONFIG_OF
static const struct of_device_id ccif_of_ids[] = {
	{.compatible = "mediatek,ap_ccif1",},
	{}
};
#endif
static int __init md_ccif_init(void)
{
	int ret;

#ifdef CONFIG_OF
	modem_ccif_driver.driver.of_match_table = ccif_of_ids;
#endif
	ret = platform_driver_register(&modem_ccif_driver);
	if (ret) {
		CCCI_ERR_MSG(-1, TAG, "CCIF modem platform driver register fail(%d)\n", ret);
		return ret;
	}
	CCCI_INF_MSG(-1, TAG, "CCIF modem platform driver register success\n");
	return 0;
}
module_init(md_ccif_init);
MODULE_AUTHOR("Yanbin Ren <Yanbin.Ren@mediatek.com>");
MODULE_DESCRIPTION("CCIF modem driver v0.1");
MODULE_LICENSE("GPL");