cbp_sdio.c

/*
 * drivers/mmc/card/cbp_sdio.c
 *
 * VIA CBP SDIO driver for Linux
 *
 * Copyright (C) 2009 VIA TELECOM Corporation, Inc.
 * Author: VIA TELECOM Corporation, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/circ_buf.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wait.h>
#include <linux/suspend.h>
#include "modem_sdio.h"
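
/*
 * modem_detect_host() - check that the MMC/SDIO host named @host_id exists.
 * It walks the mmc_host class (via a temporarily allocated dummy host, see
 * the HACK note inside) and returns 0 when a host with that name is found.
 */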
static int modem_detect_host(const char *host_id)
{
	/* HACK!!!
	 * Rely on mmc->class_dev.class set in mmc_alloc_host.
	 * Tricky part: a new mmc host is temporarily created
	 * just to discover the mmc_host class.
	 * Is there a more elegant way to enumerate mmc_hosts?
	 */
	struct mmc_host *mmc = NULL;
	struct mmc_host *host = NULL;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = -1;

#if 1
	pr_debug("[C2K] before alloc host\n");
	mmc = mmc_alloc_host(0, NULL);
	if (!mmc) {
		pr_debug("[C2K] mmc_alloc_host error\n");
		ret = -ENOMEM;
		goto out;
	}
	pr_debug("[C2K] mmc_alloc_host success\n");
	BUG_ON(!mmc->class_dev.class);
	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
	for (;;) {
		dev = class_dev_iter_next(&iter);
		if (!dev) {
			pr_debug("[C2K] class dev iter next failed\n");
			LOGPRT(LOG_ERR, "%s: %d\n", __func__, __LINE__);
			break;
		}
		host = container_of(dev, struct mmc_host, class_dev);
		if (dev_name(&host->class_dev)
		    && strcmp(dev_name(&host->class_dev), host_id))
			continue;
		ret = 0;
		break;
	}
	class_dev_iter_exit(&iter);
	mmc_free_host(mmc);
#endif
	/* ret = 0; */
out:
	return ret;
}
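
/*
 * Board-level platform data for the CBP modem: the SDIO host to attach to,
 * the GPIO lines used for power/reset, the AP/CP wake and ready handshake,
 * reset and exception indications, data-ack and flow-control signals, plus
 * the setup/teardown hooks (modem_sdio_init / modem_sdio_exit).
 */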
static struct cbp_platform_data cbp_data = {
	.bus = "sdio",
	.host_id = MDM_MMC_ID,
	.ipc_enable = false,
	.rst_ind_enable = false,
	.data_ack_enable = false,
	.flow_ctrl_enable = false,
	.tx_disable_irq = true,
	.gpio_ap_wkup_cp = GPIO_C2K_SDIO_AP_WAKE_MDM,
	.gpio_cp_ready = GPIO_C2K_SDIO_MDM_RDY,
	.gpio_cp_wkup_ap = GPIO_C2K_SDIO_MDM_WAKE_AP,
	.gpio_ap_ready = GPIO_C2K_SDIO_AP_RDY,
	.gpio_sync_polar = GPIO_C2K_SDIO_SYNC_POLAR,
#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
	.gpio_cp_exception = GPIO_C2K_EXCEPTION,
	.c2k_wdt_irq_id = 0,
#endif
	.gpio_rst_ind = GPIO_C2K_MDM_RST_IND,
	.gpio_rst_ind_polar = GPIO_C2K_MDM_RST_IND_POLAR,
	.gpio_data_ack = GPIO_C2K_SDIO_DATA_ACK,
	.gpio_data_ack_polar = GPIO_C2K_SDIO_DATA_ACK_POLAR,
	.gpio_flow_ctrl = GPIO_C2K_SDIO_FLOW_CTRL,
	.gpio_flow_ctrl_polar = GPIO_C2K_SDIO_FLOW_CTRL_POLAR,
	.gpio_pwr_on = GPIO_C2K_MDM_PWR_EN,
	.gpio_rst = GPIO_C2K_MDM_RST,
	/* for the level-shifter chip FSSD06 */
	.gpio_sd_select = GPIO_C2K_SD_SEL_N,
	.gpio_mc3_enable = GPIO_C2K_MC3_EN_N,
	.modem = NULL,
	.detect_host = modem_detect_host,
	.cbp_setup = modem_sdio_init,
	.cbp_destroy = modem_sdio_exit,
};

/*----------------------data_ack functions-------------------*/
static struct cbp_wait_event *cbp_data_ack;
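
/*
 * The CP acknowledges data by driving the data_ack GPIO to wait_polar; the
 * ISR then marks the state MODEM_ST_READY and wakes any writer blocked in
 * data_ack_wait_event(). The wait also returns if the modem goes MD_OFF.
 */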
static irqreturn_t gpio_irq_data_ack(int irq, void *data)
{
	struct cbp_wait_event *cbp_data_ack = (struct cbp_wait_event *)data;
	int level;

	/* unsigned long long hr_t1, hr_t2; */
	/* hr_t1 = sched_clock(); */
	level = !!c2k_gpio_get_value(cbp_data_ack->wait_gpio);
	/* LOGPRT(LOG_NOTICE, "%s enter, level = %d!\n", __func__, level); */
	if (level == cbp_data_ack->wait_polar) {
		atomic_set(&cbp_data_ack->state, MODEM_ST_READY);
		wake_up(&cbp_data_ack->wait_q);
	}
#if defined(CONFIG_MTK_LEGACY)
	c2k_gpio_irq_unmask(cbp_data_ack->wait_gpio);
#endif
	/* hr_t2 = sched_clock(); */
	/* pr_debug("[sdio]ack: t1=%llu,t2=%llu,delta=%llu\n", hr_t1, hr_t2, hr_t2 - hr_t1); */
	return IRQ_HANDLED;
}

static void data_ack_wait_event(struct cbp_wait_event *pdata_ack)
{
	struct sdio_modem *modem = c2k_modem;
	struct cbp_wait_event *cbp_data_ack =
		(struct cbp_wait_event *)pdata_ack;

	wait_event(cbp_data_ack->wait_q,
		   (MODEM_ST_READY == atomic_read(&cbp_data_ack->state))
		   || (modem->status == MD_OFF));
}

/*----------------------flow control functions-------------------*/
unsigned long long hr_t1;
unsigned long long hr_t2;
static struct cbp_wait_event *cbp_flow_ctrl;
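
/*
 * Flow control: when the flow_ctrl GPIO matches wait_polar the CP is asking
 * the AP to hold off writing (FLOW_CTRL_ENABLE); when it is deasserted the
 * state returns to FLOW_CTRL_DISABLE and blocked writers are woken.
 * flow_ctrl_wait_event() waits for that, bounded to about 20 ms, and also
 * gives up if the modem is MD_OFF.
 */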
static irqreturn_t gpio_irq_flow_ctrl(int irq, void *data)
{
	struct cbp_wait_event *cbp_flow_ctrl = (struct cbp_wait_event *)data;
	int level;

	/* hr_t1 = sched_clock(); */
	level = !!c2k_gpio_get_value(cbp_flow_ctrl->wait_gpio);
	/* c2k_gpio_set_irq_type(cbp_flow_ctrl->wait_gpio, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING); */
	/* c2k_gpio_set_irq_type(cbp_flow_ctrl->wait_gpio, IRQF_TRIGGER_FALLING); */
	/* c2k_gpio_set_irq_type(cbp_flow_ctrl->wait_gpio, IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH); */
#if defined(CONFIG_MTK_LEGACY)
	c2k_gpio_irq_unmask(cbp_flow_ctrl->wait_gpio);
#endif
	if (level == cbp_flow_ctrl->wait_polar) {
		atomic_set(&cbp_flow_ctrl->state, FLOW_CTRL_ENABLE);
		/* LOGPRT(LOG_DEBUG, "%s: flow control is enable, please write later!\n", __func__); */
	} else {
		atomic_set(&cbp_flow_ctrl->state, FLOW_CTRL_DISABLE);
		/* LOGPRT(LOG_DEBUG, "%s: %d flow control is disable, can write now!\n", __func__, flw_count); */
		wake_up(&cbp_flow_ctrl->wait_q);
	}
	/* hr_t2 = sched_clock(); */
	/* pr_debug("[sdio] t1=%llu,t2=%llu,delta=%llu\n", hr_t1, hr_t2, hr_t2 - hr_t1); */
	return IRQ_HANDLED;
}

static void flow_ctrl_wait_event(struct cbp_wait_event *pflow_ctrl)
{
	struct cbp_wait_event *cbp_flow_ctrl =
		(struct cbp_wait_event *)pflow_ctrl;
	struct sdio_modem *modem = c2k_modem;

	/* wait_event(cbp_flow_ctrl->wait_q, FLOW_CTRL_DISABLE == atomic_read(&cbp_flow_ctrl->state)); */
	wait_event_timeout(cbp_flow_ctrl->wait_q,
			   (FLOW_CTRL_DISABLE ==
			    atomic_read(&cbp_flow_ctrl->state)
			    || (modem->status == MD_OFF)),
			   msecs_to_jiffies(20));
}
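
/*
 * IPC via the ASC framework (four-wire AP/CP sync): the tx handle pairs the
 * AP-wake-CP GPIO with the CP-ready GPIO, and the rx handle pairs the
 * CP-wake-AP GPIO with the AP-ready GPIO (both are filled in by cbp_probe()).
 * The rx notifier below confirms RX readiness and, when WAKE_HOST_BY_SYNC is
 * set, raises and drops SRC_trigger_signal() around the CP's RX window.
 */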
/*----------------------IPC functions-------------------*/
static int modem_sdio_tx_notifier(int event, void *data);
static int modem_sdio_rx_notifier(int event, void *data);

static struct asc_config sdio_tx_handle = {
	.name = CBP_TX_HD_NAME,
};

static struct asc_infor sdio_tx_user = {
	.name = CBP_TX_USER_NAME,
	.data = &sdio_tx_handle,
	.notifier = modem_sdio_tx_notifier,
};

static struct asc_config sdio_rx_handle = {
	.name = SDIO_RX_HD_NAME,
};

static struct asc_infor sdio_rx_user = {
	.name = SDIO_RX_USER_NAME,
	.data = &sdio_rx_handle,
	.notifier = modem_sdio_rx_notifier,
};

static int modem_sdio_tx_notifier(int event, void *data)
{
	return 0;
}

static int modem_sdio_rx_notifier(int event, void *data)
{
	struct asc_config *rx_config = (struct asc_config *)data;
	struct sdio_modem *modem = c2k_modem;
	int ret = 0;

	LOGPRT(LOG_NOTICE, "%s event=%d\n", __func__, event);
	switch (event) {
	case ASC_NTF_RX_PREPARE:
#ifdef WAKE_HOST_BY_SYNC	/* wake up the SDIO host by the four-wire sync mechanism */
		if (modem->status != MD_OFF)
			SRC_trigger_signal(1);
		else
			LOGPRT(LOG_ERR,
			       "ignore asc event to resume sdio host\n");
#endif
		asc_rx_confirm_ready(rx_config->name, 1);
		break;
	case ASC_NTF_RX_POST:
#ifdef WAKE_HOST_BY_SYNC	/* wake up the SDIO host by the four-wire sync mechanism */
		if (modem->status != MD_OFF)
			SRC_trigger_signal(0);
		else
			LOGPRT(LOG_ERR,
			       "ignore asc event to suspend sdio host\n");
#endif
		/* asc_rx_confirm_ready(rx_config->name, 0); */
		break;
	default:
		LOGPRT(LOG_ERR, "%s: ignore unknown event!!\n", __func__);
		break;
	}
	return ret;
}
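
/*
 * Reset / exception indications from the CP: a change on the rst_ind GPIO or
 * the cp_exception GPIO marks the modem MD_OFF / MD_EXCEPTION and schedules
 * the detection work below to re-power or recover the SDIO device.
 */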
static struct cbp_exception *cbp_excp_ind;

/*----------------------reset indication functions-------------------*/
static struct cbp_reset *cbp_rst_ind;

#if 0
static int modem_detect_card(struct cbp_reset *cbp_rst_ind)
{
	/* HACK!!!
	 * Rely on mmc->class_dev.class set in mmc_alloc_host.
	 * Tricky part: a new mmc host is temporarily created
	 * just to discover the mmc_host class.
	 * Is there a more elegant way to enumerate mmc_hosts?
	 */
	struct mmc_host *mmc = NULL;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = -1;

	mmc = mmc_alloc_host(0, NULL);
	if (!mmc) {
		ret = -ENOMEM;
		goto out;
	}
	BUG_ON(!mmc->class_dev.class);
	class_dev_iter_init(&iter, mmc->class_dev.class, NULL, NULL);
	for (;;) {
		struct mmc_host *host;

		dev = class_dev_iter_next(&iter);
		if (!dev)
			break;
		host = container_of(dev, struct mmc_host, class_dev);
		if (dev_name(&host->class_dev)
		    && strcmp(dev_name(&host->class_dev), MDM_MMC_ID)) {
			pr_debug("[MODEM SDIO] detect card not match\n");
			continue;
		}
		pr_debug("[MODEM SDIO] detect card matched\n");
		cbp_rst_ind->host = host;
		mmc_detect_change(host, 0);
		ret = 0;
		break;
	}
	class_dev_iter_exit(&iter);
	mmc_free_host(mmc);
out:
	return ret;
}
#endif
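
/*
 * c2k_wake_host() tracks the host power state and switches the dedicated
 * SDIO host (index 3 here) on or off through via_sdio_on()/via_sdio_off().
 * modem_detect() runs from the reset workqueue and decides, from the rst_ind
 * level, whether to re-power the host or to run the reset handler.
 */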
#ifdef WAKE_HOST_BY_SYNC	/* wake up the SDIO host by the four-wire sync mechanism */
void c2k_wake_host(int wake)
{
#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
	/* start as "on" (1) so the first c2k_wake_host(0) really switches the host off */
	static int host_status = 1;

	if (wake && !host_status) {	/* wake the SDIO host so it can enumerate the device */
		host_status = 1;
		LOGPRT(LOG_NOTICE, "%s %d host on.\n", __func__, __LINE__);
		via_sdio_on(3);
	} else if (host_status) {
		host_status = 0;
		LOGPRT(LOG_NOTICE, "%s %d host off.\n", __func__, __LINE__);
		via_sdio_off(3);
	}
#endif
}

static void modem_detect(struct work_struct *work)
{
	struct cbp_reset *cbp_rst_ind = NULL;
	int level = 0;
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	int ret;
#endif

	LOGPRT(LOG_NOTICE, "%s %d.\n", __func__, __LINE__);
	cbp_rst_ind = container_of(work, struct cbp_reset, reset_work);
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	if (cbp_rst_ind->host == NULL) {	/* for first detection and ipoh */
		LOGPRT(LOG_NOTICE, "%s %d modem_detect_card.\n", __func__,
		       __LINE__);
		ret = modem_detect_card(cbp_rst_ind);
		if (ret)
			LOGPRT(LOG_ERR, "%s: modem detect failed.\n", __func__);
	} else {	/* for device reset */
		level = !!c2k_gpio_get_value(cbp_rst_ind->rst_ind_gpio);
		if (level == cbp_rst_ind->rst_ind_polar) {
			LOGPRT(LOG_NOTICE, "%s %d power on sdio host\n",
			       __func__, __LINE__);
			c2k_wake_host(0);
			c2k_wake_host(1);
		} else {
			LOGPRT(LOG_NOTICE, "%s %d power off sdio host\n",
			       __func__, __LINE__);
			/* c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 1); */
			modem_reset_handler();
			c2k_wake_host(0);
		}
	}
#else
	level = !!c2k_gpio_get_value(cbp_rst_ind->rst_ind_gpio);
	if (level == cbp_rst_ind->rst_ind_polar) {
		LOGPRT(LOG_NOTICE, "%s %d power on sdio host\n", __func__,
		       __LINE__);
		c2k_wake_host(0);
		c2k_wake_host(1);
	} else {
		LOGPRT(LOG_NOTICE, "%s %d power off sdio host\n", __func__,
		       __LINE__);
		/* c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 1); */
		modem_reset_handler();
		c2k_wake_host(0);
	}
#endif
}

static void modem_detect_for_excp(struct work_struct *work)
{
	pr_debug("[MODEM SDIO] excp work sched!!!\n");
	modem_reset_handler();
	c2k_wake_host(0);
	msleep(1000);
	c2k_wake_host(1);
}
#else
static void modem_detect(struct work_struct *work)
{
	struct cbp_reset *cbp_rst_ind = NULL;
	int ret;
	int level = 0;

	LOGPRT(LOG_NOTICE, "%s %d.\n", __func__, __LINE__);
	cbp_rst_ind = container_of(work, struct cbp_reset, reset_work);
	ret = modem_detect_card(cbp_rst_ind);
	if (ret)
		LOGPRT(LOG_ERR, "%s: modem detect failed.\n", __func__);
}
#endif
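
/*
 * gpio_irq_cbp_rst_ind() handles a change on the CP reset-indication line
 * (the local IRQ request for it is compiled out in cbp_probe(), so it is
 * presumably invoked from platform interrupt code): when the level no longer
 * matches rst_ind_polar, i.e. a CP reset happened, the modem is marked
 * MD_OFF, blocked writers are released (data_ack/flow_ctrl waiters on
 * EVDO_DT builds, the TX FIFO wait queue otherwise), and the reset work is
 * queued.
 */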
void gpio_irq_cbp_rst_ind(void)
{
	int level = 0;
	unsigned long flags;
	struct cbp_platform_data *cdata = &cbp_data;

	level = !!c2k_gpio_get_value(cbp_rst_ind->rst_ind_gpio);
	if (level != cbp_rst_ind->rst_ind_polar) {	/* 1: cbp reset happened */
		if (cdata->modem) {
			LOGPRT(LOG_INFO, "%s: set md off.\n", __func__);
			spin_lock_irqsave(&cdata->modem->status_lock, flags);
			cdata->modem->status = MD_OFF;
			spin_unlock_irqrestore(&cdata->modem->status_lock,
					       flags);
		}
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
		wake_up(&cbp_flow_ctrl->wait_q);
		wake_up(&cbp_data_ack->wait_q);
#else
		atomic_set(&cdata->modem->tx_fifo_cnt, TX_FIFO_SZ);
		wake_up(&cdata->modem->wait_tx_done_q);
#endif
	}
	queue_work(cbp_rst_ind->reset_wq, &cbp_rst_ind->reset_work);
}

static irqreturn_t gpio_irq_cbp_excp_ind(int irq, void *data)
{
	unsigned long flags;
	struct cbp_platform_data *cdata = &cbp_data;

	LOGPRT(LOG_ERR, "%s: receive c2k exception interrupt...\n", __func__);
	spin_lock_irqsave(&cdata->modem->status_lock, flags);
	if (cdata->modem->status != MD_OFF && cdata->modem->status != MD_EXCEPTION) {
		cdata->modem->status = MD_EXCEPTION;
		modem_notify_event(MDM_EVT_NOTIFY_EXCP);
		queue_work(cbp_excp_ind->excp_wq, &cbp_excp_ind->excp_work);
	} else {
		LOGPRT(LOG_ERR, "%s: md status is %u now, ignore this EE\n",
		       __func__, cdata->modem->status);
	}
	spin_unlock_irqrestore(&cdata->modem->status_lock, flags);
#if defined(CONFIG_MTK_LEGACY)
	c2k_gpio_irq_unmask(cbp_excp_ind->excp_ind_gpio);
#endif
	return IRQ_HANDLED;
}

#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
static irqreturn_t c2k_wdt_isr(int irq, void *data)
{
	struct cbp_platform_data *cdata = &cbp_data;

	LOGPRT(LOG_ERR,
	       "%s: receive c2k wdt interrupt, prepare to reset c2k...!\n",
	       __func__);
	dump_c2k_iram();
	/* wake_lock_timeout(&cmdata->wlock, MDM_RST_LOCK_TIME * HZ); */
	modem_notify_event(MDM_EVT_NOTIFY_WDT);
	atomic_set(&cdata->modem->tx_fifo_cnt, TX_FIFO_SZ);
	wake_up(&cdata->modem->wait_tx_done_q);
	return IRQ_HANDLED;
}
#endif
#if 0
/*----------------------cbp sys interface --------------------------*/
static void sys_power_on_cbp(void)
{
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 0);
	c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 0);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 1);
	c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 1);
	msleep(400);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 0);	/* MDM_RST */
}

static void sys_power_off_cbp(void)
{
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 0);
	c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 0);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 1);
	msleep(500);
	msleep(600);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 0);
}

static void sys_reset_cbp(void)
{
	c2k_gpio_direction_output(GPIO_C2K_MDM_PWR_EN, 1);
	msleep(20);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 1);
	msleep(100);
	msleep(300);
	c2k_gpio_direction_output(GPIO_C2K_MDM_RST, 0);	/* MDM_RST */
}
#endif
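
/*
 * Userspace control interface: three attributes (power, reset, jtag) are
 * grouped under the kobject created by c2k_kobject_add("power") in
 * cbp_probe(). "power" reads back the modem status and accepts 0 to mark the
 * modem off; writing 1 to "reset" calls c2k_reset_modem(); "jtag" selects a
 * JTAG mode via enable_c2k_jtag(). The exact sysfs path depends on where
 * c2k_kobject_add() parents the kobject, e.g. (hypothetically):
 *   echo 1 > /sys/.../power/reset
 */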
static ssize_t cbp_power_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	struct cbp_platform_data *cdata = &cbp_data;
	char *s = buf;

	if (cdata->modem)
		s += sprintf(s, "%d\n", cdata->modem->status);
	return s - buf;
}

static ssize_t cbp_power_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	unsigned long val;
	struct cbp_platform_data *cdata = &cbp_data;
	unsigned long flags;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val < 0)
		return -EINVAL;
	if (val) {
		if (cdata->modem) {
			if (cdata->modem->status == MD_OFF) {
				/* sys_power_on_cbp(); */
				/*
				spin_lock_irqsave(cdata->modem->status_lock, flags);
				cdata.modem->status = MD_OFF;
				spin_unlock_irqrestore(cdata->modem->status_lock, flags);
				*/
				LOGPRT(LOG_INFO, "AP power on CBP.\n");
			} else {
				LOGPRT(LOG_ERR,
				       "%s: CBP is already power on.\n",
				       __func__);
			}
		}
	} else {
		if (cdata->modem) {
			if (cdata->modem->status != MD_OFF) {
				/* sys_power_off_cbp(); */
				LOGPRT(LOG_INFO, "AP power off CBP.\n");
				spin_lock_irqsave(&cdata->modem->status_lock,
						  flags);
				cdata->modem->status = MD_OFF;
				spin_unlock_irqrestore(&cdata->modem->status_lock,
						       flags);
			} else {
				LOGPRT(LOG_ERR,
				       "%s: CBP is already power off.\n",
				       __func__);
			}
		}
	}
	return n;
}

static ssize_t cbp_reset_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	return 0;
}

static ssize_t cbp_reset_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val < 0)
		return -EINVAL;
	if (val) {
		/* sys_reset_cbp(); */
		/* c2k_modem_reset_platform(); */
		c2k_reset_modem();
		LOGPRT(LOG_INFO, "AP reset CBP.\n");
	} else
		LOGPRT(LOG_ERR, "%s: write 1 to reset the CBP.\n", __func__);
	return n;
}

static int jtag_mode;
static ssize_t cbp_jtag_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	char *s = buf;

	s += sprintf(s, "%d\n", jtag_mode);
	return s - buf;
}

static ssize_t cbp_jtag_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	if (val < 0)
		return -EINVAL;
	if (val) {
		/* sys_reset_cbp(); */
		/* c2k_modem_reset_platform(); */
		jtag_mode = val;
		enable_c2k_jtag(val);
		LOGPRT(LOG_INFO, "set cbp jtag to mode %d.\n", jtag_mode);
	}
	return n;
}

#define cbp_attr(_name) \
static struct kobj_attribute _name##_attr = { \
	.attr = { \
		.name = __stringify(_name), \
		.mode = 0660, \
	}, \
	.show = cbp_##_name##_show, \
	.store = cbp_##_name##_store, \
}

cbp_attr(power);
cbp_attr(reset);
cbp_attr(jtag);

static struct attribute *cbp_power_attr[] = {
	&power_attr.attr,
	&reset_attr.attr,
	&jtag_attr.attr,
	NULL,
};

static struct kobject *cbp_power_kobj;
static struct attribute_group g_power_attr_group = {
	.attrs = cbp_power_attr,
};
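
/*
 * cbp_probe() wires everything together: it requests the data_ack and
 * flow_ctrl GPIO interrupts, creates the reset and exception workqueues,
 * requests the C2K watchdog IRQ on non-EVDO_DT builds, registers the ASC
 * tx/rx sync handles when the four handshake GPIOs are valid, then calls
 * plat->cbp_setup() (modem_sdio_init) and exposes the power/reset/jtag
 * sysfs group.
 */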
/*----------------------- cbp platform driver ------------------------*/
/* static int __devinit cbp_probe(struct platform_device *pdev) */
static int cbp_probe(struct platform_device *pdev)
{
	struct cbp_platform_data *plat = pdev->dev.platform_data;
	int ret = -1;

#ifndef CONFIG_OF
	/* must have platform data */
	if (!plat) {
		LOGPRT(LOG_ERR, "%s: no platform data!\n", __func__);
		return -EINVAL;
	}
#else
	pdev->dev.platform_data = &cbp_data;
	plat = &cbp_data;
#endif
#if 0
	if (plat->bus && !strcmp(plat->bus, "sdio")) {
		if (plat->detect_host) {
			ret = plat->detect_host(plat->host_id);
			if (ret) {
				LOGPRT(LOG_ERR, "%s: host %s detect failed!\n",
				       __func__, plat->host_id);
				goto out;
			}
		} else {
			LOGPRT(LOG_ERR,
			       "%s: bus %s has no detect function!\n",
			       __func__, plat->bus);
			goto out;
		}
	} else {
		LOGPRT(LOG_ERR, "%s: unknown bus!\n", __func__);
		goto out;
	}
#endif
	if (GPIO_C2K_VALID(plat->gpio_data_ack)) {
		cbp_data_ack =
		    kzalloc(sizeof(struct cbp_wait_event), GFP_KERNEL);
		if (!cbp_data_ack) {
			ret = -ENOMEM;
			LOGPRT(LOG_ERR, "%s %d kzalloc cbp_data_ack failed\n",
			       __func__, __LINE__);
			goto err_kzalloc_cbp_data_ack;
		}
		init_waitqueue_head(&cbp_data_ack->wait_q);
		atomic_set(&cbp_data_ack->state, MODEM_ST_UNKNOWN);
		cbp_data_ack->wait_gpio = plat->gpio_data_ack;
		cbp_data_ack->wait_polar = plat->gpio_data_ack_polar;
		LOGPRT(LOG_ERR, "cbp_data_ack->wait_gpio=%d\n",
		       cbp_data_ack->wait_gpio);
		LOGPRT(LOG_ERR, "cbp_data_ack->wait_polar=%d\n",
		       cbp_data_ack->wait_polar);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_mask(plat->gpio_data_ack);
#endif
		c2k_gpio_direction_input_for_irq(plat->gpio_data_ack);
		c2k_gpio_set_irq_type(plat->gpio_data_ack,
				      IRQF_TRIGGER_FALLING);
		ret = c2k_gpio_request_irq(plat->gpio_data_ack,
					   gpio_irq_data_ack,
					   IRQF_SHARED | IRQF_TRIGGER_FALLING,
					   DRIVER_NAME "(data_ack)",
					   cbp_data_ack);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_unmask(plat->gpio_data_ack);
#endif
		if (ret < 0) {
			LOGPRT(LOG_ERR,
			       "%s: %d fail to request irq for data_ack!!\n",
			       __func__, __LINE__);
			goto err_req_irq_data_ack;
		}
		plat->cbp_data_ack = cbp_data_ack;
		plat->data_ack_wait_event = data_ack_wait_event;
		plat->data_ack_enable = true;
	}
	if (GPIO_C2K_VALID(plat->gpio_flow_ctrl)) {
		cbp_flow_ctrl =
		    kzalloc(sizeof(struct cbp_wait_event), GFP_KERNEL);
		if (!cbp_flow_ctrl) {
			ret = -ENOMEM;
			LOGPRT(LOG_ERR, "%s %d kzalloc cbp_flow_ctrl failed\n",
			       __func__, __LINE__);
			goto err_kzalloc_cbp_flow_ctrl;
		}
		init_waitqueue_head(&cbp_flow_ctrl->wait_q);
		atomic_set(&cbp_flow_ctrl->state, FLOW_CTRL_DISABLE);
		cbp_flow_ctrl->wait_gpio = plat->gpio_flow_ctrl;
		cbp_flow_ctrl->wait_polar = plat->gpio_flow_ctrl_polar;
		LOGPRT(LOG_ERR, "cbp_flow_ctrl->wait_gpio=%d\n",
		       cbp_flow_ctrl->wait_gpio);
		LOGPRT(LOG_ERR, "cbp_flow_ctrl->wait_polar=%d\n",
		       cbp_flow_ctrl->wait_polar);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_mask(plat->gpio_flow_ctrl);
#endif
		c2k_gpio_direction_input_for_irq(plat->gpio_flow_ctrl);
		/* c2k_gpio_set_irq_type(plat->gpio_flow_ctrl, IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH); */
		c2k_gpio_set_irq_type(plat->gpio_flow_ctrl,
				      IRQF_TRIGGER_FALLING);
		ret = c2k_gpio_request_irq(plat->gpio_flow_ctrl,
					   gpio_irq_flow_ctrl,
					   IRQF_SHARED | IRQF_TRIGGER_RISING |
					   IRQF_TRIGGER_FALLING,
					   DRIVER_NAME "(flow_ctrl)",
					   cbp_flow_ctrl);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_unmask(plat->gpio_flow_ctrl);
#endif
		if (ret < 0) {
			LOGPRT(LOG_ERR,
			       "%s: %d fail to request irq for flow_ctrl!!\n",
			       __func__, __LINE__);
			goto err_req_irq_flow_ctrl;
		}
		plat->cbp_flow_ctrl = cbp_flow_ctrl;
		plat->flow_ctrl_wait_event = flow_ctrl_wait_event;
		plat->flow_ctrl_enable = true;
	}
	if (GPIO_C2K_VALID(plat->gpio_rst_ind)) {
		cbp_rst_ind = kzalloc(sizeof(struct cbp_reset), GFP_KERNEL);
		if (!cbp_rst_ind) {
			ret = -ENOMEM;
			LOGPRT(LOG_ERR, "%s %d kzalloc cbp_rst_ind failed\n",
			       __func__, __LINE__);
			goto err_kzalloc_cbp_rst_ind;
		}
		cbp_rst_ind->name = "cbp_rst_ind_wq";
		cbp_rst_ind->reset_wq =
		    create_singlethread_workqueue(cbp_rst_ind->name);
		if (cbp_rst_ind->reset_wq == NULL) {
			ret = -ENOMEM;
			LOGPRT(LOG_ERR, "%s %d error creating rst_ind workqueue\n",
			       __func__, __LINE__);
			goto err_create_work_queue;
		}
		INIT_WORK(&cbp_rst_ind->reset_work, modem_detect);
		cbp_rst_ind->rst_ind_gpio = plat->gpio_rst_ind;
		cbp_rst_ind->rst_ind_polar = plat->gpio_rst_ind_polar;
		cbp_rst_ind->host = NULL;
#if 0
		/* c2k_gpio_irq_mask(plat->gpio_rst_ind); */
		c2k_gpio_direction_input_for_irq(plat->gpio_rst_ind);
		c2k_gpio_set_irq_type(plat->gpio_rst_ind,
				      IRQF_TRIGGER_FALLING |
				      IRQF_TRIGGER_RISING);
		ret = c2k_gpio_request_irq(plat->gpio_rst_ind,
					   gpio_irq_cbp_rst_ind,
					   IRQF_SHARED | IRQF_TRIGGER_FALLING |
					   IRQF_TRIGGER_RISING,
					   DRIVER_NAME "(rst_ind)", cbp_rst_ind);
		/* c2k_gpio_irq_unmask(plat->gpio_rst_ind); */
		if (ret < 0) {
			LOGPRT(LOG_ERR,
			       "%s: %d fail to request irq for rst_ind!!\n",
			       __func__, __LINE__);
			goto err_req_irq_rst_ind;
		}
#endif
		plat->rst_ind_enable = true;
	}
	cbp_excp_ind = kzalloc(sizeof(struct cbp_exception), GFP_KERNEL);
	if (!cbp_excp_ind) {
		ret = -ENOMEM;
		LOGPRT(LOG_ERR, "%s %d kzalloc cbp_excp_ind failed\n", __func__,
		       __LINE__);
		goto err_kzalloc_cbp_excp_ind;
	}
	cbp_excp_ind->name = "cbp_excp_ind_wq";
	cbp_excp_ind->excp_wq =
	    create_singlethread_workqueue(cbp_excp_ind->name);
	if (cbp_excp_ind->excp_wq == NULL) {
		ret = -ENOMEM;
		LOGPRT(LOG_ERR, "%s %d error creating excp_ind workqueue\n",
		       __func__, __LINE__);
		goto err_create_excp_work_queue;
	}
	/* Todo: workqueue function needs to be implemented */
	/* INIT_WORK(&cbp_excp_ind->excp_work, modem_detect); */
	INIT_WORK(&cbp_excp_ind->excp_work, modem_detect_for_excp);
	cbp_excp_ind->host = NULL;
#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
	if (GPIO_C2K_VALID(plat->gpio_cp_exception)) {
		cbp_excp_ind->excp_ind_gpio = plat->gpio_cp_exception;
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_mask(plat->gpio_cp_exception);
#endif
		c2k_gpio_set_irq_type(plat->gpio_cp_exception,
				      IRQF_TRIGGER_FALLING);
		ret = c2k_gpio_request_irq(plat->gpio_cp_exception,
					   gpio_irq_cbp_excp_ind,
					   IRQF_TRIGGER_FALLING,
					   DRIVER_NAME "(c2k EE)", cbp_excp_ind);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_unmask(plat->gpio_cp_exception);
#endif
		if (ret < 0) {
			LOGPRT(LOG_ERR,
			       "%s: %d fail to request irq for c2k exception!!\n",
			       __func__, __LINE__);
			goto err_req_irq_excp;
		}
	}
	plat->c2k_wdt_irq_id = get_c2k_wdt_irq_id();
	LOGPRT(LOG_INFO, "get c2k wdt irq id %d\n", plat->c2k_wdt_irq_id);
#if 1
	if (plat->c2k_wdt_irq_id)
		ret = request_irq(plat->c2k_wdt_irq_id, c2k_wdt_isr,
				  IRQF_TRIGGER_NONE, "C2K_CCCI", plat);
	else
		LOGPRT(LOG_ERR, "%s: %d fail to get wdt irq id!!\n", __func__,
		       __LINE__);
#endif
#endif
	if ((GPIO_C2K_VALID(plat->gpio_ap_wkup_cp))
	    && (GPIO_C2K_VALID(plat->gpio_cp_ready))
	    && (GPIO_C2K_VALID(plat->gpio_cp_wkup_ap))
	    && (GPIO_C2K_VALID(plat->gpio_ap_ready))) {
		sdio_tx_handle.gpio_wake = plat->gpio_ap_wkup_cp;
		sdio_tx_handle.gpio_ready = plat->gpio_cp_ready;
		sdio_tx_handle.polar = plat->gpio_sync_polar;
		ret = asc_tx_register_handle(&sdio_tx_handle);
		if (ret) {
			LOGPRT(LOG_ERR,
			       "%s %d asc_tx_register_handle failed.\n",
			       __func__, __LINE__);
			goto err_ipc;
		}
		ret = asc_tx_add_user(sdio_tx_handle.name, &sdio_tx_user);
		if (ret) {
			LOGPRT(LOG_ERR, "%s %d asc_tx_add_user failed.\n",
			       __func__, __LINE__);
			goto err_ipc;
		}
		sdio_rx_handle.gpio_wake = plat->gpio_cp_wkup_ap;
		sdio_rx_handle.gpio_ready = plat->gpio_ap_ready;
		sdio_rx_handle.polar = plat->gpio_sync_polar;
		ret = asc_rx_register_handle(&sdio_rx_handle);
		if (ret) {
			LOGPRT(LOG_ERR,
			       "%s %d asc_rx_register_handle failed.\n",
			       __func__, __LINE__);
			goto err_ipc;
		}
		ret = asc_rx_add_user(sdio_rx_handle.name, &sdio_rx_user);
		if (ret) {
			LOGPRT(LOG_ERR, "%s %d asc_rx_add_user failed.\n",
			       __func__, __LINE__);
			goto err_ipc;
		}
		plat->ipc_enable = true;
		plat->tx_handle = &sdio_tx_handle;
	}
	ret = plat->cbp_setup(plat);
	if (ret) {
		LOGPRT(LOG_ERR, "%s: host %s setup failed!\n", __func__,
		       plat->host_id);
		goto err_ipc;
	}
	cbp_power_kobj = c2k_kobject_add("power");
	if (!cbp_power_kobj) {
		LOGPRT(LOG_ERR, "error c2k_kobject_add!\n");
		ret = -ENOMEM;
		goto err_create_kobj;
	}
#if !defined(CONFIG_MTK_CLKMGR)
	clk_scp_sys_md2_main = devm_clk_get(&pdev->dev, "scp-sys-md2-main");
	if (IS_ERR(clk_scp_sys_md2_main))
		LOGPRT(LOG_ERR, "[C2K] get scp-sys-md2-main failed\n");
#endif
	LOGPRT(LOG_INFO,
	       "cbp initialized on host %s successfully, bus is %s!\n",
	       plat->host_id, plat->bus);
	return sysfs_create_group(cbp_power_kobj, &g_power_attr_group);

err_create_kobj:
	plat->cbp_destroy();
err_ipc:
#if 0
	if (GPIO_C2K_VALID(plat->gpio_rst_ind))
		free_irq(c2k_gpio_to_irq(plat->gpio_rst_ind), cbp_rst_ind);
err_req_irq_rst_ind:
#endif
#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
err_req_irq_excp:
	if (GPIO_C2K_VALID(plat->gpio_cp_exception))
		destroy_workqueue(cbp_excp_ind->excp_wq);
#endif
err_create_excp_work_queue:
	kfree(cbp_excp_ind);
err_kzalloc_cbp_excp_ind:
	if (GPIO_C2K_VALID(plat->gpio_rst_ind))
		destroy_workqueue(cbp_rst_ind->reset_wq);
err_create_work_queue:
	if (GPIO_C2K_VALID(plat->gpio_rst_ind))
		kfree(cbp_rst_ind);
err_kzalloc_cbp_rst_ind:
	if (GPIO_C2K_VALID(plat->gpio_flow_ctrl))
		free_irq(c2k_gpio_to_irq(plat->gpio_flow_ctrl), cbp_flow_ctrl);
err_req_irq_flow_ctrl:
	if (GPIO_C2K_VALID(plat->gpio_flow_ctrl))
		kfree(cbp_flow_ctrl);
err_kzalloc_cbp_flow_ctrl:
	if (GPIO_C2K_VALID(plat->gpio_data_ack))
		free_irq(c2k_gpio_to_irq(plat->gpio_data_ack), cbp_data_ack);
err_req_irq_data_ack:
	if (GPIO_C2K_VALID(plat->gpio_data_ack))
		kfree(cbp_data_ack);
err_kzalloc_cbp_data_ack:
	return ret;
}
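
/*
 * cbp_remove() undoes cbp_probe() in reverse: it frees the data_ack,
 * flow_ctrl and rst_ind resources that were actually enabled, destroys the
 * exception workqueue, tears the SDIO modem core down via plat->cbp_destroy()
 * and removes the sysfs group.
 */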
/* static int __devexit cbp_remove(struct platform_device *pdev) */
static int cbp_remove(struct platform_device *pdev)
{
	struct cbp_platform_data *plat = pdev->dev.platform_data;

	/*
	if ((GPIO_C2K_VALID(plat->gpio_sd_select))
	    && (GPIO_C2K_VALID(plat->gpio_mc3_enable))) {
	}
	*/
	if (plat->data_ack_enable && (GPIO_C2K_VALID(plat->gpio_data_ack))) {
		free_irq(c2k_gpio_to_irq(plat->gpio_data_ack), cbp_data_ack);
		kfree(cbp_data_ack);
	}
	if (plat->flow_ctrl_enable && (GPIO_C2K_VALID(plat->gpio_flow_ctrl))) {
		free_irq(c2k_gpio_to_irq(plat->gpio_flow_ctrl), cbp_flow_ctrl);
		kfree(cbp_flow_ctrl);
	}
	if (plat->rst_ind_enable && (GPIO_C2K_VALID(plat->gpio_rst_ind))) {
		free_irq(c2k_gpio_to_irq(plat->gpio_rst_ind), cbp_rst_ind);
		destroy_workqueue(cbp_rst_ind->reset_wq);
		kfree(cbp_rst_ind);
	}
	destroy_workqueue(cbp_excp_ind->excp_wq);
	kfree(cbp_excp_ind);
	plat->cbp_destroy();
	sysfs_remove_group(cbp_power_kobj, &g_power_attr_group);
	kobject_put(cbp_power_kobj);
	LOGPRT(LOG_INFO, "cbp removed on host %s, bus is %s!\n", plat->host_id,
	       plat->bus);
	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id c2k_of_ids[] = {
	{.compatible = "mediatek,MDC2K", },
	{}
};
#endif

static struct platform_device cbp_device = {
	.name = "cbp",
	.dev = {
		.platform_data = &cbp_data,
	},
};

static struct platform_driver cbp_driver = {
	.driver = {
		.name = "cbp",
		.owner = THIS_MODULE,
#ifdef CONFIG_OF
		.of_match_table = c2k_of_ids,
#endif
	},
	.probe = cbp_probe,
	.remove = cbp_remove,
};
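
/*
 * PM notifier: on PM_POST_HIBERNATION (IPO-H restore) the modem is marked
 * MD_OFF, modem_reset_handler() and c2k_platform_restore_first_init() are
 * run, the SDIO host is powered down via c2k_wake_host(0), and user space is
 * notified of the IPOH event; the PREPARE events are simply acknowledged.
 */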
static int cbp_pm_event(struct notifier_block *notifier, unsigned long pm_event,
			void *unused)
{
	struct cbp_platform_data *cdata = &cbp_data;
	unsigned long flags;

	LOGPRT(LOG_NOTICE, "%s pm_event=%ld\n", __func__, pm_event);
	switch (pm_event) {
	case PM_HIBERNATION_PREPARE:
		/* This event is received when the system prepares for hibernation, */
		/* i.e. IPO-H power off in kernel space, before user/kernel processes are frozen. */
		return NOTIFY_DONE;
	case PM_RESTORE_PREPARE:
		/* This event is received before the system prepares to restore, */
		/* i.e. IPO-H power on, where the kernel is on its way to late_initcall() in a normal boot. */
		return NOTIFY_DONE;
	case PM_POST_HIBERNATION:
		/*
		 * This event is received after the system has been restored
		 * and user/kernel processes are unfrozen and can run again.
		 */
		if (cdata->modem) {
			LOGPRT(LOG_INFO, "%s: set md off.\n", __func__);
			spin_lock_irqsave(&cdata->modem->status_lock, flags);
			cdata->modem->status = MD_OFF;
			spin_unlock_irqrestore(&cdata->modem->status_lock,
					       flags);
		}
		LOGPRT(LOG_NOTICE, "[%s] ipoh occurred\n", __func__);
		modem_reset_handler();
		c2k_platform_restore_first_init();
		LOGPRT(LOG_NOTICE, "%s %d power off sdio host\n", __func__,
		       __LINE__);
		c2k_wake_host(0);
		LOGPRT(LOG_NOTICE, "%s %d notify user space ipoh\n", __func__,
		       __LINE__);
		modem_ipoh_indication_usr();
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block cbp_pm_notifier_block = {
	.notifier_call = cbp_pm_event,
	.priority = 0,
};
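
/*
 * Module entry point: register the legacy platform device when not booting
 * via device tree, then the "cbp" platform driver and the PM notifier.
 * Registered at late_initcall(), presumably so the MMC/SDIO core is already
 * available by the time the driver probes.
 */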
static int __init cbp_init(void)
{
	int ret;

#ifndef CONFIG_OF
	ret = platform_device_register(&cbp_device);
	if (ret) {
		LOGPRT(LOG_ERR, "platform_device_register failed\n");
		goto err_platform_device_register;
	}
#endif
	ret = platform_driver_register(&cbp_driver);
	if (ret) {
		LOGPRT(LOG_ERR, "platform_driver_register failed\n");
		goto err_platform_driver_register;
	}
	ret = register_pm_notifier(&cbp_pm_notifier_block);
	if (ret) {
		LOGPRT(LOG_ERR, "%s failed to register PM notifier\n",
		       __func__);
		goto err_platform_driver_register;
	} else {
		LOGPRT(LOG_ERR, "%s successfully registered PM notifier\n",
		       __func__);
	}
	return ret;

err_platform_driver_register:
#ifndef CONFIG_OF
	platform_device_unregister(&cbp_device);
err_platform_device_register:
#endif
	return ret;
}
late_initcall(cbp_init);