/*
 * viatel_cbp_sync.c
 *
 * VIA CBP driver for Linux
 *
 * Copyright (C) 2011 VIA TELECOM Corporation, Inc.
 * Author: VIA TELECOM Corporation, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/version.h>
#include <linux/kthread.h>
#include "c2k_hw.h"

static int asc_debug;
#define ASCDPRT(fmt, arg...) do { \
	if (asc_debug) \
		pr_debug("[C2K MODEM] " fmt, ##arg); \
} while (0)
#define ASCPRT(fmt, arg...) pr_debug("[C2K MODEM] " fmt, ##arg)

/* timeouts in ms */
#define ASC_RX_WAIT_IDLE_TIME	(1000)
#define ASC_TX_WAIT_READY_TIME	(500)	/* 1000 */
#define ASC_TX_WAIT_IDLE_TIME	(2000)
#define ASC_TX_AUTO_DELAY_TIME	(200)	/* (2000) */
#define ASC_TX_WAIT_SLEEP_TIME	(500)
#define ASC_TX_TRY_TIMES	(5)	/* 3 */
#define ASC_TX_RETRY_DELAY	(100)
#define ASC_TX_DEBOUNCE_TIME	(10)
#define ASC_TX_AFTER_CP_SLEEP_TIME	(10)
#define ASC_TX_SYSFS_USER	"AscApp"
#define ASC_TX_AUTO_USER	"AscAuto"

/* these lists hold all registered struct asc_tx_handle / struct asc_rx_handle */
static DEFINE_SPINLOCK(hdlock);
static LIST_HEAD(asc_tx_handle_list);
static LIST_HEAD(asc_rx_handle_list);
static struct workqueue_struct *asc_work_queue;
static struct kobject *asc_kobj;

enum {
	ASC_TX_HD = 0,
	ASC_RX_HD
};

#define ASC_EVENT_POOL_MAX (60)
enum {
	ASC_EVENT_UNUSE = 0,
	ASC_EVENT_STATIC,
	ASC_EVENT_DYNAMIC
};

struct asc_event {
	int id;
	struct list_head list;
	char usage;
};

static struct asc_event event_pool[ASC_EVENT_POOL_MAX];

struct asc_user {
	struct asc_infor infor;
	atomic_t count;
	struct list_head node;
};

struct asc_state_dsp {
	char name[ASC_NAME_LEN];
	/*state callback handle for events */
	int (*handle)(void *hd, int event);
};

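/*
 * Events exchanged with the state-machine threads come from a small static
 * pool (ASC_EVENT_POOL_MAX entries, usable from atomic context); when the
 * pool is exhausted, asc_event_malloc() falls back to kmalloc(GFP_ATOMIC)
 * and marks the event ASC_EVENT_DYNAMIC so that asc_event_free() knows
 * whether to clear the pool slot or kfree() the event.
 */
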
/* TX states and TX events */
enum {
	AP_TX_EVENT_REQUEST = 0,	/* internal */
	AP_TX_EVENT_CP_READY,
	AP_TX_EVENT_CP_UNREADY,
	AP_TX_EVENT_WAIT_TIMEOUT,
	AP_TX_EVENT_IDLE_TIMEOUT,
	AP_TX_EVENT_STOP,
	AP_TX_EVENT_RESET,
	AP_TX_EVENT_NUM
};

enum {
	AP_TX_ST_SLEEP = 0,
	AP_TX_ST_WAIT_READY,
	AP_TX_ST_READY,		/* wait for all tx channels to finish */
	AP_TX_ST_IDLE,
	AP_TX_ST_NUM
};

struct asc_tx_handle {
	struct asc_config cfg;
	atomic_t state;
	atomic_t count;
	int ready_hold;
	atomic_t delay_sleep;
	struct list_head user_list;
	struct asc_state_dsp *table;
	/* thread that processes events and switches between states */
	struct task_struct *thread;
	atomic_t sleeping;
	int ntf;
	int wait_try;
	int auto_delay;
	spinlock_t slock;
	spinlock_t ready_slock;
	/*
	 * user_count_lock protects user->count against concurrent access
	 * between asc_tx_get_ready/asc_tx_auto_ready and asc_tx_put_ready
	 */
	spinlock_t user_count_lock;
	wait_queue_head_t wait;
	wait_queue_head_t wait_tx_state;
	struct mutex mlock;
	struct wake_lock wlock;
	struct timer_list timer_wait_ready;
	struct timer_list timer_wait_idle;
	struct timer_list timer_wait_sleep;
	/*
	 * after triggering cp sleep, wait for a while before waking cp again,
	 * so that cp can handle the ap_wake_cp interrupt
	 */
	struct timer_list timer_wait_after_cp_sleep;
	atomic_t trigger_cp_sleep;
	struct work_struct ntf_work;
	struct list_head event_q;
	struct list_head node;
	struct kobject *kobj;
};

static int asc_tx_handle_sleep(void *, int);
static int asc_tx_handle_wait_ready(void *, int);
static int asc_tx_handle_ready(void *, int);
static int asc_tx_handle_idle(void *, int);

/* the table used to describe all tx states */
static struct asc_state_dsp asc_tx_table[AP_TX_ST_NUM] = {
	[AP_TX_ST_SLEEP] = {
		.name = "AP_TX_ST_SLEEP",
		.handle = asc_tx_handle_sleep,
	},
	[AP_TX_ST_WAIT_READY] = {
		.name = "AP_TX_ST_WAIT_READY",
		.handle = asc_tx_handle_wait_ready,
	},
	[AP_TX_ST_READY] = {
		.name = "AP_TX_ST_READY",
		.handle = asc_tx_handle_ready,
	},
	[AP_TX_ST_IDLE] = {
		.name = "AP_TX_ST_IDLE",
		.handle = asc_tx_handle_idle,
	},
};

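/*
 * Summary of the tx state machine referenced by asc_tx_table[] and driven
 * by asc_tx_event_thread(): AP_TX_EVENT_REQUEST moves SLEEP to WAIT_READY
 * (ap_wake_cp asserted, wait-ready timer armed); AP_TX_EVENT_CP_READY
 * moves WAIT_READY to READY; AP_TX_EVENT_STOP or the idle/sleep timeouts
 * drop the machine back to SLEEP, de-asserting ap_wake_cp and releasing
 * the wake lock.
 */
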
/* RX states and RX events */
enum {
	AP_RX_EVENT_REQUEST = 0,
	AP_RX_EVENT_AP_READY,
	AP_RX_EVENT_AP_UNREADY,
	AP_RX_EVENT_STOP,
	AP_RX_EVENT_IDLE_TIMEOUT,
	AP_RX_EVENT_RESET,
	AP_RX_EVENT_NUM
};

enum {
	AP_RX_ST_SLEEP = 0,
	AP_RX_ST_WAIT_READY,
	AP_RX_ST_READY,
	AP_RX_ST_IDLE,
	AP_RX_ST_NUM
};

struct asc_rx_handle {
	struct asc_config cfg;
	atomic_t state;
	struct list_head user_list;
	struct asc_state_dsp *table;
	int ntf;
	/* thread that processes events and switches between states */
	struct task_struct *thread;
	spinlock_t slock;
	wait_queue_head_t wait;
	struct mutex mlock;
	struct wake_lock wlock;
	struct timer_list timer;
	struct list_head event_q;
	struct list_head node;
	struct work_struct ntf_prepare_work;
	struct work_struct ntf_post_work;
	struct kobject *kobj;
};

static int asc_rx_handle_sleep(void *, int);
static int asc_rx_handle_wait_ready(void *, int);
static int asc_rx_handle_ready(void *, int);
static int asc_rx_handle_idle(void *, int);

/* the table used to describe all rx states */
static struct asc_state_dsp asc_rx_table[AP_RX_ST_NUM] = {
	[AP_RX_ST_SLEEP] = {
		.name = "AP_RX_ST_SLEEP",
		.handle = asc_rx_handle_sleep,
	},
	[AP_RX_ST_WAIT_READY] = {
		.name = "AP_RX_ST_WAIT_READY",
		.handle = asc_rx_handle_wait_ready,
	},
	[AP_RX_ST_READY] = {
		.name = "AP_RX_ST_READY",
		.handle = asc_rx_handle_ready,
	},
	[AP_RX_ST_IDLE] = {
		.name = "AP_RX_ST_IDLE",
		.handle = asc_rx_handle_idle,
	},
};

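/*
 * Summary of the rx state machine referenced by asc_rx_table[]: a
 * cp_wake_ap interrupt queues AP_RX_EVENT_REQUEST, moving SLEEP to
 * WAIT_READY and notifying users with ASC_NTF_RX_PREPARE;
 * AP_RX_EVENT_AP_READY asserts ap_ready and enters READY;
 * AP_RX_EVENT_STOP enters IDLE, and the following idle timeout returns to
 * SLEEP with ap_ready de-asserted and users notified with ASC_NTF_RX_POST.
 */
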
static int asc_tx_event_send(struct asc_tx_handle *tx, int id);
static void asc_tx_handle_reset(struct asc_tx_handle *tx);
static int asc_rx_event_send(struct asc_rx_handle *rx, int id);
static void asc_rx_handle_reset(struct asc_rx_handle *rx);

static struct asc_event *asc_event_malloc(void)
{
	int i = 0;
	unsigned long flags = 0;
	struct asc_event *event = NULL;

	spin_lock_irqsave(&hdlock, flags);
	for (i = 0; i < ASC_EVENT_POOL_MAX; i++) {
		if (ASC_EVENT_UNUSE == event_pool[i].usage) {
			event = &(event_pool[i]);
			event->usage = ASC_EVENT_STATIC;
			break;	/* claim only the first free slot */
		}
	}
	if (NULL == event) {
		event = kmalloc(sizeof(struct asc_event), GFP_ATOMIC);
		if (event)
			event->usage = ASC_EVENT_DYNAMIC;
	}
	spin_unlock_irqrestore(&hdlock, flags);
	return event;
}

static void asc_event_free(struct asc_event *event)
{
	unsigned long flags = 0;

	if (!event)
		return;
	spin_lock_irqsave(&hdlock, flags);
	if (ASC_EVENT_STATIC == event->usage)
		memset(event, 0, sizeof(struct asc_event));
	else
		kfree(event);
	spin_unlock_irqrestore(&hdlock, flags);
}

static irqreturn_t asc_irq_cp_indicate_state(int irq, void *data)
{
	int level;
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
	struct asc_config *cfg = &tx->cfg;

#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
	spin_lock(&tx->ready_slock);
	/*when using internal EINT, we cannot get line status directly from GPIO API */
	level = !!c2k_gpio_to_ls(cfg->gpio_ready);
	c2k_gpio_set_irq_type(cfg->gpio_ready,
			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
	spin_unlock(&tx->ready_slock);
#else
	c2k_gpio_set_irq_type(cfg->gpio_ready,
			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
#endif
	level = !!c2k_gpio_get_value(cfg->gpio_ready);
	ASCDPRT("Irq %s cp_indicate_ap %s.\n", cfg->name,
		(level == cfg->polar) ? "WAKEN" : "SLEEP");
	if (level == cfg->polar) {
		asc_tx_event_send(tx, AP_TX_EVENT_CP_READY);
	} else {
		/*do not care */
		/*asc_tx_event_send(tx, AP_TX_EVENT_CP_UNREADY); */
	}
#if defined(CONFIG_MTK_LEGACY)
	c2k_gpio_irq_unmask(cfg->gpio_ready);
#endif
	return IRQ_HANDLED;
}

int c2k_exception = 0;

static irqreturn_t asc_irq_cp_wake_ap(int irq, void *data)
{
	int level;
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
	struct asc_config *cfg = &rx->cfg;

#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
	/*when using internal EINT, we cannot get line status directly from GPIO API */
	level = !!c2k_gpio_to_ls(cfg->gpio_wake);
#endif
	level = !!c2k_gpio_get_value(cfg->gpio_wake);
	c2k_gpio_set_irq_type(cfg->gpio_wake,
			      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING);
	ASCDPRT("Irq %s cp_wake_ap, request ap to be %s.\n", cfg->name,
		(level == cfg->polar) ? "WAKEN" : "SLEEP");
	if (level == cfg->polar) {
		/*Cp requests Ap to wake */
		wake_lock(&rx->wlock);
		/*FIXME: jump to ready as soon as possible to avoid the AP_READY error indication to CBP */
		if (AP_RX_ST_IDLE == atomic_read(&rx->state)) {
			ASCDPRT("Rx(%s): process event(%d) in state(%s).\n",
				cfg->name, AP_RX_EVENT_REQUEST,
				rx->table[AP_RX_ST_IDLE].name);
			asc_rx_handle_idle(rx, AP_RX_EVENT_REQUEST);
			ASCDPRT("Rx(%s): go into state(%s).\n", cfg->name,
				rx->table[atomic_read(&rx->state)].name);
		}
		asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
	} else {
		/*Cp allows Ap to sleep */
		asc_rx_event_send(rx, AP_RX_EVENT_STOP);
	}
#if 0
	if (mt_get_gpio_in(GPIO120)) {	/*GPIO120 high, md exception happened */
		pr_debug("[MODEM SDIO] GPIO120 high, modem exception!\n");
		c2k_exception = 1;
		gpio_irq_cbp_excp_ind();
	} else {
		pr_debug("[MODEM SDIO] no exception!\n");
	}
#endif
#if defined(CONFIG_MTK_LEGACY)
	c2k_gpio_irq_unmask(cfg->gpio_wake);
#endif
	return IRQ_HANDLED;
}

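/*
 * The two interrupt handlers above implement the AP/CP handshake lines:
 * for tx, the AP drives gpio_wake (ap_wake_cp) and the CBP answers on
 * gpio_ready (cp_indicate_state irq); for rx, the CBP drives gpio_wake
 * (cp_wake_ap irq) and the AP answers on gpio_ready (ap_ready).
 * cfg.polar gives the active level of each line.
 */
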
static struct asc_tx_handle *asc_tx_handle_lookup(const char *name)
{
	unsigned long flags;
	struct asc_tx_handle *hd, *tmp, *t;

	if (!name)
		return NULL;
	hd = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (!strncmp(name, tmp->cfg.name, ASC_NAME_LEN - 1)) {
			hd = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	return hd;
}

static struct asc_rx_handle *asc_rx_handle_lookup(const char *name)
{
	unsigned long flags;
	struct asc_rx_handle *hd, *tmp, *t;

	if (!name)
		return NULL;
	hd = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_rx_handle_list, node) {
		if (!strncmp(name, tmp->cfg.name, ASC_NAME_LEN - 1)) {
			hd = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	return hd;
}

static struct asc_user *asc_tx_user_lookup(struct asc_tx_handle *tx,
					    const char *name)
{
	unsigned long flags = 0;
	struct asc_user *user = NULL, *tmp = NULL, *t = NULL;

	if (!name)
		return NULL;
	spin_lock_irqsave(&tx->slock, flags);
	list_for_each_entry_safe(tmp, t, &tx->user_list, node) {
		if (!strncmp(name, tmp->infor.name, ASC_NAME_LEN - 1)) {
			user = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&tx->slock, flags);
	return user;
}

static struct asc_user *asc_rx_user_lookup(struct asc_rx_handle *rx,
					    const char *name)
{
	unsigned long flags = 0;
	struct asc_user *user = NULL, *tmp = NULL, *t = NULL;

	if (!name)
		return NULL;
	spin_lock_irqsave(&rx->slock, flags);
	list_for_each_entry_safe(tmp, t, &rx->user_list, node) {
		if (!strncmp(name, tmp->infor.name, ASC_NAME_LEN - 1)) {
			user = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&rx->slock, flags);
	return user;
}

static inline void asc_rx_indicate_wake(struct asc_rx_handle *rx)
{
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	/*if(rx->cfg.gpio_ready >= 0) */
	if (((rx->cfg.gpio_ready) & 0xFFFF) >= 0)
		c2k_gpio_direction_output(rx->cfg.gpio_ready, rx->cfg.polar);
#else
	if (rx->cfg.gpio_ready == AP_USING_REGISTER)
		c2k_ap_ready_indicate(rx->cfg.polar);
#endif
}

static inline void asc_rx_indicate_sleep(struct asc_rx_handle *rx)
{
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	/*if(rx->cfg.gpio_ready >= 0) */
	if (((rx->cfg.gpio_ready) & 0xFFFF) >= 0)
		c2k_gpio_direction_output(rx->cfg.gpio_ready, !rx->cfg.polar);
#else
	if (rx->cfg.gpio_ready == AP_USING_REGISTER)
		c2k_ap_ready_indicate(!rx->cfg.polar);
#endif
}

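/*
 * asc_rx_indicate_wake()/asc_rx_indicate_sleep() report the AP rx state to
 * the CBP, either through the ap_ready GPIO (CONFIG_EVDO_DT_VIA_SUPPORT)
 * or, when cfg.gpio_ready is AP_USING_REGISTER, through
 * c2k_ap_ready_indicate(); driving cfg.polar means "AP ready to receive".
 */
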
static int asc_rx_event_send(struct asc_rx_handle *rx, int id)
{
	unsigned long flags = 0;
	struct asc_event *event = NULL;
	int ret = -ENODEV;

	if (rx->thread == NULL) {
		ASCPRT("%s:no thread for event\n", __func__);
		return ret;
	}
	/*check whether the event is cared about in the current state */
	if (id >= 0) {
		event = asc_event_malloc();
		if (!event) {
			ASCPRT("No memory to create new event.\n");
			ret = -ENOMEM;
			goto send_event_error;
		}
		/*insert a new event at the list tail and wake up the processing thread */
		/*ASCDPRT("Rx(%s):send event(%d) to state(%s).\n",
		   rx->name, id, rx->table[atomic_read(&rx->state)].name); */
		event->id = id;
		spin_lock_irqsave(&rx->slock, flags);
		if (AP_RX_EVENT_RESET == id)
			list_add(&event->list, &rx->event_q);
		else
			list_add_tail(&event->list, &rx->event_q);
		spin_unlock_irqrestore(&rx->slock, flags);
		wake_up(&rx->wait);
	}
	ret = 0;
send_event_error:
	return ret;
}

static int asc_rx_event_recv(struct asc_rx_handle *rx)
{
	unsigned long flags = 0;
	struct asc_event *event = NULL;
	int ret = -ENODEV;

	if (rx->thread == NULL) {
		ASCPRT("%s:no thread for event\n", __func__);
		return ret;
	}
	spin_lock_irqsave(&rx->slock, flags);
	if (!list_empty(&rx->event_q)) {
		event = list_first_entry(&rx->event_q, struct asc_event, list);
		list_del(&event->list);
	}
	spin_unlock_irqrestore(&rx->slock, flags);
	if (event) {
		ret = event->id;
		asc_event_free(event);
	}
	return ret;
}

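/*
 * Event queues are FIFO except for RESET events, which list_add() places
 * at the head so that a pending reset is handled before any earlier events.
 */
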
static int asc_rx_event_thread(void *data)
{
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;
	int id = 0, index;
	char name[ASC_NAME_LEN] = { 0 };
	struct asc_state_dsp *dsp = NULL;

	rx->thread = current;
	snprintf(name, ASC_NAME_LEN, "asc_rx_%s", rx->cfg.name);
	ASCDPRT("%s thread start now.\n", name);
	while (1) {
		/*sleep until an event arrives or the thread is asked to exit */
		wait_event(rx->wait, ((id = asc_rx_event_recv(rx)) >= 0)
			   || (!rx->thread));
		/*the thread has been asked to exit */
		if (!rx->thread)
			break;
		mutex_lock(&rx->mlock);
		if (AP_RX_EVENT_RESET == id) {
			asc_rx_handle_reset(rx);
		} else {
			index = atomic_read(&rx->state);
			dsp = rx->table + index;
			if (dsp->handle) {
				ASCDPRT("Rx(%s): process event(%d) in state(%s).\n",
					rx->cfg.name, id, dsp->name);
				dsp->handle(rx, id);
				ASCDPRT("Rx(%s): go into state(%s).\n",
					rx->cfg.name,
					rx->table[atomic_read(&rx->state)].name);
			}
		}
		mutex_unlock(&rx->mlock);
	}
	ASCDPRT("%s thread exit.\n", name);
	kfree(rx);
	return 0;
}

static void asc_rx_event_timer(unsigned long data)
{
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;

	/*ASCDPRT("%s timer is time out.\n", rx->name); */
	asc_rx_event_send(rx, AP_RX_EVENT_IDLE_TIMEOUT);
}

static void asc_tx_notifier_work(struct work_struct *work)
{
	struct asc_infor *infor;
	struct asc_user *user = NULL, *t = NULL;
	struct asc_tx_handle *tx = container_of(work, struct asc_tx_handle,
						ntf_work);

	list_for_each_entry_safe(user, t, &tx->user_list, node) {
		infor = &user->infor;
		if (infor->notifier)
			infor->notifier(tx->ntf, infor->data);
	}
}

static void asc_rx_notifier_prepare_work(struct work_struct *work)
{
	struct asc_infor *infor;
	struct asc_user *user = NULL, *t = NULL;
	struct asc_rx_handle *rx = container_of(work, struct asc_rx_handle,
						ntf_prepare_work);

	list_for_each_entry_safe(user, t, &rx->user_list, node) {
		infor = &user->infor;
		if (infor->notifier)
			infor->notifier(ASC_NTF_RX_PREPARE, infor->data);
	}
}

static void asc_rx_notifier_post_work(struct work_struct *work)
{
	struct asc_infor *infor;
	struct asc_user *user = NULL, *t = NULL;
	struct asc_rx_handle *rx = container_of(work, struct asc_rx_handle,
						ntf_post_work);

	list_for_each_entry_safe(user, t, &rx->user_list, node) {
		infor = &user->infor;
		if (infor->notifier)
			infor->notifier(ASC_NTF_RX_POST, infor->data);
	}
}

static void asc_tx_notifier(struct asc_tx_handle *tx, int ntf)
{
	tx->ntf = ntf;
	queue_work(asc_work_queue, &tx->ntf_work);
}

static void asc_rx_notifier(struct asc_rx_handle *rx, int ntf)
{
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	rx->ntf = ntf;
	if (ASC_NTF_RX_PREPARE == ntf) {
		ASCPRT("asc_rx_notifier: queue_work ntf_prepare_work\n");
		queue_work(asc_work_queue, &rx->ntf_prepare_work);
	} else if (ASC_NTF_RX_POST == ntf) {
		queue_work(asc_work_queue, &rx->ntf_post_work);
	}
#else
	struct asc_infor *infor;
	struct asc_user *user = NULL, *t = NULL;

	ASCDPRT("asc_rx_notifier start, ntf = %d\n", ntf);
	list_for_each_entry_safe(user, t, &rx->user_list, node) {
		infor = &user->infor;
		if (infor->notifier)
			infor->notifier(ntf, infor->data);
	}
	ASCDPRT("asc_rx_notifier end, ntf = %d\n", ntf);
#endif
}

static int asc_rx_handle_init(struct asc_rx_handle *rx)
{
	int ret = 0;
	char *name = NULL;
	struct asc_config *cfg = &rx->cfg;

	if (((cfg->gpio_ready) & 0xFFFF) >= 0)
		asc_rx_indicate_sleep(rx);
	if (((cfg->gpio_wake) & 0xFFFF) >= 0) {
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_mask(cfg->gpio_wake);
#endif
		c2k_gpio_direction_input_for_irq(cfg->gpio_wake);
		c2k_gpio_set_irq_type(cfg->gpio_wake,
				      IRQF_TRIGGER_RISING |
				      IRQF_TRIGGER_FALLING);
		ret = c2k_gpio_request_irq(cfg->gpio_wake, asc_irq_cp_wake_ap,
					   IRQF_SHARED | IRQF_NO_SUSPEND,
					   "cp_wake_ap", rx);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_unmask(cfg->gpio_wake);
#endif
		if (ret < 0) {
			ASCPRT("fail to request cp_wake_ap irq for %s\n",
			       cfg->name);
			goto err_req_irq_cp_wake_ap;
		}
	}
	rx->table = asc_rx_table;
	mutex_init(&rx->mlock);
	INIT_LIST_HEAD(&rx->event_q);
	INIT_LIST_HEAD(&rx->user_list);
	spin_lock_init(&rx->slock);
	setup_timer(&rx->timer, asc_rx_event_timer, (unsigned long)rx);
	name = kzalloc(ASC_NAME_LEN, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		ASCPRT("%s: no memory to malloc for wake lock name\n",
		       __func__);
		goto err_malloc_name;
	}
	snprintf(name, ASC_NAME_LEN, "asc_rx_%s", rx->cfg.name);
	wake_lock_init(&rx->wlock, WAKE_LOCK_SUSPEND, name);
	init_waitqueue_head(&rx->wait);
	INIT_WORK(&rx->ntf_prepare_work, asc_rx_notifier_prepare_work);
	INIT_WORK(&rx->ntf_post_work, asc_rx_notifier_post_work);
	atomic_set(&rx->state, AP_RX_ST_SLEEP);
	kthread_run(asc_rx_event_thread, rx, "C2K_RX_ASC");
	if (!!c2k_gpio_get_value(cfg->gpio_wake) == cfg->polar)
		asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
	return 0;

/*err_create_rx_thread:*/
	kfree(name);
err_malloc_name:
	/*if(cfg->gpio_wake >= 0) */
	if (((cfg->gpio_wake) & 0xFFFF) >= 0)
		free_irq(c2k_gpio_to_irq(cfg->gpio_wake), rx);
err_req_irq_cp_wake_ap:
	/*err_request_gpio_cp_wake_ap:*/
	/*err_request_gpio_ap_ready:*/
	return ret;
}

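/*
 * Note the tail of asc_rx_handle_init(): if the cp_wake_ap line is already
 * at its active level when the handle is initialized, an
 * AP_RX_EVENT_REQUEST is queued immediately so that a wake request raised
 * before initialization is not lost.
 */
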
static int asc_rx_handle_sleep(void *data, int event)
{
	int ret = 0;
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;

	/*ASCDPRT("Rx(%s): process event(%d) in state(%s).\n", rx->name,
	   event, rx->table[atomic_read(&rx->state)].name); */
	if (AP_RX_ST_SLEEP != atomic_read(&rx->state))
		return 0;
	switch (event) {
	case AP_RX_EVENT_REQUEST:
		wake_lock(&rx->wlock);
		atomic_set(&rx->state, AP_RX_ST_WAIT_READY);
		asc_rx_notifier(rx, ASC_NTF_RX_PREPARE);
		break;
	default:
		ASCDPRT("ignore the rx event %d in state(%s)", event,
			rx->table[atomic_read(&rx->state)].name);
	}
	/*ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name); */
	return ret;
}

static int asc_rx_handle_wait_ready(void *data, int event)
{
	int ret = 0;
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;

	/*ASCDPRT("Rx(%s): process event(%d) in state(%s).\n",
	   rx->name, event, rx->table[atomic_read(&rx->state)].name); */
	if (AP_RX_ST_WAIT_READY != atomic_read(&rx->state))
		return 0;
	switch (event) {
	case AP_RX_EVENT_AP_READY:
		/*need ack ready to cp, do nothing if no gpio for ap_ready */
		asc_rx_indicate_wake(rx);
		atomic_set(&rx->state, AP_RX_ST_READY);
		break;
	case AP_RX_EVENT_AP_UNREADY:
	case AP_RX_EVENT_STOP:
		atomic_set(&rx->state, AP_RX_ST_SLEEP);
		asc_rx_notifier(rx, ASC_NTF_RX_POST);
		/*need ack ready to cp, do nothing if no gpio for ap_ready */
		asc_rx_indicate_sleep(rx);
		wake_unlock(&rx->wlock);
		break;
	default:
		ASCDPRT("ignore the rx event %d in state(%s)", event,
			rx->table[atomic_read(&rx->state)].name);
	}
	/*ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name); */
	return ret;
}

static int asc_rx_handle_ready(void *data, int event)
{
	int ret = 0;
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;

	/*ASCDPRT("Rx(%s): process event(%d) in state(%s).\n",
	   rx->name, event, rx->table[atomic_read(&rx->state)].name); */
	if (AP_RX_ST_READY != atomic_read(&rx->state))
		return 0;
	switch (event) {
	case AP_RX_EVENT_STOP:
		atomic_set(&rx->state, AP_RX_ST_IDLE);
		asc_rx_event_send(rx, AP_RX_EVENT_IDLE_TIMEOUT);
		/*mod_timer(&rx->timer,
		   jiffies + msecs_to_jiffies(ASC_RX_WAIT_IDLE_TIME));*/
		break;
	default:
		ASCDPRT("ignore the rx event %d in state(%s)", event,
			rx->table[atomic_read(&rx->state)].name);
	}
	/*ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name); */
	return ret;
}

static int asc_rx_handle_idle(void *data, int event)
{
	int ret = 0;
	unsigned long flags = 0;
	struct asc_rx_handle *rx = (struct asc_rx_handle *)data;

	/*FIXME: prevent from scheduled and interrupted to avoid error indication to CBP */
	spin_lock_irqsave(&rx->slock, flags);
	/*ASCDPRT("Rx(%s): process event(%d) in state(%s).\n",
	   rx->name, event, rx->table[atomic_read(&rx->state)].name); */
	if (AP_RX_ST_IDLE != atomic_read(&rx->state))
		goto _end;
	switch (event) {
	case AP_RX_EVENT_REQUEST:
		del_timer(&rx->timer);
		atomic_set(&rx->state, AP_RX_ST_READY);
		break;
	case AP_RX_EVENT_IDLE_TIMEOUT:
		asc_rx_notifier(rx, ASC_NTF_RX_POST);
		atomic_set(&rx->state, AP_RX_ST_SLEEP);
		/*need ack ready to cp, do nothing if no gpio for ap_ready */
		asc_rx_indicate_sleep(rx);
		wake_unlock(&rx->wlock);
		break;
	default:
		ASCDPRT("ignore the rx event %d in state(%s)", event,
			rx->table[atomic_read(&rx->state)].name);
	}
_end:
	spin_unlock_irqrestore(&rx->slock, flags);
	/*ASCDPRT("Rx(%s): go into state(%s).\n", rx->name, rx->table[atomic_read(&rx->state)].name); */
	return ret;
}

static void asc_tx_trig_busy(struct asc_tx_handle *tx)
{
	atomic_set(&tx->delay_sleep, 0);
	mod_timer(&tx->timer_wait_idle,
		  jiffies + msecs_to_jiffies(tx->auto_delay));
}

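/*
 * asc_tx_trig_busy() (re)arms the auto-idle timer, pushing automatic sleep
 * back by auto_delay ms.  When asc_tx_wait_idle_timer() finally fires and
 * ready_hold is zero, the AscAuto reference is dropped through
 * asc_tx_put_ready() and the CBP is allowed to sleep again.
 */
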
static inline void asc_tx_wake_cp(struct asc_tx_handle *tx)
{
	int retry = 0;
	unsigned long flags = 0;

#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	/*if(tx->cfg.gpio_wake >= 0) */
	if (((tx->cfg.gpio_wake) & 0xFFFF) >= 0)
		c2k_gpio_direction_output(tx->cfg.gpio_wake, tx->cfg.polar);
#else
	if (tx->cfg.gpio_wake == AP_USING_REGISTER) {
		while (atomic_read(&tx->trigger_cp_sleep)) {
			msleep(20);
			retry++;
			if (retry >= 5) {
				atomic_set(&tx->trigger_cp_sleep, 0);
				break;
			}
		}
		spin_lock_irqsave(&tx->ready_slock, flags);
		c2k_ap_wake_cp(tx->cfg.polar);
		spin_unlock_irqrestore(&tx->ready_slock, flags);
	}
#endif
}

static inline void asc_tx_sleep_cp(struct asc_tx_handle *tx)
{
#ifdef CONFIG_EVDO_DT_VIA_SUPPORT
	/*if(tx->cfg.gpio_wake >= 0) */
	if (((tx->cfg.gpio_wake) & 0xFFFF) >= 0)
		c2k_gpio_direction_output(tx->cfg.gpio_wake, !tx->cfg.polar);
#else
	if (tx->cfg.gpio_wake == AP_USING_REGISTER) {
		atomic_set(&tx->trigger_cp_sleep, 1);
		mod_timer(&tx->timer_wait_after_cp_sleep,
			  jiffies +
			  msecs_to_jiffies(ASC_TX_AFTER_CP_SLEEP_TIME));
		c2k_ap_wake_cp(!tx->cfg.polar);
	}
#endif
}

static inline int asc_tx_cp_be_ready(struct asc_tx_handle *tx)
{
	int ret = 0;

	/*if(tx->cfg.gpio_ready >= 0) */
	if (((tx->cfg.gpio_ready) & 0xFFFF) >= 0)
		ret = ((!!c2k_gpio_get_value(tx->cfg.gpio_ready)) ==
		       (tx->cfg.polar));
	ASCDPRT("asc_tx_cp_be_ready ret %d.\n", ret);
	return ret;
}

static int asc_tx_event_send(struct asc_tx_handle *tx, int id)
{
	unsigned long flags = 0;
	struct asc_event *event = NULL;
	int ret = -ENODEV;

	if (tx->thread == NULL) {
		ASCPRT("%s:no thread for event\n", __func__);
		return ret;
	}
	/*check whether the event is cared about in the current state */
	if (id >= 0) {
		event = asc_event_malloc();
		if (!event) {
			ASCPRT("No memory to create new event.\n");
			ret = -ENOMEM;
			goto send_event_error;
		}
		/*insert a new event at the list tail and wake up the processing thread */
		/*ASCDPRT("Send tx event(%d) to state(%s).\n", id, tx->table[atomic_read(&tx->state)].name); */
		event->id = id;
		spin_lock_irqsave(&tx->slock, flags);
		if (AP_TX_EVENT_RESET == id)
			list_add(&event->list, &tx->event_q);
		else
			list_add_tail(&event->list, &tx->event_q);
		spin_unlock_irqrestore(&tx->slock, flags);
		wake_up(&tx->wait);
	}
	ret = 0;	/* success; mirrors asc_rx_event_send() */
send_event_error:
	return ret;
}

static int asc_tx_event_recv(struct asc_tx_handle *tx)
{
	unsigned long flags = 0;
	struct asc_event *event = NULL;
	int ret = -ENODEV;

	if (tx->thread == NULL) {
		ASCPRT("%s:no thread for event\n", __func__);
		return ret;
	}
	spin_lock_irqsave(&tx->slock, flags);
	if (!list_empty(&tx->event_q)) {
		event = list_first_entry(&tx->event_q, struct asc_event, list);
		list_del(&event->list);
	}
	spin_unlock_irqrestore(&tx->slock, flags);
	if (event) {
		ret = event->id;
		asc_event_free(event);
	}
	return ret;
}

static int asc_tx_get_user(struct asc_tx_handle *tx, const char *name)
{
	int ret = 0;
	struct asc_user *user = NULL;

	user = asc_tx_user_lookup(tx, name);
	if (user)
		atomic_inc(&user->count);
	else
		ret = -ENODEV;
	return ret;
}

static int asc_tx_put_user(struct asc_tx_handle *tx, const char *name)
{
	struct asc_user *user = NULL;
	int ret = 0;

	user = asc_tx_user_lookup(tx, name);
	if (user) {
		if (atomic_read(&user->count) >= 1)
			atomic_dec(&user->count);
	} else {
		ret = -ENODEV;
	}
	return ret;
}

static int asc_tx_refer(struct asc_tx_handle *tx, const char *name)
{
	unsigned long flags = 0;
	struct asc_user *user = NULL, *t = NULL;
	int count = 0;

	if (name) {
		/*get the reference count of the user */
		user = asc_tx_user_lookup(tx, name);
		if (user)
			count = atomic_read(&user->count);
	} else {
		spin_lock_irqsave(&tx->slock, flags);
		list_for_each_entry_safe(user, t, &tx->user_list, node) {
			count += atomic_read(&user->count);
		}
		spin_unlock_irqrestore(&tx->slock, flags);
	}
	return count;
}

static int asc_rx_refer(struct asc_rx_handle *rx, const char *name)
{
	unsigned long flags = 0;
	struct asc_user *user = NULL, *t = NULL;
	int count = 0;

	if (name) {
		/*get the reference count of the user */
		user = asc_rx_user_lookup(rx, name);
		if (user)
			count = atomic_read(&user->count);
	} else {
		spin_lock_irqsave(&rx->slock, flags);
		list_for_each_entry_safe(user, t, &rx->user_list, node) {
			count += atomic_read(&user->count);
		}
		spin_unlock_irqrestore(&rx->slock, flags);
	}
	return count;
}

static void asc_tx_refer_clear(struct asc_tx_handle *tx)
{
	unsigned long flags = 0;
	struct asc_user *user = NULL, *t = NULL;

	spin_lock_irqsave(&tx->slock, flags);
	list_for_each_entry_safe(user, t, &tx->user_list, node) {
		atomic_set(&user->count, 0);
	}
	spin_unlock_irqrestore(&tx->slock, flags);
}

static int asc_tx_event_thread(void *data)
{
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
	int id = 0, index;
	char name[ASC_NAME_LEN] = { 0 };
	struct asc_state_dsp *dsp = NULL;

	snprintf(name, ASC_NAME_LEN, "asc_tx_%s", tx->cfg.name);
	tx->thread = current;
	ASCDPRT("%s thread start now.\n", name);
	while (1) {
		/*sleep until an event arrives or the thread is asked to exit */
		wait_event(tx->wait, ((id = asc_tx_event_recv(tx)) >= 0)
			   || (!tx->thread));
		/*the thread has been asked to exit */
		if (!tx->thread)
			break;
		mutex_lock(&tx->mlock);
		if (AP_TX_EVENT_RESET == id) {
			asc_tx_handle_reset(tx);
		} else {
			index = atomic_read(&tx->state);
			dsp = tx->table + index;
			if (dsp->handle) {
				ASCDPRT("Tx(%s): process event(%d) in state(%s).\n",
					tx->cfg.name, id, dsp->name);
				dsp->handle(tx, id);
				ASCDPRT("Tx(%s): go into state(%s).\n",
					tx->cfg.name,
					tx->table[atomic_read(&tx->state)].name);
			}
		}
		mutex_unlock(&tx->mlock);
	}
	ASCDPRT("%s thread exit.\n", name);
	kfree(tx);
	return 0;
}

static void asc_tx_wait_ready_timer(unsigned long data)
{
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	/*ASCDPRT("%s tx wait ready timer is timeout.\n", tx->name); */
	asc_tx_event_send(tx, AP_TX_EVENT_WAIT_TIMEOUT);
}

static void asc_tx_wait_idle_timer(unsigned long data)
{
	char path[ASC_NAME_LEN] = { 0 };
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;
	unsigned long flags = 0;

	ASCDPRT("%s: tx wait idle timer is timeout.\n", tx->cfg.name);
	spin_lock_irqsave(&tx->slock, flags);
	if (0 == tx->ready_hold) {
		spin_unlock_irqrestore(&tx->slock, flags);
		snprintf(path, ASC_NAME_LEN, "%s.%s", tx->cfg.name,
			 ASC_TX_AUTO_USER);
		asc_tx_put_ready(path, 0);
	} else {
		atomic_set(&tx->delay_sleep, 1);
		spin_unlock_irqrestore(&tx->slock, flags);
		ASCPRT("%s: has user.\n", tx->cfg.name);
	}
}

static void asc_tx_wait_sleep_timer(unsigned long data)
{
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	/*ASCDPRT("%s tx wait sleep timer is timeout.\n", tx->name); */
	asc_tx_event_send(tx, AP_TX_EVENT_IDLE_TIMEOUT);
}

static void asc_tx_wait_after_cp_sleep(unsigned long data)
{
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	atomic_set(&tx->trigger_cp_sleep, 0);
}

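/*
 * Timers used by the tx handle: timer_wait_ready bounds the wait for
 * cp_ready after asserting ap_wake_cp (AP_TX_EVENT_WAIT_TIMEOUT);
 * timer_wait_idle implements the auto-sleep delay above; timer_wait_sleep
 * turns the idle state into AP_TX_EVENT_IDLE_TIMEOUT; and
 * timer_wait_after_cp_sleep keeps a short gap between putting the CBP to
 * sleep and waking it again (see asc_tx_wake_cp()/asc_tx_sleep_cp()).
 */
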
static int asc_tx_handle_init(struct asc_tx_handle *tx)
{
	int ret = 0;
	char *name = NULL;
	struct asc_config *cfg = &tx->cfg;

	spin_lock_init(&tx->ready_slock);
	if (((cfg->gpio_ready) & 0xFFFF) >= 0) {
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_mask(cfg->gpio_ready);
#endif
		c2k_gpio_direction_input_for_irq(cfg->gpio_ready);
		c2k_gpio_set_irq_type(cfg->gpio_ready,
				      IRQF_TRIGGER_RISING |
				      IRQF_TRIGGER_FALLING);
		ret = c2k_gpio_request_irq(cfg->gpio_ready,
					   asc_irq_cp_indicate_state,
					   IRQF_SHARED,
					   "cp_indicate_state", tx);
#if defined(CONFIG_MTK_LEGACY)
		c2k_gpio_irq_unmask(cfg->gpio_ready);
#endif
		if (ret < 0) {
			ASCPRT("fail to request irq for %s:cp_ready\n",
			       cfg->name);
			goto err_req_irq_cp_indicate_state;
		}
	}
	atomic_set(&tx->trigger_cp_sleep, 0);
	setup_timer(&tx->timer_wait_after_cp_sleep, asc_tx_wait_after_cp_sleep,
		    (unsigned long)tx);
	asc_tx_sleep_cp(tx);
	tx->auto_delay = ASC_TX_AUTO_DELAY_TIME;
	tx->table = asc_tx_table;
	mutex_init(&tx->mlock);
	INIT_LIST_HEAD(&tx->event_q);
	INIT_LIST_HEAD(&tx->user_list);
	spin_lock_init(&tx->slock);
	spin_lock_init(&tx->user_count_lock);
	name = kzalloc(ASC_NAME_LEN, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		ASCPRT("%s: no memory to malloc for wake lock name\n",
		       __func__);
		goto err_malloc_name;
	}
	snprintf(name, ASC_NAME_LEN, "asc_tx_%s", tx->cfg.name);
	wake_lock_init(&tx->wlock, WAKE_LOCK_SUSPEND, name);
	init_waitqueue_head(&tx->wait);
	init_waitqueue_head(&tx->wait_tx_state);
	setup_timer(&tx->timer_wait_ready, asc_tx_wait_ready_timer,
		    (unsigned long)tx);
	setup_timer(&tx->timer_wait_idle, asc_tx_wait_idle_timer,
		    (unsigned long)tx);
	setup_timer(&tx->timer_wait_sleep, asc_tx_wait_sleep_timer,
		    (unsigned long)tx);
	atomic_set(&tx->state, AP_TX_ST_SLEEP);
	atomic_set(&tx->count, 0);
	atomic_set(&tx->sleeping, 0);
	atomic_set(&tx->delay_sleep, 0);
	tx->ready_hold = 0;
	INIT_WORK(&tx->ntf_work, asc_tx_notifier_work);
	kthread_run(asc_tx_event_thread, tx, "C2K_TX_ASC");
	return 0;

/*err_create_tx_event_thread:*/
err_malloc_name:
	kfree(name);
	/* free the cp_indicate_state irq requested above (mirrors the rx unwind) */
	/*if(cfg->gpio_ready >= 0) */
	if (((cfg->gpio_ready) & 0xFFFF) >= 0)
		free_irq(c2k_gpio_to_irq(cfg->gpio_ready), tx);
err_req_irq_cp_indicate_state:
	/*err_request_gpio_cp_ready:*/
	/*err_request_gpio_ap_wake_cp:*/
	return ret;
}

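/*
 * The four handlers below are dispatched through asc_tx_table[] by
 * asc_tx_event_thread(): SLEEP handles the initial wake request,
 * WAIT_READY handles the CBP answer (with retries on timeout), READY waits
 * for the stop condition, and IDLE is a grace period before sleeping again.
 */
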
static int asc_tx_handle_sleep(void *data, int event)
{
	int ret = 0;
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	/*ASCDPRT("Tx(%s): process event(%d) in state(%s).\n",
	   tx->name, event, tx->table[atomic_read(&tx->state)].name); */
	if (AP_TX_ST_SLEEP != atomic_read(&tx->state))
		return 0;
	switch (event) {
	case AP_TX_EVENT_REQUEST:
		wake_lock(&tx->wlock);
		asc_tx_wake_cp(tx);
		/*if(tx->cfg.gpio_ready >= 0) */
		if (((tx->cfg.gpio_ready) & 0xFFFF) >= 0) {
			mod_timer(&tx->timer_wait_ready,
				  jiffies +
				  msecs_to_jiffies(ASC_TX_WAIT_READY_TIME));
			atomic_set(&tx->state, AP_TX_ST_WAIT_READY);
			if (asc_tx_cp_be_ready(tx)) {
				/*debounce wait, make sure CBP is really ready */
				mdelay(ASC_TX_DEBOUNCE_TIME);
				if (asc_tx_cp_be_ready(tx)) {
					ASCDPRT("Tx:cp %s was ready now.\n",
						tx->cfg.name);
					asc_tx_handle_wait_ready(tx,
								 AP_TX_EVENT_CP_READY);
				}
			}
		} else {
			mdelay(ASC_TX_DEBOUNCE_TIME);
			atomic_set(&tx->state, AP_TX_ST_WAIT_READY);
			asc_tx_handle_wait_ready(tx, AP_TX_EVENT_CP_READY);
		}
		break;
	default:
		ASCDPRT("Tx: ignore event %d in state(%s)", event,
			tx->table[atomic_read(&tx->state)].name);
	}
	/*ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name); */
	return ret;
}

static int asc_tx_handle_wait_ready(void *data, int event)
{
	int ret = 0;
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	if (AP_TX_ST_WAIT_READY != atomic_read(&tx->state))
		return 0;
	/*ASCDPRT("Tx(%s): process event(%d) in state(%s).\n",
	   tx->name, event, tx->table[atomic_read(&tx->state)].name); */
	switch (event) {
	case AP_TX_EVENT_CP_READY:
		if (asc_debug == 1)
			asc_debug = 0;
		del_timer(&tx->timer_wait_ready);
		tx->wait_try = 0;
		atomic_set(&tx->state, AP_TX_ST_READY);
		wake_up_interruptible_all(&tx->wait_tx_state);
		/*if(asc_tx_refer(tx, ASC_TX_AUTO_USER) > 0){ */
		asc_tx_trig_busy(tx);
		/*} */
		asc_tx_notifier(tx, ASC_NTF_TX_READY);
		break;
	case AP_TX_EVENT_WAIT_TIMEOUT:
		ASCPRT("Tx: %s wait cp ready timeout, try=%d.\n", tx->cfg.name,
		       tx->wait_try);
		if (asc_debug == 0)
			asc_debug = 1;
		asc_tx_sleep_cp(tx);
		mdelay(ASC_TX_RETRY_DELAY);	/*delay to create a pulse on the wake line */
		atomic_set(&tx->state, AP_TX_ST_SLEEP);
		if (tx->wait_try++ <= ASC_TX_TRY_TIMES) {
			asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		} else {
			tx->wait_try = 0;
			atomic_set(&tx->state, AP_TX_ST_SLEEP);
			asc_tx_refer_clear(tx);
			wake_up_interruptible_all(&tx->wait_tx_state);
			wake_unlock(&tx->wlock);
			asc_tx_notifier(tx, ASC_NTF_TX_UNREADY);
			ASCPRT("retries exhausted to wake %s.\n", tx->cfg.name);
		}
		break;
	case AP_TX_EVENT_STOP:
		asc_tx_sleep_cp(tx);
		del_timer(&tx->timer_wait_ready);
		tx->wait_try = 0;
		atomic_set(&tx->state, AP_TX_ST_SLEEP);
		atomic_set(&tx->sleeping, 0);
		wake_unlock(&tx->wlock);
		wake_up_interruptible_all(&tx->wait_tx_state);
		break;
	default:
		ASCDPRT("Tx: ignore event %d in state(%s)", event,
			tx->table[atomic_read(&tx->state)].name);
	}
	/*ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name); */
	return ret;
}

static int asc_tx_handle_ready(void *data, int event)
{
	int ret = 0;
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	if (AP_TX_ST_READY != atomic_read(&tx->state))
		return 0;
	/*ASCDPRT("Tx(%s): process event(%d) in state(%s).\n",
	   tx->name, event, tx->table[atomic_read(&tx->state)].name); */
	switch (event) {
	case AP_TX_EVENT_STOP:
		del_timer(&tx->timer_wait_idle);
		asc_tx_sleep_cp(tx);
		atomic_set(&tx->state, AP_TX_ST_SLEEP);
		atomic_set(&tx->sleeping, 0);
		wake_unlock(&tx->wlock);
		/*mod_timer(&tx->timer_wait_sleep, jiffies + msecs_to_jiffies(ASC_TX_WAIT_IDLE_TIME)); */
		break;
	default:
		ASCDPRT("Tx: ignore event %d in state(%s)", event,
			tx->table[atomic_read(&tx->state)].name);
	}
	/*ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name); */
	return ret;
}

/* Ignore the idle state, wait for a while to let CBP go to sleep */
static int asc_tx_handle_idle(void *data, int event)
{
	int ret = 0;
	struct asc_tx_handle *tx = (struct asc_tx_handle *)data;

	if (AP_TX_ST_IDLE != atomic_read(&tx->state))
		return 0;
	/*ASCDPRT("Tx(%s): process event(%d) in state(%s).\n",
	   tx->name, event, tx->table[atomic_read(&tx->state)].name); */
	switch (event) {
	case AP_TX_EVENT_IDLE_TIMEOUT:
		atomic_set(&tx->state, AP_TX_ST_SLEEP);
		wake_unlock(&tx->wlock);
		break;
	case AP_TX_EVENT_REQUEST:
		del_timer(&tx->timer_wait_sleep);
		atomic_set(&tx->state, AP_TX_ST_SLEEP);
		/*loop back to the SLEEP handler */
		asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		break;
	default:
		ASCDPRT("Tx: ignore event %d in state(%s)", event,
			tx->table[atomic_read(&tx->state)].name);
	}
	/*ASCDPRT("Tx(%s): go into state(%s).\n", tx->name, tx->table[atomic_read(&tx->state)].name); */
	return ret;
}

static void asc_tx_handle_reset(struct asc_tx_handle *tx)
{
	unsigned long flags;

	ASCDPRT("%s %s\n", __func__, tx->cfg.name);
	del_timer(&tx->timer_wait_ready);
	del_timer(&tx->timer_wait_idle);
	del_timer(&tx->timer_wait_sleep);
	spin_lock_irqsave(&tx->slock, flags);
	INIT_LIST_HEAD(&tx->event_q);
	spin_unlock_irqrestore(&tx->slock, flags);
	asc_tx_sleep_cp(tx);
	atomic_set(&tx->state, AP_TX_ST_SLEEP);
	wake_up_interruptible_all(&tx->wait_tx_state);
	wake_unlock(&tx->wlock);
}

/**
 * asc_tx_reset - reset the tx handle
 * @name: the config name for the handle
 */
void asc_tx_reset(const char *name)
{
	struct asc_tx_handle *tx = NULL;

	tx = asc_tx_handle_lookup(name);
	if (tx) {
		asc_tx_event_send(tx, AP_TX_EVENT_RESET);
#ifndef CONFIG_EVDO_DT_VIA_SUPPORT
		c2k_reset_tx_gpio_ready(tx->cfg.gpio_ready);
		pr_debug("[C2K] reset tx handler!");
#endif
	}
}

/**
 * asc_tx_set_auto_delay - change the delay time for auto ready
 * @name: the config name for the handle
 * @delay: the time for auto ready, applied only when greater than zero
 *
 * return 0 ok, others be error
 */
int asc_tx_set_auto_delay(const char *name, int delay)
{
	int ret = 0;
	unsigned long flags;
	struct asc_tx_handle *tx;

	tx = asc_tx_handle_lookup(name);
	if (!tx) {
		ret = -ENODEV;
		goto end;
	}
	if (delay > 0) {
		spin_lock_irqsave(&tx->slock, flags);
		tx->auto_delay = delay;
		spin_unlock_irqrestore(&tx->slock, flags);
	}
end:
	return ret;
}

/**
 * asc_tx_check_ready - check whether the tx path is already ready
 * @name: the config name for the handle
 *
 * return 1 waken, 0 not, others be error
 */
int asc_tx_check_ready(const char *name)
{
	int ret = 0;
	struct asc_tx_handle *tx;

	tx = asc_tx_handle_lookup(name);
	if (NULL == tx)
		return -ENODEV;
	ret = atomic_read(&tx->state);
	if ((ret == AP_TX_ST_READY) && (!atomic_read(&tx->sleeping)))
		ret = 1;
	else
		ret = 0;
	return ret;
}

/**
 * asc_tx_user_count - get the reference count of a user or of the handle
 * @path: (handle name).[user name]
 *
 * If the user name is omitted, return the total count of the tx handle,
 * otherwise return the count of that tx user.
 */
int asc_tx_user_count(const char *path)
{
	const char *name;
	char hname[ASC_NAME_LEN] = { 0 };
	struct asc_tx_handle *tx = NULL;

	name = strchr(path, '.');
	if (name) {
		memcpy(hname, path, min((int)(name - path), (int)(ASC_NAME_LEN - 1)));
		name++;
	} else {
		strncpy(hname, path, ASC_NAME_LEN - 1);
	}
	tx = asc_tx_handle_lookup(hname);
	if (NULL == tx)
		return -ENODEV;
	return asc_tx_refer(tx, name);
}

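/*
 * Typical client usage of the exported tx API (illustrative sketch only;
 * the user name "ctl", the handle name "asc0" and the notifier callback
 * are made-up examples, not names defined by this driver):
 *
 *	struct asc_infor infor = {
 *		.name = "ctl",
 *		.data = priv,
 *		.notifier = my_asc_notifier,
 *	};
 *	asc_tx_add_user("asc0", &infor);
 *	asc_tx_get_ready("asc0.ctl", 1);	wake CBP, block until ready
 *	... transmit ...
 *	asc_tx_put_ready("asc0.ctl", 0);	drop the reference, allow auto sleep
 *	asc_tx_del_user("asc0.ctl");
 */
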

/**
 * asc_tx_add_user - add a user for the tx handle
 * @name: the config name for the handle
 * @infor: the user information
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_tx_add_user(const char *name, struct asc_infor *infor)
{
	int ret = 0;
	unsigned long flags = 0;
	struct asc_tx_handle *tx;
	struct asc_user *user;

	tx = asc_tx_handle_lookup(name);
	if (NULL == tx)
		return -ENODEV;
	user = asc_tx_user_lookup(tx, infor->name);
	if (NULL == user) {
		user = kzalloc(sizeof(*user), GFP_KERNEL);
		if (!user) {
			ASCPRT("No memory to create new user reference.\n");
			ret = -ENOMEM;
			goto error;
		}
		user->infor.data = infor->data;
		user->infor.notifier = infor->notifier;
		strncpy(user->infor.name, infor->name, ASC_NAME_LEN - 1);
		atomic_set(&user->count, 0);
		spin_lock_irqsave(&tx->slock, flags);
		list_add_tail(&user->node, &tx->user_list);
		spin_unlock_irqrestore(&tx->slock, flags);
	} else {
		ASCPRT("%s error: user %s already exists!\n", __func__,
		       infor->name);
		ret = -EINVAL;
	}
error:
	return ret;
}
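
/*
 * Illustrative sketch (not part of this driver): how a client might add a
 * tx user before calling asc_tx_get_ready()/asc_tx_put_ready(). The handle
 * name "data" and the user name "my_dev" are made-up example names, and
 * the notifier is left NULL since the tx path does not require one here.
 */
#if 0
static int example_add_tx_user(void)
{
	struct asc_infor infor;

	memset(&infor, 0, sizeof(infor));
	strncpy(infor.name, "my_dev", ASC_NAME_LEN - 1);
	infor.data = NULL;
	infor.notifier = NULL;
	return asc_tx_add_user("data", &infor);
}
#endif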

/**
 * asc_tx_del_user - delete a user from the tx handle
 * @path: (handle name).(user name)
 *
 * No return value.
 */
void asc_tx_del_user(const char *path)
{
	unsigned long flags = 0;
	char hname[ASC_NAME_LEN] = { 0 };
	const char *name;
	struct asc_user *user = NULL;
	struct asc_tx_handle *tx = NULL;

	name = strchr(path, '.');
	if (name) {
		memcpy(hname, path, min((int)(name - path), (int)(ASC_NAME_LEN - 1)));
		name++;
	} else {
		ASCPRT("%s: invalid path %s\n", __func__, path);
		return;
	}
	/* reserved users can not be deleted */
	if (!strncmp(name, ASC_TX_SYSFS_USER, ASC_NAME_LEN - 1) ||
	    !strncmp(name, ASC_TX_AUTO_USER, ASC_NAME_LEN - 1)) {
		ASCPRT("Can not delete reserved user %s\n", path);
		return;
	}
	tx = asc_tx_handle_lookup(hname);
	if (NULL == tx)
		return;
	user = asc_tx_user_lookup(tx, name);
	if (user) {
		/* put ready if the user had operated the tx handle */
		if (atomic_read(&user->count) > 0) {
			atomic_set(&user->count, 1);
			asc_tx_put_ready(path, 0);
		}
		spin_lock_irqsave(&tx->slock, flags);
		list_del(&user->node);
		spin_unlock_irqrestore(&tx->slock, flags);
		kfree(user);
	}
}

/**
 * asc_tx_get_ready - wake the CBP and hold it awake for tx
 * @path: (handle name).(user name)
 * @sync: whether to block until the CBP is awake
 *
 * This function tries to wake the CBP and increments the reference count.
 * If @sync is set it blocks until the CBP is awake; otherwise it only
 * triggers the wake action and cannot guarantee that the CBP is awake
 * when it returns.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_tx_get_ready(const char *path, int sync)
{
	int ret = 0;
	int try = 1;
	char hname[ASC_NAME_LEN] = { 0 };
	const char *name;
	struct asc_tx_handle *tx = NULL;
	unsigned long flags = 0;

	name = strchr(path, '.');
	if (name) {
		memcpy(hname, path, min((int)(name - path), (int)(ASC_NAME_LEN - 1)));
		name++;
	} else {
		ASCPRT("Invalid path %s\n", path);
		return -EINVAL;
	}
	tx = asc_tx_handle_lookup(hname);
	if (NULL == tx)
		return -ENODEV;
	if (!strncmp(name, ASC_TX_AUTO_USER, strlen(ASC_TX_AUTO_USER))) {
		ASCPRT("%s:tx user name %s is reserved\n", __func__, name);
		return -EINVAL;
	}
	spin_lock_irqsave(&tx->user_count_lock, flags);
	if (asc_tx_get_user(tx, name) < 0) {
		ASCPRT("%s:tx user name %s is unknown\n", __func__, name);
		spin_unlock_irqrestore(&tx->user_count_lock, flags);
		return -ENODEV;
	}
	ASCDPRT("%s: %s=%d, %s=%d\n", __func__,
		tx->cfg.name, asc_tx_refer(tx, NULL), path,
		asc_tx_refer(tx, name));
	spin_unlock_irqrestore(&tx->user_count_lock, flags);
	switch (atomic_read(&tx->state)) {
	case AP_TX_ST_SLEEP:
		/* to make CP wake ASAP, call the handler directly */
		if (!list_empty(&tx->event_q))
			asc_tx_handle_sleep(tx, AP_TX_EVENT_REQUEST);
		else
			asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		break;
	case AP_TX_ST_IDLE:
		asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		break;
	case AP_TX_ST_WAIT_READY:
		break;
	case AP_TX_ST_READY:
		if (atomic_read(&tx->sleeping)) {
			ASCDPRT("%s: tx state is sleeping\n", __func__);
			asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		}
		break;
	default:
		ASCPRT("unknown tx state %d\n", atomic_read(&tx->state));
		return -EINVAL;
	}
	if (sync) {
		if ((AP_TX_ST_READY != atomic_read(&tx->state))
		    || atomic_read(&tx->sleeping)) {
			do {
				wait_event_interruptible(tx->wait_tx_state,
					(AP_TX_ST_READY == atomic_read(&tx->state))
					&& !atomic_read(&tx->sleeping));
				if (AP_TX_ST_READY == atomic_read(&tx->state)) {
					break;
				} else if (try < ASC_TX_TRY_TIMES) {
					asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
					try++;
				} else {
					ret = -EBUSY;
					break;
				}
			} while (1);
		}
	}
	return ret;
}
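
/*
 * Illustrative sketch (not part of this driver): a typical get/put pair
 * around a transmission. The path "data.my_dev" assumes a tx handle named
 * "data" and a user named "my_dev" added earlier with asc_tx_add_user();
 * both names are made up for the example. Passing sync=1 blocks until the
 * CBP is actually awake before the data is sent.
 */
#if 0
static int example_tx_transfer(void)
{
	int ret;

	ret = asc_tx_get_ready("data.my_dev", 1);
	if (ret < 0)
		return ret;

	/* ... transmit data to the CBP here ... */

	/* drop the reference; the CBP may sleep once all users have put */
	return asc_tx_put_ready("data.my_dev", 0);
}
#endif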

/**
 * asc_tx_put_ready - release the CBP so that it may sleep
 * @path: (config name).[user name]
 * @sync: whether to block until the CBP is asleep
 *
 * This function drops the user's reference and, once the last reference
 * is released, lets the CBP go to sleep. If @sync is set it blocks until
 * the CBP is asleep; otherwise it only triggers the sleep action and
 * cannot guarantee that the CBP is asleep when it returns. If the total
 * reference count does not drop to zero, it only decrements the count.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_tx_put_ready(const char *path, int sync)
{
	int ret = 0;
	char hname[ASC_NAME_LEN] = { 0 };
	const char *name;
	struct asc_tx_handle *tx = NULL;
	unsigned long flags = 0;

	name = strchr(path, '.');
	if (name) {
		memcpy(hname, path, min((int)(name - path), (int)(ASC_NAME_LEN - 1)));
		name++;
	} else {
		ASCPRT("Invalid path %s\n", path);
		return -EINVAL;
	}
	tx = asc_tx_handle_lookup(hname);
	if (NULL == tx)
		return -ENODEV;
	spin_lock_irqsave(&tx->user_count_lock, flags);
	if (asc_tx_put_user(tx, name) < 0) {
		ASCPRT("%s:tx user name %s is unknown\n", __func__, name);
		spin_unlock_irqrestore(&tx->user_count_lock, flags);
		return -ENODEV;
	}
	ASCDPRT("%s: %s=%d, %s=%d\n", __func__,
		tx->cfg.name, asc_tx_refer(tx, NULL), path,
		asc_tx_refer(tx, name));
	/* count is not 0 yet, so do nothing more */
	if (asc_tx_refer(tx, NULL) != 0) {
		ASCPRT("%s:asc_tx_refer user count is not 0\n", __func__);
		spin_unlock_irqrestore(&tx->user_count_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&tx->user_count_lock, flags);
	switch (atomic_read(&tx->state)) {
	case AP_TX_ST_SLEEP:
		break;
	case AP_TX_ST_WAIT_READY:
	case AP_TX_ST_READY:
		atomic_set(&tx->sleeping, 1);
		asc_tx_event_send(tx, AP_TX_EVENT_STOP);
		break;
	case AP_TX_ST_IDLE:
		asc_tx_event_send(tx, AP_TX_EVENT_IDLE_TIMEOUT);
		break;
	default:
		ASCPRT("unknown tx state %d\n", atomic_read(&tx->state));
		return -EINVAL;
	}
	if (sync) {
		if (AP_TX_ST_SLEEP != atomic_read(&tx->state)) {
			wait_event_interruptible(tx->wait_tx_state,
				AP_TX_ST_SLEEP == atomic_read(&tx->state));
			if (AP_TX_ST_SLEEP != atomic_read(&tx->state))
				ret = -EBUSY;
		}
	}
	return ret;
}

/**
 * asc_tx_auto_ready - call before each CBP operation when auto sleep is used
 * @name: the config name for the handle
 * @sync: whether to block until the CBP is awake
 *
 * This function tries to wake the CBP and kicks the tx state machine.
 * If @sync is set it blocks until the CBP is awake; otherwise it only
 * triggers the wake action and cannot guarantee that the CBP is awake
 * when it returns.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_tx_auto_ready(const char *name, int sync)
{
	int ret = 0;
	int try = 1;
	long timeout = 1;
	long cur_timeout = 0;
	struct asc_user *user;
	struct asc_tx_handle *tx;
	unsigned long flags = 0;

	if (!name) {
		ASCPRT("%s:Invalid name\n", __func__);
		return -EINVAL;
	}
	tx = asc_tx_handle_lookup(name);
	if (NULL == tx)
		return -ENODEV;
	user = asc_tx_user_lookup(tx, ASC_TX_AUTO_USER);
	if (!user)
		return -ENODEV;
	spin_lock_irqsave(&tx->user_count_lock, flags);
	if (atomic_read(&user->count) == 0) {
		ASCDPRT("%s: %s=%d, %s=%d\n", __func__,
			tx->cfg.name, asc_tx_refer(tx, NULL), ASC_TX_AUTO_USER,
			asc_tx_refer(tx, ASC_TX_AUTO_USER));
		atomic_inc(&user->count);
	}
	spin_unlock_irqrestore(&tx->user_count_lock, flags);
	switch (atomic_read(&tx->state)) {
	case AP_TX_ST_SLEEP:
		/* to make CP wake ASAP, call the handler directly */
		if (!list_empty(&tx->event_q))
			asc_tx_handle_sleep(tx, AP_TX_EVENT_REQUEST);
		else
			asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		break;
	case AP_TX_ST_IDLE:
		asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
		break;
	case AP_TX_ST_WAIT_READY:
		break;
	case AP_TX_ST_READY:
		/*
		 * NOTE: the tx idle timer may be running concurrently on
		 * another CPU, which could queue a STOP message behind this
		 * REQUEST. To avoid that, make sure the timer has expired
		 * before deciding what to send.
		 */
		for (;;) {
			ret = try_to_del_timer_sync(&tx->timer_wait_idle);
			if (ret > 0) { /* pending timer: just trig busy */
				asc_tx_trig_busy(tx);
				break;
			} else if (ret == 0) { /* expired timer: send REQUEST to reactivate from the STOP state */
				asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
				break;
			}
			/* running timer: wait for it to expire */
			mdelay(1);
		}
		ret = 0;
		break;
	default:
		ASCPRT("unknown tx state %d\n", atomic_read(&tx->state));
		return -EINVAL;
	}
	if (sync) {
		if ((AP_TX_ST_READY != atomic_read(&tx->state))
		    || atomic_read(&tx->sleeping)) {
			do {
				cur_timeout = wait_event_interruptible_timeout(
					tx->wait_tx_state,
					(AP_TX_ST_READY == atomic_read(&tx->state))
					&& !atomic_read(&tx->sleeping),
					msecs_to_jiffies(20));
				if (cur_timeout == 0)
					cur_timeout = msecs_to_jiffies(20);
				timeout += cur_timeout;
				/*interruptible_sleep_on(&tx->wait_tx_state);*/
				if (AP_TX_ST_READY == atomic_read(&tx->state)) {
					if (timeout >
					    msecs_to_jiffies(ASC_TX_WAIT_READY_TIME)) {
						/* unlikely, unless sleeping has some unknown bug */
						ASCDPRT("%s %s now is ready, but the wait time expired\n",
							__func__, tx->cfg.name);
						break;
					}
					if (atomic_read(&tx->sleeping)) {
						/* sleep handling is on the way, wait another 20 ms for it to finish */
						ASCDPRT("%s %s sleep handling is on the way, wait for it to finish\n",
							__func__, tx->cfg.name);
						continue;
					} else {
						/* likely: CP is ready now */
						break;
					}
				} else if (try < ASC_TX_TRY_TIMES) {
					if (timeout <=
					    msecs_to_jiffies(ASC_TX_WAIT_READY_TIME)) {
						/* the wait-for-cp-ready period has not expired yet, wait another 20 ms */
						continue;
					}
					/* one full wait-for-cp-ready period has expired, retry once more */
					asc_tx_event_send(tx, AP_TX_EVENT_REQUEST);
					timeout = 0;
					try++;
				} else {
					ret = -EBUSY;
					ASCDPRT("%s %s wait for cp ready failed\n",
						__func__, tx->cfg.name);
					break;
				}
			} while (1);
		}
	}
	return ret;
}
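
/*
 * Illustrative sketch (not part of this driver): using the auto-ready path
 * instead of explicit get/put. The handle name "data" and the 500 ms delay
 * are made-up example values. Under this scheme the driver is expected to
 * release the auto user by itself once the idle period (tx->auto_delay)
 * passes with no further traffic.
 */
#if 0
static int example_tx_auto(void)
{
	int ret;

	/* optionally lengthen the idle delay before auto sleep */
	asc_tx_set_auto_delay("data", 500);

	/* call before every burst of tx traffic; sync=1 waits for CP ready */
	ret = asc_tx_auto_ready("data", 1);
	if (ret < 0)
		return ret;

	/* ... transmit data to the CBP here ... */
	return 0;
}
#endif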

/* inc: 1 to increment, 0 to decrement */
int asc_tx_ready_count(const char *name, int inc)
{
	struct asc_tx_handle *tx = NULL;
	char path[ASC_NAME_LEN] = { 0 };
	unsigned long flags = 0;

	if (!name) {
		ASCPRT("%s:Invalid name\n", __func__);
		return -EINVAL;
	}
	tx = asc_tx_handle_lookup(name);
	if (NULL == tx)
		return -ENODEV;
	spin_lock_irqsave(&tx->slock, flags);
	if (inc) {
		tx->ready_hold++;
	} else {
		tx->ready_hold--;
		if (tx->ready_hold == 0 && atomic_read(&tx->delay_sleep)) {
			atomic_set(&tx->delay_sleep, 0);
			spin_unlock_irqrestore(&tx->slock, flags);
			ASCDPRT("%s:asc_tx_put_ready for %s\n", __func__,
				tx->cfg.name);
			snprintf(path, ASC_NAME_LEN, "%s.%s", tx->cfg.name,
				 ASC_TX_AUTO_USER);
			asc_tx_put_ready(path, 0);
			return 0;
		}
	}
	spin_unlock_irqrestore(&tx->slock, flags);
	return 0;
}

static void asc_rx_handle_reset(struct asc_rx_handle *rx)
{
	unsigned long flags;

	ASCDPRT("%s %s\n", __func__, rx->cfg.name);
	del_timer(&rx->timer);
	wake_unlock(&rx->wlock);
	asc_rx_indicate_sleep(rx);
	atomic_set(&rx->state, AP_RX_ST_SLEEP);
	spin_lock_irqsave(&rx->slock, flags);
	INIT_LIST_HEAD(&rx->event_q);
	spin_unlock_irqrestore(&rx->slock, flags);
}

/**
 * asc_rx_reset - reset the rx handle
 * @name: the config name for the handle
 *
 * No return value.
 */
void asc_rx_reset(const char *name)
{
	struct asc_rx_handle *rx = NULL;

	rx = asc_rx_handle_lookup(name);
	if (rx)
		asc_rx_event_send(rx, AP_RX_EVENT_RESET);
}

/**
 * asc_rx_add_user - add a user for the rx handle
 * @name: the config name for the handle
 * @infor: the user information
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_rx_add_user(const char *name, struct asc_infor *infor)
{
	int ret = 0;
	unsigned long flags = 0;
	struct asc_rx_handle *rx;
	struct asc_user *user;

	rx = asc_rx_handle_lookup(name);
	if (NULL == rx)
		return -ENODEV;
	user = asc_rx_user_lookup(rx, infor->name);
	if (NULL == user) {
		user = kzalloc(sizeof(*user), GFP_KERNEL);
		if (!user) {
			ASCPRT("No memory to create new user reference.\n");
			ret = -ENOMEM;
			goto error;
		}
		user->infor.data = infor->data;
		user->infor.notifier = infor->notifier;
		strncpy(user->infor.name, infor->name, ASC_NAME_LEN - 1);
		atomic_set(&user->count, 0);
		spin_lock_irqsave(&rx->slock, flags);
		list_add_tail(&user->node, &rx->user_list);
		spin_unlock_irqrestore(&rx->slock, flags);
		if (AP_RX_ST_WAIT_READY == atomic_read(&rx->state)) {
			if (infor->notifier) {
				infor->notifier(ASC_NTF_RX_PREPARE,
						infor->data);
			}
		}
	} else {
		ASCPRT("%s error: user %s already exists!\n", __func__,
		       infor->name);
		ret = -EINVAL;
	}
error:
	return ret;
}

/**
 * asc_rx_del_user - delete a user from the rx handle
 * @path: (config name).[user name]
 *
 * No return value.
 */
void asc_rx_del_user(const char *path)
{
	unsigned long flags = 0;
	const char *name;
	char hname[ASC_NAME_LEN] = { 0 };
	struct asc_user *user;
	struct asc_rx_handle *rx;

	name = strchr(path, '.');
	if (name) {
		memcpy(hname, path, min((int)(name - path), (int)(ASC_NAME_LEN - 1)));
		name++;
	} else {
		ASCPRT("%s: Invalid path %s\n", __func__, path);
		return;
	}
	rx = asc_rx_handle_lookup(hname);
	if (NULL == rx)
		return;
	user = asc_rx_user_lookup(rx, name);
	if (user) {
		atomic_set(&user->count, 0);
		spin_lock_irqsave(&rx->slock, flags);
		list_del(&user->node);
		spin_unlock_irqrestore(&rx->slock, flags);
		kfree(user);
		if (list_empty(&rx->user_list))
			asc_rx_handle_reset(rx);
	}
}

/**
 * asc_rx_confirm_ready - report the AP rx state to the CBP
 * @name: the config name of the rx handle
 * @ready: whether the AP is ready to receive data
 *
 * After the CBP requests the AP to receive data, this function is used
 * to tell the CBP whether the AP is ready to receive.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_rx_confirm_ready(const char *name, int ready)
{
	struct asc_rx_handle *rx = NULL;

	rx = asc_rx_handle_lookup(name);
	if (!rx) {
		ASCDPRT("%s: name %s is unknown\n", __func__, name);
		return -ENODEV;
	}
	ASCDPRT("Rx(%s) confirm ready=%d\n", rx->cfg.name, ready);
	return asc_rx_event_send(rx,
				 ready ? AP_RX_EVENT_AP_READY :
				 AP_RX_EVENT_AP_UNREADY);
}
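
/*
 * Illustrative sketch (not part of this driver): an rx user confirming
 * readiness from its notifier. ASC_NTF_RX_PREPARE asks the device to get
 * ready to receive; once it is, it answers with asc_rx_confirm_ready().
 * The handle name "data" and my_rx_notifier() are made-up example names,
 * and the notifier prototype is assumed to match the two-argument call
 * used in asc_rx_add_user() above.
 */
#if 0
static void my_rx_notifier(int event, void *data)
{
	if (event == ASC_NTF_RX_PREPARE) {
		/* ... wake up the device's receive path ... */
		asc_rx_confirm_ready("data", 1);
	}
}
#endif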

/**
 * asc_rx_check_on_start - avoid missing a CP wake request
 * @name: the name of the rx handle
 *
 * Before the rx user is registered, the CP may already have tried to wake
 * the AP. The AP would normally miss that request because the interrupt
 * handler is not registered yet. So when the tty device is opened, check
 * whether the CP has already requested a wake; if it has, answer with the
 * "AP READY" signal.
 */
int asc_rx_check_on_start(const char *name)
{
	int level;
	struct asc_config *cfg = NULL;
	struct asc_rx_handle *rx = NULL;
	int ret = 1;

	rx = asc_rx_handle_lookup(name);
	if (!rx) {
		ASCPRT("config %s does not exist.\n", name);
		return -EINVAL;
	}
	cfg = &(rx->cfg);
	level = !!c2k_gpio_get_value(cfg->gpio_wake);
	if (level == cfg->polar) {
		/* CP has requested an AP wake */
		if (AP_RX_ST_SLEEP == atomic_read(&rx->state)) {
			ASCDPRT("Rx(%s): check_on_start - send event AP_RX_EVENT_REQUEST.\n",
				cfg->name);
			ret = asc_rx_event_send(rx, AP_RX_EVENT_REQUEST);
		} else {
			ASCDPRT("Rx(%s): check_on_start - send event AP_RX_EVENT_AP_READY.\n",
				cfg->name);
			ret = asc_rx_event_send(rx, AP_RX_EVENT_AP_READY);
		}
	}
	return ret;
}

static ssize_t asc_debug_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	char *s = buf;

	s += sprintf(s, "%d\n", asc_debug);
	return s - buf;
}

static ssize_t asc_debug_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	asc_debug = val;
	return n;
}

static ssize_t asc_infor_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	char *s = buf;
	int val1, val2;
	struct asc_config *cfg;
	struct asc_infor *infor;
	struct asc_user *user = NULL, *tuser = NULL;
	struct asc_tx_handle *tx = NULL, *ttmp = NULL;
	struct asc_rx_handle *rx = NULL, *rtmp = NULL;

	list_for_each_entry_safe(tx, ttmp, &asc_tx_handle_list, node) {
		cfg = &tx->cfg;
		val1 = val2 = -1;
		/* if (cfg->gpio_wake >= 0) */
		if (((cfg->gpio_wake) & 0xFFFF) >= 0)
			val1 = !!c2k_gpio_get_value(cfg->gpio_wake);
		/* if (cfg->gpio_ready >= 0) */
		if (((cfg->gpio_ready) & 0xFFFF) >= 0)
			val2 = !!c2k_gpio_get_value(cfg->gpio_ready);
		s += sprintf(s,
			     "Tx %s: ref=%d, ap_wake_cp(%d)=%d, cp_ready(%d)=%d, polar=%d, auto_delay=%d ms\n",
			     cfg->name, asc_tx_refer(tx, NULL), cfg->gpio_wake,
			     val1, cfg->gpio_ready, val2, cfg->polar,
			     tx->auto_delay);
		list_for_each_entry_safe(user, tuser, &tx->user_list, node) {
			infor = &user->infor;
			s += sprintf(s, " user %s: ref=%d\n", infor->name,
				     atomic_read(&user->count));
		}
	}
	s += sprintf(s, "\n");
	list_for_each_entry_safe(rx, rtmp, &asc_rx_handle_list, node) {
		cfg = &rx->cfg;
		val1 = val2 = -1;
		/* if (cfg->gpio_wake >= 0) */
		if (((cfg->gpio_wake) & 0xFFFF) >= 0)
			val1 = !!c2k_gpio_get_value(cfg->gpio_wake);
		/* if (cfg->gpio_ready >= 0) */
		if (((cfg->gpio_ready) & 0xFFFF) >= 0)
			val2 = !!c2k_gpio_get_value(cfg->gpio_ready);
		s += sprintf(s,
			     "Rx %s: ref=%d, cp_wake_ap(%d)=%d, ap_ready(%d)=%d, polar=%d\n",
			     cfg->name, asc_rx_refer(rx, NULL), cfg->gpio_wake,
			     val1, cfg->gpio_ready, val2, cfg->polar);
		list_for_each_entry_safe(user, tuser, &rx->user_list, node) {
			infor = &user->infor;
			s += sprintf(s, " user %s: ref=%d\n", infor->name,
				     atomic_read(&user->count));
		}
	}
	return s - buf;
}

static ssize_t asc_infor_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	return n;
}

static ssize_t asc_refer_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	unsigned long flags;
	char *s = buf;
	struct asc_tx_handle *tx, *tmp, *t;

	tx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (tmp->kobj == kobj) {
			tx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (tx) {
		s += sprintf(s, "%d\n", asc_tx_refer(tx, ASC_TX_SYSFS_USER));
		return s - buf;
	}
	ASCPRT("%s read error\n", __func__);
	return -EINVAL;
}

static ssize_t asc_refer_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	unsigned long flags;
	char *p;
	int error = 0, len;
	char path[ASC_NAME_LEN] = { 0 };
	struct asc_tx_handle *tx, *tmp, *t;

	tx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (tmp->kobj == kobj) {
			tx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (tx) {
		p = memchr(buf, '\n', n);
		len = p ? p - buf : n;
		snprintf(path, ASC_NAME_LEN, "%s.%s", tx->cfg.name,
			 ASC_TX_SYSFS_USER);
		if (len == 3 && !strncmp(buf, "get", len))
			error = asc_tx_get_ready(path, 1);
		else if (len == 3 && !strncmp(buf, "put", len))
			error = asc_tx_put_ready(path, 1);
	}
	return error ? error : n;
}

static ssize_t asc_state_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	unsigned long flags;
	char *s = buf;
	struct asc_tx_handle *tx, *tmp, *t;

	tx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (tmp->kobj == kobj) {
			tx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (tx) {
		s += sprintf(s, "%s\n",
			     tx->table[atomic_read(&tx->state)].name);
		return s - buf;
	}
	ASCPRT("%s read error\n", __func__);
	return -EINVAL;
}

static ssize_t asc_state_store(struct kobject *kobj,
			       struct kobj_attribute *attr, const char *buf,
			       size_t n)
{
	return n;
}

static ssize_t asc_auto_ready_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	unsigned long flags;
	char *s = buf;
	struct asc_tx_handle *tx, *tmp, *t;

	tx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (tmp->kobj == kobj) {
			tx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (tx) {
		s += sprintf(s, "%d\n", tx->auto_delay);
		return s - buf;
	}
	ASCPRT("%s read error\n", __func__);
	return -EINVAL;
}

static ssize_t asc_auto_ready_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	int error = 0;
	long val;
	unsigned long flags;
	struct asc_tx_handle *tx, *tmp, *t;

	tx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_tx_handle_list, node) {
		if (tmp->kobj == kobj) {
			tx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (tx) {
		error = kstrtol(buf, 10, &val);
		if (error || (val < 0)) {
			error = -EINVAL;
			goto end;
		}
		if (val > 0) {
			spin_lock_irqsave(&tx->slock, flags);
			tx->auto_delay = val;
			spin_unlock_irqrestore(&tx->slock, flags);
		}
		error = asc_tx_auto_ready(tx->cfg.name, 1);
	} else {
		ASCPRT("%s write error\n", __func__);
		error = -EINVAL;
	}
end:
	return error ? error : n;
}

static ssize_t asc_confirm_ready_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	unsigned long flags;
	char *s = buf;
	struct asc_rx_handle *rx, *tmp, *t;

	rx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_rx_handle_list, node) {
		if (tmp->kobj == kobj) {
			rx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (rx)
		s += sprintf(s, "done\n");
	else
		s += sprintf(s, "null\n");
	return s - buf;
}

static ssize_t asc_confirm_ready_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int error = 0;
	long val;
	unsigned long flags;
	struct asc_rx_handle *rx, *tmp, *t;

	rx = tmp = NULL;
	spin_lock_irqsave(&hdlock, flags);
	list_for_each_entry_safe(tmp, t, &asc_rx_handle_list, node) {
		if (tmp->kobj == kobj) {
			rx = tmp;
			break;
		}
	}
	spin_unlock_irqrestore(&hdlock, flags);
	if (rx) {
		error = kstrtol(buf, 10, &val);
		if (error || (val < 0)) {
			error = -EINVAL;
			goto end;
		}
		error = asc_rx_confirm_ready(rx->cfg.name, !!val);
	} else {
		ASCPRT("%s write error\n", __func__);
		error = -EINVAL;
	}
end:
	return error ? error : n;
}

#define asc_attr(_name) \
static struct kobj_attribute _name##_attr = { \
	.attr = { \
		.name = __stringify(_name), \
		.mode = 0644, \
	}, \
	.show = asc_##_name##_show, \
	.store = asc_##_name##_store, \
}

asc_attr(debug);
asc_attr(infor);

static struct attribute *g_attr[] = {
	&debug_attr.attr,
	&infor_attr.attr,
	NULL,
};

static struct attribute_group g_attr_group = {
	.attrs = g_attr,
};

asc_attr(refer);
asc_attr(state);
asc_attr(auto_ready);

static struct attribute *tx_hd_attr[] = {
	&refer_attr.attr,
	&state_attr.attr,
	&auto_ready_attr.attr,
	NULL,
};

static struct attribute_group tx_hd_attr_group = {
	.attrs = tx_hd_attr,
};

asc_attr(confirm_ready);

static struct attribute *rx_hd_attr[] = {
	&confirm_ready_attr.attr,
	NULL,
};

static struct attribute_group rx_hd_attr_group = {
	.attrs = rx_hd_attr,
};

static struct platform_driver asc_driver = {
	.driver.name = "asc",
};

static struct platform_device asc_device = {
	.name = "asc",
};

/**
 * asc_rx_register_handle - register the rx handle
 * @cfg: the config for the handle
 *
 * A device that receives data from the CBP can register a notifier to
 * listen for events that follow the CBP state changes.
 * The ASC_PREPARE_RX_DATA event is sent when the CBP starts transmitting
 * data, so the device must be ready to work; the ASC_POST_RX_DATA event
 * is sent when the CBP stops transmitting, so the device may go to sleep.
 * The gpio for ap_ready can be -1, in which case it is ignored; use this
 * when the device can receive data from the CBP correctly at any time.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_rx_register_handle(struct asc_config *cfg)
{
	int ret = 0;
	unsigned long flags;
	struct asc_rx_handle *rx = NULL;

	if (NULL == asc_work_queue) {
		ASCPRT("%s: error, asc has not been initialized\n", __func__);
		return -EINVAL;
	}
	if (NULL == cfg)
		return -EINVAL;
	if (((cfg->gpio_wake) & 0xFFFF) < 0) {
		ASCPRT("%s: config %s gpio is invalid.\n", __func__, cfg->name);
		return -EINVAL;
	}
	rx = asc_rx_handle_lookup(cfg->name);
	if (rx) {
		ASCPRT("config %s already exists.\n", cfg->name);
		return -EINVAL;
	}
	rx = kzalloc(sizeof(struct asc_rx_handle), GFP_KERNEL);
	if (NULL == rx) {
		ASCPRT("No memory to alloc rx handle.\n");
		return -ENOMEM;
	}
	rx->cfg.gpio_ready = cfg->gpio_ready;
	rx->cfg.gpio_wake = cfg->gpio_wake;
	rx->cfg.polar = !!cfg->polar;
	strncpy(rx->cfg.name, cfg->name, ASC_NAME_LEN - 1);
	ret = asc_rx_handle_init(rx);
	if (ret < 0) {
		ASCPRT("fail to init rx handle %s\n", rx->cfg.name);
		kfree(rx);
		return -EINVAL;
	}
	rx->kobj = kobject_create_and_add(cfg->name, asc_kobj);
	if (!rx->kobj) {
		ASCPRT("fail to create rx handle %s kobject\n", rx->cfg.name);
		kfree(rx);
		return -ENOMEM;
	}
	/* add the handle to the asc list */
	spin_lock_irqsave(&hdlock, flags);
	list_add(&rx->node, &asc_rx_handle_list);
	spin_unlock_irqrestore(&hdlock, flags);
	ASCDPRT("Register rx handle %s\n", rx->cfg.name);
	return sysfs_create_group(rx->kobj, &rx_hd_attr_group);
}
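
/*
 * Illustrative sketch (not part of this driver): registering an rx handle
 * from the platform/modem glue code. The handle name "data", the GPIO
 * numbers and the polarity are made-up example values; real code would
 * take them from the board configuration.
 */
#if 0
static int example_register_rx(void)
{
	struct asc_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	strncpy(cfg.name, "data", ASC_NAME_LEN - 1);
	cfg.gpio_wake = 10;	/* CP-wakes-AP GPIO (example number) */
	cfg.gpio_ready = 11;	/* AP-ready GPIO, or -1 if not used */
	cfg.polar = 1;		/* active level of the wake signal */
	return asc_rx_register_handle(&cfg);
}
#endif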

/**
 * asc_tx_register_handle - register the tx handle for state changes
 * @cfg: the config for the handle
 *
 * The chip that exchanges data with the CBP must create a handle.
 * There is only one tx state handle between the AP and the CBP, because
 * all devices in the CBP are ready to receive data once the CBP has been
 * woken. Several rx state handles may exist, however, because different
 * devices in the AP may be woken individually. Each rx state handle must
 * register a notifier to listen for the events.
 *
 * Return 0 on success, negative error code otherwise.
 */
int asc_tx_register_handle(struct asc_config *cfg)
{
	int ret = 0;
	unsigned long flags;
	struct asc_infor infor;
	struct asc_tx_handle *tx = NULL;

	if (NULL == asc_work_queue) {
		ASCPRT("%s: error, asc has not been initialized\n", __func__);
		return -EINVAL;
	}
	if (NULL == cfg)
		return -EINVAL;
	/* by yfu */
	if (((cfg->gpio_wake) & 0xFFFF) < 0) {
		ASCPRT("%s: config %s gpio is invalid.\n", __func__, cfg->name);
		return -EINVAL;
	}
	tx = asc_tx_handle_lookup(cfg->name);
	if (tx) {
		ASCPRT("config %s already exists.\n", cfg->name);
		return -EINVAL;
	}
	tx = kzalloc(sizeof(struct asc_tx_handle), GFP_KERNEL);
	if (NULL == tx) {
		ASCPRT("Fail to alloc memory for tx handle.\n");
		return -ENOMEM;
	}
	tx->cfg.gpio_ready = cfg->gpio_ready;
	tx->cfg.gpio_wake = cfg->gpio_wake;
	tx->cfg.polar = !!cfg->polar;
	strncpy(tx->cfg.name, cfg->name, ASC_NAME_LEN - 1);
	ret = asc_tx_handle_init(tx);
	if (ret < 0) {
		ASCPRT("Fail to init tx handle %s.\n", tx->cfg.name);
		goto err_tx_handle_init;
	}
	/* add the handle to the asc list */
	spin_lock_irqsave(&hdlock, flags);
	list_add(&tx->node, &asc_tx_handle_list);
	spin_unlock_irqrestore(&hdlock, flags);
	ASCDPRT("Register tx handle %s.\n", tx->cfg.name);
	tx->kobj = kobject_create_and_add(cfg->name, asc_kobj);
	if (!tx->kobj) {
		ret = -ENOMEM;
		goto err_create_kobj;
	}
	/* add the default users for the sysfs interface and auto ready */
	memset(&infor, 0, sizeof(infor));
	strncpy(infor.name, ASC_TX_SYSFS_USER, ASC_NAME_LEN - 1);
	asc_tx_add_user(tx->cfg.name, &infor);
	memset(&infor, 0, sizeof(infor));
	strncpy(infor.name, ASC_TX_AUTO_USER, ASC_NAME_LEN - 1);
	asc_tx_add_user(tx->cfg.name, &infor);
	return sysfs_create_group(tx->kobj, &tx_hd_attr_group);

err_create_kobj:
	list_del(&tx->node);
err_tx_handle_init:
	kfree(tx);
	return ret;
}
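
/*
 * Illustrative sketch (not part of this driver): registering the single tx
 * handle and adding a client user. The handle name "data", the user name
 * "my_dev" and the GPIO numbers are made-up example values.
 */
#if 0
static int example_register_tx(void)
{
	int ret;
	struct asc_config cfg;
	struct asc_infor infor;

	memset(&cfg, 0, sizeof(cfg));
	strncpy(cfg.name, "data", ASC_NAME_LEN - 1);
	cfg.gpio_wake = 20;	/* AP-wakes-CP GPIO (example number) */
	cfg.gpio_ready = 21;	/* CP-ready GPIO (example number) */
	cfg.polar = 1;
	ret = asc_tx_register_handle(&cfg);
	if (ret < 0)
		return ret;

	/* clients then add themselves as users of the new handle */
	memset(&infor, 0, sizeof(infor));
	strncpy(infor.name, "my_dev", ASC_NAME_LEN - 1);
	return asc_tx_add_user(cfg.name, &infor);
}
#endif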

static int __init asc_init(void)
{
	int ret;

	ret = platform_device_register(&asc_device);
	if (ret) {
		ASCPRT("platform_device_register failed\n");
		goto err_platform_device_register;
	}
	ret = platform_driver_register(&asc_driver);
	if (ret) {
		ASCPRT("platform_driver_register failed\n");
		goto err_platform_driver_register;
	}
	asc_work_queue = create_singlethread_workqueue("asc_work");
	if (asc_work_queue == NULL) {
		ret = -ENOMEM;
		goto err_create_work_queue;
	}
	asc_kobj = c2k_kobject_add("asc");
	if (!asc_kobj) {
		ret = -ENOMEM;
		goto err_create_kobj;
	}
	return sysfs_create_group(asc_kobj, &g_attr_group);

err_create_kobj:
	destroy_workqueue(asc_work_queue);
err_create_work_queue:
	platform_driver_unregister(&asc_driver);
err_platform_driver_register:
	platform_device_unregister(&asc_device);
err_platform_device_register:
	return ret;
}
device_initcall(asc_init);