ccci_core.h

#ifndef __CCCI_CORE_H__
#define __CCCI_CORE_H__

#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/wakelock.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <mt-plat/mt_ccci_common.h>
#include "ccci_debug.h"

#define CCCI_DEV_NAME "ccci"
#define CCCI_MAGIC_NUM 0xFFFFFFFF
#define MAX_TXQ_NUM 8
#define MAX_RXQ_NUM 8
#define PACKET_HISTORY_DEPTH 16 /* must be power of 2 */
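/*
 * PACKET_HISTORY_DEPTH must stay a power of 2 so a history ring index can
 * wrap with a cheap bitwise AND instead of a modulo. Minimal sketch of the
 * assumed update (illustrative only):
 *
 *	int ptr = 0;
 *
 *	// record an entry at tx_history[q][ptr], then advance and wrap
 *	ptr = (ptr + 1) & (PACKET_HISTORY_DEPTH - 1);
 */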
struct ccci_log {
	struct ccci_header msg;
	u64 tv;
	int droped;
};

struct ccci_md_attribute {
	struct attribute attr;
	struct ccci_modem *modem;
	ssize_t (*show)(struct ccci_modem *md, char *buf);
	ssize_t (*store)(struct ccci_modem *md, const char *buf, size_t count);
};

#define CCCI_MD_ATTR(_modem, _name, _mode, _show, _store) \
static struct ccci_md_attribute ccci_md_attr_##_name = { \
	.attr = {.name = __stringify(_name), .mode = _mode }, \
	.modem = _modem, \
	.show = _show, \
	.store = _store, \
}
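/*
 * Usage sketch for CCCI_MD_ATTR (illustrative only; the handler and
 * attribute names below are hypothetical, not part of this driver):
 *
 *	static ssize_t example_show(struct ccci_modem *md, char *buf)
 *	{
 *		return snprintf(buf, PAGE_SIZE, "%d\n", md->index);
 *	}
 *	CCCI_MD_ATTR(NULL, example, 0444, example_show, NULL);
 *
 * This expands to a static struct ccci_md_attribute named
 * ccci_md_attr_example, whose .attr can later be registered under the
 * modem's kobject by sysfs code.
 */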
/* enumerations and macros */
typedef enum {
	MD_BOOT_STAGE_0 = 0,
	MD_BOOT_STAGE_1 = 1,
	MD_BOOT_STAGE_2 = 2,
	MD_BOOT_STAGE_EXCEPTION = 3
} MD_BOOT_STAGE; /* for other modules */
typedef enum {
	EX_NONE = 0,
	EX_INIT,
	EX_DHL_DL_RDY,
	EX_INIT_DONE,
	/* internal use */
	MD_NO_RESPONSE,
	MD_WDT,
} MD_EX_STAGE;
#ifdef MD_UMOLY_EE_SUPPORT
#define MD_HS1_FAIL_DUMP_SIZE (2048)
typedef enum {
	MD_FIGHT_MODE_NONE = 0,
	MD_FIGHT_MODE_ENTER = 1,
	MD_FIGHT_MODE_LEAVE = 2
} FLIGHT_STAGE; /* for other modules */
#endif
/* MODEM MAUI exception header (4 bytes) */
typedef struct _exception_record_header_t {
	u8 ex_type;
	u8 ex_nvram;
	u16 ex_serial_num;
} __packed EX_HEADER_T;
/* MODEM MAUI environment information (164 bytes) */
typedef struct _ex_environment_info_t {
	u8 boot_mode; /* offset: +0x10 */
	u8 reserved1[8];
	u8 execution_unit[8];
	u8 status; /* offset: +0x21, length: 1 */
	u8 ELM_status; /* offset: +0x22, length: 1 */
	u8 reserved2[145];
} __packed EX_ENVINFO_T;
/* MODEM MAUI special structure for fatal error (8 bytes) */
typedef struct _ex_fatalerror_code_t {
	u32 code1;
	u32 code2;
} __packed EX_FATALERR_CODE_T;
/* MODEM MAUI fatal error (296 bytes) */
typedef struct _ex_fatalerror_t {
	EX_FATALERR_CODE_T error_code;
	u8 reserved1[288];
} __packed EX_FATALERR_T;
/* MODEM MAUI assert fail (296 bytes) */
typedef struct _ex_assert_fail_t {
	u8 filename[24];
	u32 linenumber;
	u32 parameters[3];
	u8 reserved1[256];
} __packed EX_ASSERTFAIL_T;
/* MODEM MAUI globally exported data structure (300 bytes) */
typedef union {
	EX_FATALERR_T fatalerr;
	EX_ASSERTFAIL_T assert;
} __packed EX_CONTENT_T;
/* MODEM MAUI standard structure of an exception log */
typedef struct _ex_exception_log_t {
	EX_HEADER_T header;
	u8 reserved1[12];
	EX_ENVINFO_T envinfo;
	u8 reserved2[36];
	EX_CONTENT_T content;
} __packed EX_LOG_T;
#ifdef MD_UMOLY_EE_SUPPORT
/* MD32 exception struct */
typedef enum {
	CMIF_MD32_EX_INVALID = 0,
	CMIF_MD32_EX_ASSERT_LINE,
	CMIF_MD32_EX_ASSERT_EXT,
	CMIF_MD32_EX_FATAL_ERROR,
	CMIF_MD32_EX_FATAL_ERROR_EXT,
} CMIF_MD32_EX_TYPE;
typedef struct ex_fatalerr_md32_ {
	unsigned int ex_code[2];
	unsigned int ifabtpc;
	unsigned int ifabtcau;
	unsigned int daabtcau;
	unsigned int daabtpc;
	unsigned int daabtad;
	unsigned int daabtsp;
	unsigned int lr;
	unsigned int sp;
	unsigned int interrupt_count;
	unsigned int vic_mask;
	unsigned int vic_pending;
	unsigned int cirq_mask_31_0;
	unsigned int cirq_mask_63_32;
	unsigned int cirq_pend_31_0;
	unsigned int cirq_pend_63_32;
} __packed EX_FATALERR_MD32;
typedef struct ex_assertfail_md32_ {
	u32 ex_code[3];
	u32 line_num;
	char file_name[64];
} __packed EX_ASSERTFAIL_MD32;
typedef union {
	EX_FATALERR_MD32 fatalerr;
	EX_ASSERTFAIL_MD32 assert;
} __packed EX_MD32_CONTENT_T;
#define MD32_FDD_ROCODE "FDD_ROCODE"
#define MD32_TDD_ROCODE "TDD_ROCODE"
typedef struct _ex_md32_log_ {
	u32 finish_fill;
	u32 except_type;
	EX_MD32_CONTENT_T except_content;
	unsigned int ex_log_mem_addr;
	unsigned int md32_active_mode;
} __packed EX_MD32_LOG_T;
/* CoreSonic exception struct */
typedef enum {
	CS_EXCEPTION_ASSERTION = 0x45584300,
	CS_EXCEPTION_FATAL_ERROR = 0x45584301,
	CS_EXCEPTION_CTI_EVENT = 0x45584302,
	CS_EXCEPTION_UNKNOWN = 0x45584303,
} CS_EXCEPTION_TYPE_T;
typedef struct ex_fatalerr_cs_ {
	u32 error_status;
	u32 error_pc;
	u32 error_lr;
	u32 error_address;
	u32 error_code1;
	u32 error_code2;
} __packed EX_FATALERR_CS;
typedef struct ex_assertfail_cs_ {
	u32 line_num;
	u32 para1;
	u32 para2;
	u32 para3;
	char file_name[128];
} __packed EX_ASSERTFAIL_CS;
typedef union {
	EX_FATALERR_CS fatalerr;
	EX_ASSERTFAIL_CS assert;
} __packed EX_CS_CONTENT_T;
typedef struct _ex_cs_log_t {
	u32 except_type;
	EX_CS_CONTENT_T except_content;
} __packed EX_CS_LOG_T;
/* PCORE, L1CORE exception struct */
enum {
	MD_EX_PL_INVALID = 0,
	MD_EX_PL_UNDEF = 1,
	MD_EX_PL_SWI = 2,
	MD_EX_PL_PREF_ABT = 3,
	MD_EX_PL_DATA_ABT = 4,
	MD_EX_PL_STACKACCESS = 5,
	MD_EX_PL_FATALERR_TASK = 6,
	MD_EX_PL_FATALERR_BUF = 7,
	MD_EX_PL_FATALE_TOTAL,
	MD_EX_PL_ASSERT_FAIL = 16,
	MD_EX_PL_ASSERT_DUMP = 17,
	MD_EX_PL_ASSERT_NATIVE = 18,
	MD_EX_CC_INVALID_EXCEPTION = 0x20,
	MD_EX_CC_PCORE_EXCEPTION = 0x21,
	MD_EX_CC_L1CORE_EXCEPTION = 0x22,
	MD_EX_CC_CS_EXCEPTION = 0x23,
	MD_EX_CC_MD32_EXCEPTION = 0x24,
	MD_EX_CC_C2K_EXCEPTION = 0x25,
	MD_EX_CC_ARM7_EXCEPTION = 0x26,
	MD_EX_OTHER_CORE_EXCEPTIN,
	EMI_MPU_VIOLATION = 0x30,
	/* NUM_EXCEPTION, */
};
/* MD core list */
typedef enum {
	MD_PCORE,
	MD_L1CORE,
	MD_CS_ICC,
	MD_CS_IMC,
	MD_CS_MPC,
	MD_MD32_DFE,
	MD_MD32_BRP,
	MD_MD32_RAKE,
	MD_CORE_NUM
} MD_CORE_NAME;
typedef struct _exp_pl_header_t {
	u32 ex_core_id;
	u8 ex_type;
	u8 ex_nvram;
	u16 ex_serial_num;
} __packed EX_PL_HEADER_T;
typedef struct ex_time_stamp {
	u32 USCNT;
	u32 GLB_TS;
} EX_TIME_STAMP;
typedef struct _ex_pl_environment_info_t {
	EX_TIME_STAMP ex_timestamp;
	u8 boot_mode; /* offset: +0x10 */
	u8 execution_unit[8];
	u8 status; /* offset: +0x21, length: 1 */
	u8 ELM_status; /* offset: +0x22, length: 1 */
	u8 reserved2;
	unsigned int stack_ptr;
	u8 stack_dump[40];
	u16 ext_queue_pending_cnt;
	u16 interrupt_mask3;
	u8 ext_queue_pending[80];
	u8 interrupt_mask[8];
	u32 processing_lisr;
	u32 lr;
} __packed EX_PL_ENVINFO_T;
typedef struct _ex_pl_fatalerror_code_t {
	u32 code1;
	u32 code2;
	u32 code3;
} __packed EX_PL_FATALERR_CODE_T;
typedef struct _ex_pl_analysis_t {
	u32 trace;
	u8 param[40];
	u8 owner[8];
	unsigned char core[7];
	u8 is_cadefa_sup;
} __packed EX_PL_ANALYSIS_T;
typedef struct _ex_pl_fatalerror_t {
	EX_PL_FATALERR_CODE_T error_code;
	u8 description[20];
	EX_PL_ANALYSIS_T ex_analy;
	u8 reserved1[356];
} __packed EX_PL_FATALERR_T;
typedef struct _ex_pl_assert_t {
	u8 filepath[256];
	u8 filename[64];
	u32 linenumber;
	u32 para[3];
	u8 reserved1[368];
	u8 guard[4];
} __packed EX_PL_ASSERTFAIL_T;
typedef struct _ex_pl_diagnosisinfo_t {
	u8 diagnosis;
	char owner[8];
	u8 reserve[3];
	u8 timing_check[24];
} __packed EX_PL_DIAGNOSISINFO_T;
typedef union {
	EX_PL_FATALERR_T fatalerr;
	EX_PL_ASSERTFAIL_T assert;
} __packed EX_PL_CONTENT_T;
typedef struct _ex_exp_PL_log_t {
	EX_PL_HEADER_T header; /* 8 bytes */
	char sw_ver[32]; /* 4 bytes: 8 */
	char sw_project_name[32]; /* 8: 12 */
	char sw_flavor[32]; /* 8: 20 */
	char sw_buildtime[16]; /* 4: 28 */
	EX_PL_ENVINFO_T envinfo; /* : 32 */
	EX_PL_DIAGNOSISINFO_T diagnoinfo; /* 36: */
	EX_PL_CONTENT_T content;
} __packed EX_PL_LOG_T;
/* exception overview struct */
#define MD_CORE_TOTAL_NUM (8)
#define MD_CORE_NAME_LEN (11)
#define MD_CORE_NAME_DEBUG (MD_CORE_NAME_LEN + 5 + 16) /* +5 for 16, +16 for md32 TDD FDD */
#define ECT_SRC_NONE (0x0)
#define ECT_SRC_PS (0x1 << 0)
#define ECT_SRC_L1 (0x1 << 1)
#define ECT_SRC_MD32 (0x1 << 2)
#define ECT_SRC_CS (0x1 << 3)
#define ECT_SRC_ARM7 (0x1 << 10)
#define ECT_SRC_RMPU (0x1 << 11)
#define ECT_SRC_C2K (0x1 << 12)
typedef struct ex_main_reason_t {
	u32 core_offset;
	u8 is_offender;
	char core_name[MD_CORE_NAME_LEN];
} __packed EX_MAIN_REASON_T;
typedef struct ex_overview_t {
	u32 core_num;
	EX_MAIN_REASON_T main_reson[MD_CORE_TOTAL_NUM];
	u32 ect_status;
	u32 cs_status;
	u32 md32_status;
} __packed EX_OVERVIEW_T;
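/*
 * Sketch of inspecting the ECT_SRC_* bits in EX_OVERVIEW_T.ect_status
 * (illustrative only; the helper below is hypothetical):
 *
 *	static void example_check_overview(struct ex_overview_t *ov)
 *	{
 *		if (ov->ect_status & ECT_SRC_PS)
 *			pr_debug("PS reported an exception source\n");
 *		if (ov->ect_status & ECT_SRC_MD32)
 *			pr_debug("MD32 reported an exception source\n");
 *	}
 */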
/* #define CCCI_EXREC_OFFSET_OFFENDER1 396 */
#endif
typedef struct _ccci_msg {
	union {
		u32 magic; /* For mail box magic number */
		u32 addr; /* For stream start addr */
		u32 data0; /* For ccci common data[0] */
	};
	union {
		u32 id; /* For mail box message id */
		u32 len; /* For stream len */
		u32 data1; /* For ccci common data[1] */
	};
	u32 channel;
	u32 reserved;
} __packed ccci_msg_t;
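/*
 * Sketch of filling a ccci_msg_t as a mailbox-style message (illustrative
 * only; it assumes the mailbox form carries CCCI_MAGIC_NUM in "magic", and
 * the id/channel values below are hypothetical):
 *
 *	ccci_msg_t msg;
 *
 *	msg.magic = CCCI_MAGIC_NUM;
 *	msg.id = 0x1;		// mailbox message id
 *	msg.channel = 0;	// logical channel the message targets
 *	msg.reserved = 0;
 */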
typedef struct dump_debug_info {
	unsigned int type;
	char *name;
#ifdef MD_UMOLY_EE_SUPPORT
	char core_name[MD_CORE_NAME_DEBUG];
#endif
	unsigned int more_info;
	union {
		struct {
#ifdef MD_UMOLY_EE_SUPPORT
			char file_name[256]; /* for pCore: file path, including file name */
#else
			char file_name[30];
#endif
			int line_num;
			unsigned int parameters[3];
		} assert;
		struct {
			int err_code1;
			int err_code2;
#ifdef MD_UMOLY_EE_SUPPORT
			int err_code3;
			char *ExStr;
#endif
			char offender[9];
		} fatal_error;
		ccci_msg_t data;
		struct {
			unsigned char execution_unit[9]; /* 8+1 */
			char file_name[30];
			int line_num;
			unsigned int parameters[3];
		} dsp_assert;
		struct {
			unsigned char execution_unit[9];
			unsigned int code1;
		} dsp_exception;
		struct {
			unsigned char execution_unit[9];
			unsigned int err_code[2];
		} dsp_fatal_err;
	};
	void *ext_mem;
	size_t ext_size;
	void *md_image;
	size_t md_size;
	void *platform_data;
	void (*platform_call)(void *data);
} DEBUG_INFO_T;
typedef enum {
	IDLE = 0, /* updated by buffer manager */
	FLYING, /* updated by buffer manager */
	PARTIAL_READ, /* updated by port_char */
	ERROR, /* not used */
} REQ_STATE;
typedef enum {
	IN = 0,
	OUT
} DIRECTION;
/*
 * This tells the request free routine how to handle the skb.
 * The CCCI request structure itself is always recycled, but its skb can follow a different policy.
 * A CCCI request can work as just a wrapper, because the network subsystem handles the skb itself.
 * Tx: policy is determined by the sender;
 * Rx: policy is determined by the receiver.
 */
typedef enum {
	NOOP = 0, /* don't touch the skb, just recycle the request wrapper */
	RECYCLE, /* put the skb back into our pool */
	FREE, /* simply free the skb */
} DATA_POLICY;
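/*
 * Minimal sketch of how a request free routine could branch on DATA_POLICY
 * (illustrative only; this is not the driver's actual free path):
 *
 *	switch (req->policy) {
 *	case RECYCLE:
 *		// return the skb to the driver's skb pool
 *		break;
 *	case FREE:
 *		dev_kfree_skb_any(req->skb);
 *		break;
 *	case NOOP:
 *	default:
 *		// skb is owned elsewhere (e.g. the network stack); leave it alone
 *		break;
 *	}
 */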
/* core classes */
struct ccci_request {
	struct sk_buff *skb;
	struct list_head entry;
	DATA_POLICY policy;
	REQ_STATE state;
	unsigned char blocking; /* only for Tx */
	unsigned char ioc_override; /* bit7: override or not; bit0: IOC setting */
};
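/*
 * ioc_override packs two facts into one byte: bit7 says whether the default
 * IOC setting is overridden, bit0 carries the setting itself. Sketch of that
 * encoding (the mask names below are hypothetical, not defined by this header):
 *
 *	// #define IOC_OVERRIDE_FLAG (1 << 7)
 *	// #define IOC_SETTING_BIT   (1 << 0)
 *
 *	req->ioc_override = IOC_OVERRIDE_FLAG | IOC_SETTING_BIT;	// override, IOC on
 *	if (req->ioc_override & IOC_OVERRIDE_FLAG)
 *		use_ioc = req->ioc_override & IOC_SETTING_BIT;
 */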
struct ccci_modem;
struct ccci_port;
struct ccci_port_ops {
	/* must-have */
	int (*init)(struct ccci_port *port);
	int (*recv_request)(struct ccci_port *port, struct ccci_request *req);
	int (*recv_skb)(struct ccci_port *port, struct sk_buff *skb);
	/* optional */
	int (*req_match)(struct ccci_port *port, struct ccci_request *req);
	void (*md_state_notice)(struct ccci_port *port, MD_STATE state);
	void (*dump_info)(struct ccci_port *port, unsigned flag);
};
struct ccci_port {
	/* don't change the order unless you modify the modem drivers as well */
	/* identity */
	CCCI_CH tx_ch;
	CCCI_CH rx_ch;
	/*
	 * 0xF? is used as an invalid index number; all virtual ports should use queue 0, not 0xF?.
	 * Always access queue indexes via the PORT_TXQ_INDEX and PORT_RXQ_INDEX macros.
	 * Modem drivers should always check for an invalid index with ">valid_queue_number",
	 * not with an "==0xF?" style comparison.
	 *
	 * Here is a nasty trick: we assume no modem provides more than 0xF0 queues, so we use
	 * the lower 4 bits to smuggle information for network ports.
	 * Attention: this trick assumes the hardware queue index for a net port will not exceed 0xF.
	 * Check NET_ACK_TXQ_INDEX@port_net.c.
	 */
	unsigned char txq_index;
	unsigned char rxq_index;
	unsigned char txq_exp_index;
	unsigned char rxq_exp_index;
	unsigned char flags;
	struct ccci_port_ops *ops;
	/* device node related */
	unsigned int minor;
	char *name;
	/* uninitialized in definition, always put them at the end */
	struct ccci_modem *modem;
	void *private_data;
	atomic_t usage_cnt;
	struct list_head entry;
	/*
	 * The Tx and Rx flows are asymmetric because ports are multiplexed on queues.
	 * Tx: data blocks are sent directly to the queue's list, so a port doesn't maintain a Tx list.
	 *     It only provides a wait_queue_head for blocking writes.
	 * Rx: the modem needs to dispatch Rx packets as quickly as possible, so a port needs an
	 *     Rx list to hold packets.
	 */
	struct list_head rx_req_list;
	spinlock_t rx_req_lock;
	wait_queue_head_t rx_wq; /* for upper-layer users */
	int rx_length;
	int rx_length_th;
	struct wake_lock rx_wakelock;
	unsigned int tx_busy_count;
	unsigned int rx_busy_count;
	int interception;
};
#define PORT_F_ALLOW_DROP (1<<0) /* packets will be dropped if the port's Rx buffer is full */
#define PORT_F_RX_FULLED (1<<1) /* Rx buffer has been full at least once */
#define PORT_F_USER_HEADER (1<<2) /* CCCI header is provided by the user, not by CCCI */
#define PORT_F_RX_EXCLUSIVE (1<<3) /* the Rx queue has only this one port */
struct ccci_modem_cfg {
	unsigned int load_type;
	unsigned int load_type_saving;
	unsigned int setting;
};
#define MD_SETTING_ENABLE (1<<0)
#define MD_SETTING_RELOAD (1<<1)
#define MD_SETTING_FIRST_BOOT (1<<2) /* this is the first boot */
#define MD_SETTING_STOP_RETRY_BOOT (1<<3)
#define MD_SETTING_DUMMY (1<<7)
struct ccci_mem_layout { /* all from the AP view; the AP has no hardware remap after MT6592 */
	/* MD image */
	void __iomem *md_region_vir;
	phys_addr_t md_region_phy;
	unsigned int md_region_size;
	/* DSP image */
	void __iomem *dsp_region_vir;
	phys_addr_t dsp_region_phy;
	unsigned int dsp_region_size;
	/* Shared memory */
	void __iomem *smem_region_vir;
	phys_addr_t smem_region_phy;
	unsigned int smem_region_size;
	unsigned int smem_offset_AP_to_MD; /* offset between the AP and MD views of shared memory */
	/* DHL info */
	void __iomem *dhl_smem_vir;
	phys_addr_t dhl_smem_phy;
	unsigned int dhl_smem_size;
	/* MD1-MD3 shared memory */
	void __iomem *md1_md3_smem_vir;
	phys_addr_t md1_md3_smem_phy;
	unsigned int md1_md3_smem_size;
};
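/*
 * Sketch of deriving the MD-view address of shared memory from the AP view
 * using smem_offset_AP_to_MD (illustrative only; "layout" is a hypothetical
 * struct ccci_mem_layout pointer, and the direction of the offset -- added to
 * the AP physical address here -- is an assumption, not stated by this header):
 *
 *	u32 md_view_base = (u32)layout->smem_region_phy + layout->smem_offset_AP_to_MD;
 */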
/**
 * -- smem layout --
 *
 * --0x00200000  _ _ _ _ _ _ _ _
 *              |   share mem    |
 * --0x00xxx000 |_ _ _ _ _ _ _ _ |
 *              |     (CCIF)     |
 * --0x00011000 |_ _ _ _ _ _ _ _ |
 *              |  runtime data  |
 * --0x00010000 |_ _ _ _ _ _ _ _ |
 *              |   exception    |
 * --0x00000000 |_ _(4k dump)_ _ |
 *
 **/
struct ccci_smem_layout {
	/* total exception region */
	void __iomem *ccci_exp_smem_base_vir;
	phys_addr_t ccci_exp_smem_base_phy;
	unsigned int ccci_exp_smem_size;
	unsigned int ccci_exp_dump_size;
	/* runtime data and MPU region */
	void __iomem *ccci_rt_smem_base_vir;
	phys_addr_t ccci_rt_smem_base_phy;
	unsigned int ccci_rt_smem_size;
	unsigned int ccci_ccif_smem_size;
	/* how we dump the exception region */
	void __iomem *ccci_exp_smem_ccci_debug_vir;
	unsigned int ccci_exp_smem_ccci_debug_size;
	void __iomem *ccci_exp_smem_mdss_debug_vir;
	unsigned int ccci_exp_smem_mdss_debug_size;
	void __iomem *ccci_exp_smem_sleep_debug_vir;
	unsigned int ccci_exp_smem_sleep_debug_size;
	/* the address where we parse the MD exception record */
	void __iomem *ccci_exp_rec_base_vir;
};
typedef enum {
	DUMP_FLAG_CCIF = (1 << 0),
	DUMP_FLAG_CLDMA = (1 << 1), /* tricky part: the "length" argument is used as the queue index */
	DUMP_FLAG_REG = (1 << 2),
	DUMP_FLAG_SMEM = (1 << 3),
	DUMP_FLAG_IMAGE = (1 << 4),
	DUMP_FLAG_LAYOUT = (1 << 5),
	DUMP_FLAG_QUEUE_0 = (1 << 6),
	DUMP_FLAG_QUEUE_0_1 = (1 << 7),
	DUMP_FLAG_CCIF_REG = (1 << 8),
	DUMP_FLAG_SMEM_MDSLP = (1 << 9),
	DUMP_FLAG_MD_WDT = (1 << 10),
} MODEM_DUMP_FLAG;
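/*
 * Sketch of a dump_info() call using these flags (illustrative only; it
 * assumes flags may be OR-combined, which the bit-style values suggest but
 * the header does not state). Note the DUMP_FLAG_CLDMA trick above: the
 * "length" argument is reused as the queue index in that case.
 *
 *	md->ops->dump_info(md, DUMP_FLAG_REG | DUMP_FLAG_SMEM, NULL, 0);
 *	md->ops->dump_info(md, DUMP_FLAG_CLDMA, NULL, 3);	// dump CLDMA queue 3
 */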
typedef enum {
	EE_FLAG_ENABLE_WDT = (1 << 0),
	EE_FLAG_DISABLE_WDT = (1 << 1),
} MODEM_EE_FLAG;
#define MD_IMG_DUMP_SIZE (1<<8)
#define DSP_IMG_DUMP_SIZE (1<<9)
typedef enum {
	LOW_BATTERY,
	BATTERY_PERCENT,
} LOW_POEWR_NOTIFY_TYPE;
typedef enum {
	CCCI_MESSAGE,
	CCIF_INTERRUPT,
	CCIF_INTR_SEQ,
} MD_COMM_TYPE;
typedef enum {
	MD_STATUS_POLL_BUSY = (1 << 0),
	MD_STATUS_ASSERTED = (1 << 1),
} MD_STATUS_POLL_FLAG;
struct ccci_modem_ops {
	/* must-have */
	int (*init)(struct ccci_modem *md);
	int (*start)(struct ccci_modem *md);
	int (*reset)(struct ccci_modem *md); /* as pre-stop */
	int (*stop)(struct ccci_modem *md, unsigned int timeout);
	int (*send_request)(struct ccci_modem *md, unsigned char txqno, struct ccci_request *req,
		struct sk_buff *skb);
	int (*give_more)(struct ccci_modem *md, unsigned char rxqno);
	int (*write_room)(struct ccci_modem *md, unsigned char txqno);
	int (*start_queue)(struct ccci_modem *md, unsigned char qno, DIRECTION dir);
	int (*stop_queue)(struct ccci_modem *md, unsigned char qno, DIRECTION dir);
	int (*napi_poll)(struct ccci_modem *md, unsigned char rxqno, struct napi_struct *napi, int weight);
	int (*send_runtime_data)(struct ccci_modem *md, unsigned int sbp_code);
	int (*broadcast_state)(struct ccci_modem *md, MD_STATE state);
	int (*force_assert)(struct ccci_modem *md, MD_COMM_TYPE type);
	int (*dump_info)(struct ccci_modem *md, MODEM_DUMP_FLAG flag, void *buff, int length);
	struct ccci_port *(*get_port_by_minor)(struct ccci_modem *md, int minor);
	/*
	 * Here we assume Rx and Tx channels are in the same address space,
	 * and the Rx channel should be checked first, so users can save one comparison if they
	 * always pass in the Rx channel ID to identify a port.
	 */
	struct ccci_port *(*get_port_by_channel)(struct ccci_modem *md, CCCI_CH ch);
	int (*low_power_notify)(struct ccci_modem *md, LOW_POEWR_NOTIFY_TYPE type, int level);
	int (*ee_callback)(struct ccci_modem *md, MODEM_EE_FLAG flag);
};
typedef void __iomem *(*smem_sub_region_cb_t)(void *md_blk, int *size_o);
/**************** handshake v2 *************/
typedef enum {
	BOOT_INFO = 0,
	EXCEPTION_SHARE_MEMORY,
	CCIF_SHARE_MEMORY,
	DHL_SHARE_MEMORY,
	MD1MD3_SHARE_MEMORY,
	/* ccci misc info */
	MISC_INFO_HIF_DMA_REMAP,
	MISC_INFO_RTC_32K_LESS,
	MISC_INFO_RANDOM_SEED_NUM,
	MISC_INFO_GPS_COCLOCK,
	MISC_INFO_SBP_ID,
	MISC_INFO_CCCI,
	MISC_INFO_CLIB_TIME,
	MISC_INFO_C2K,
	MD_IMAGE_START_MEMORY,
	CCISM_SHARE_MEMORY,
	CCB_SHARE_MEMORY, /* total size of all CCB regions */
	DHL_RAW_SHARE_MEMORY,
	DT_NETD_SHARE_MEMORY,
	DT_USB_SHARE_MEMORY,
	EE_AFTER_EPOF,
	CCMNI_MTU, /* max Rx packet buffer size on the AP side */
	RUNTIME_FEATURE_ID_MAX,
} MD_CCCI_RUNTIME_FEATURE_ID;
typedef enum {
	AP_RUNTIME_FEATURE_ID_MAX,
} AP_CCCI_RUNTIME_FEATURE_ID;
typedef enum {
	CCCI_FEATURE_NOT_EXIST = 0,
	CCCI_FEATURE_NOT_SUPPORT = 1,
	CCCI_FEATURE_MUST_SUPPORT = 2,
	CCCI_FEATURE_OPTIONAL_SUPPORT = 3,
	CCCI_FEATURE_SUPPORT_BACKWARD_COMPAT = 4,
} CCCI_RUNTIME_FEATURE_SUPPORT_TYPE;
struct ccci_feature_support {
	u8 support_mask:4;
	u8 version:4;
};
struct ccci_runtime_feature {
	u8 feature_id; /* for debug only */
	struct ccci_feature_support support_info;
	u8 reserved[2];
	u32 data_len;
	u32 data[0];
};
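/*
 * Sketch of walking a buffer of variable-length ccci_runtime_feature records
 * (illustrative only; it assumes records are packed back to back and that
 * data_len is a byte count, neither of which this header states explicitly):
 *
 *	static void example_walk_features(void *buf, size_t total)
 *	{
 *		struct ccci_runtime_feature *rtf = buf;
 *
 *		while ((void *)rtf + sizeof(*rtf) <= buf + total) {
 *			pr_debug("feature %u, mask %u, len %u\n", rtf->feature_id,
 *				 rtf->support_info.support_mask, rtf->data_len);
 *			rtf = (void *)rtf + sizeof(*rtf) + rtf->data_len;
 *		}
 *	}
 */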
struct ccci_runtime_boot_info {
	u32 boot_channel;
	u32 booting_start_id;
	u32 boot_attributes;
	u32 boot_ready_id;
};
struct ccci_runtime_share_memory {
	u32 addr;
	u32 size;
};
struct ccci_misc_info_element {
	u32 feature[4];
};
#define FEATURE_COUNT 64
#define MD_FEATURE_QUERY_PATTERN 0x49434343
#define AP_FEATURE_QUERY_PATTERN 0x43434349
struct md_query_ap_feature {
	u32 head_pattern;
	struct ccci_feature_support feature_set[FEATURE_COUNT];
	u32 tail_pattern;
};
struct ap_query_md_feature {
	u32 head_pattern;
	struct ccci_feature_support feature_set[FEATURE_COUNT];
	u32 share_memory_support;
	u32 ap_runtime_data_addr;
	u32 ap_runtime_data_size;
	u32 md_runtime_data_addr;
	u32 md_runtime_data_size;
	u32 set_md_mpu_start_addr;
	u32 set_md_mpu_total_size;
	u32 tail_pattern;
};
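/*
 * Sketch of validating an MD feature-query block by its guard words before
 * trusting its contents (illustrative only; it assumes head and tail carry
 * the same pattern, which this header does not state):
 *
 *	static int example_md_query_valid(struct md_query_ap_feature *q)
 *	{
 *		return q->head_pattern == MD_FEATURE_QUERY_PATTERN &&
 *		       q->tail_pattern == MD_FEATURE_QUERY_PATTERN;
 *	}
 */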
/*********************************************/
struct ccci_modem {
	unsigned char index;
	unsigned char *private_data;
	struct list_head rx_ch_ports[CCCI_MAX_CH_NUM]; /* port list of each Rx channel, for Rx dispatching */
	short seq_nums[2][CCCI_MAX_CH_NUM];
	unsigned int capability;
	volatile MD_STATE md_state; /* check the comments below; put it here for cache benefit */
	struct ccci_modem_ops *ops;
	atomic_t wakeup_src;
	struct ccci_port *ports;
	struct list_head entry;
	unsigned char port_number;
	char post_fix[IMG_POSTFIX_LEN];
	unsigned int major;
	unsigned int minor_base;
	struct kobject kobj;
	struct ccci_mem_layout mem_layout;
	struct ccci_smem_layout smem_layout;
	struct ccci_image_info img_info[IMG_NUM];
	unsigned int sim_type;
	unsigned int sbp_code;
	unsigned int sbp_code_default;
	unsigned int rf_desense;
	unsigned int is_forced_assert;
	unsigned char critical_user_active[4];
	unsigned int md_img_exist[MAX_IMG_NUM];
	struct platform_device *plat_dev;
	/*
	 * The following members are read-only for the CCCI core; they are maintained by the modem and
	 * port_kernel.c.
	 * port_kernel.c should not be considered part of the CCCI core; we just moved the common part
	 * of modem message handling into that file. All current modems follow the same message
	 * protocol during bootup and exception. If a future modem abandons this protocol, we can
	 * simply replace the function set of the kernel port to support it.
	 */
	volatile MD_BOOT_STAGE boot_stage;
	MD_EX_STAGE ex_stage; /* only for logging */
	phys_addr_t invalid_remap_base;
	struct ccci_modem_cfg config;
	struct timer_list bootup_timer;
	struct timer_list ex_monitor;
	struct timer_list ex_monitor2;
	struct timer_list md_status_poller;
	struct timer_list md_status_timeout;
	unsigned int md_status_poller_flag;
	spinlock_t ctrl_lock;
	volatile unsigned int ee_info_flag;
	DEBUG_INFO_T debug_info;
#ifdef MD_UMOLY_EE_SUPPORT
	DEBUG_INFO_T debug_info1[MD_CORE_NUM - 1];
	unsigned char ex_core_num;
	unsigned char flight_mode;
#endif
	unsigned char ex_type;
	EX_LOG_T ex_info;
#ifdef MD_UMOLY_EE_SUPPORT
	/* EX_PL_LOG_T ex_pl_info; */
	unsigned char ex_pl_info[MD_HS1_FAIL_DUMP_SIZE]; /* requested by the modem, changed to 2K; includes EX_PL_LOG_T */
#endif
	unsigned short heart_beat_counter;
	int dtr_state; /* only for USB bypass */
#if PACKET_HISTORY_DEPTH
	struct ccci_log tx_history[MAX_TXQ_NUM][PACKET_HISTORY_DEPTH];
	struct ccci_log rx_history[MAX_RXQ_NUM][PACKET_HISTORY_DEPTH];
	int tx_history_ptr[MAX_TXQ_NUM];
	int rx_history_ptr[MAX_RXQ_NUM];
#endif
	unsigned long logic_ch_pkt_cnt[CCCI_MAX_CH_NUM];
	unsigned long logic_ch_pkt_pre_cnt[CCCI_MAX_CH_NUM];
#ifdef CCCI_SKB_TRACE
	unsigned long long netif_rx_profile[8];
#endif
	int data_usb_bypass;
	int runtime_version;
	smem_sub_region_cb_t sub_region_cb_tbl[SMEM_SUB_REGION_MAX];
	/*
	 * unsigned char private_data[0];
	 * do NOT use this manner, otherwise a spinlock inside private_data will trigger an alignment exception
	 */
};
/* APIs */
extern void ccci_free_req(struct ccci_request *req);
extern void ccci_md_exception_notify(struct ccci_modem *md, MD_EX_STAGE stage);
static inline void ccci_setup_channel_mapping(struct ccci_modem *md)
{
	int i;
	struct ccci_port *port = NULL;

	/* set up the mapping */
	for (i = 0; i < ARRAY_SIZE(md->rx_ch_ports); i++)
		INIT_LIST_HEAD(&md->rx_ch_ports[i]); /* clear the original list */
	for (i = 0; i < md->port_number; i++)
		list_add_tail(&md->ports[i].entry, &md->rx_ch_ports[md->ports[i].rx_ch]);
	for (i = 0; i < ARRAY_SIZE(md->rx_ch_ports); i++) {
		if (!list_empty(&md->rx_ch_ports[i])) {
			list_for_each_entry(port, &md->rx_ch_ports[i], entry) {
				CCCI_DBG_MSG(md->index, CORE, "CH%d ports:%s(%d/%d)\n",
					     i, port->name, port->rx_ch, port->tx_ch);
			}
		}
	}
}
static inline void ccci_reset_seq_num(struct ccci_modem *md)
{
	/* it's redundant to use two arrays, but this makes sequence checking easy */
	memset(md->seq_nums[OUT], 0, sizeof(md->seq_nums[OUT]));
	memset(md->seq_nums[IN], -1, sizeof(md->seq_nums[IN]));
}
/*
 * As one channel can only use one hardware queue, it is safe to call this function
 * under the hardware queue's lock protection.
 */
static inline void ccci_inc_tx_seq_num(struct ccci_modem *md, struct ccci_header *ccci_h)
{
#ifdef FEATURE_SEQ_CHECK_EN
	if (ccci_h->channel >= ARRAY_SIZE(md->seq_nums[OUT]) || ccci_h->channel < 0) {
		CCCI_INF_MSG(md->index, CORE, "ignore seq inc on channel %x\n", *(((u32 *) ccci_h) + 2));
		return; /* for force assert channel, etc. */
	}
	ccci_h->seq_num = md->seq_nums[OUT][ccci_h->channel]++;
	ccci_h->assert_bit = 1;
	/* for the RPC and FS channels, only set assert_bit when the MD is in its single-task phase. */
	/* when the MD is in its multi-task phase, the assert bit should be 0, since IPC tasks are preemptible */
	if ((ccci_h->channel == CCCI_RPC_TX || ccci_h->channel == CCCI_FS_TX) && md->boot_stage != MD_BOOT_STAGE_1)
		ccci_h->assert_bit = 0;
#endif
}
static inline void ccci_chk_rx_seq_num(struct ccci_modem *md, struct ccci_header *ccci_h, int qno)
{
#ifdef FEATURE_SEQ_CHECK_EN
	u16 channel, seq_num, assert_bit;

	channel = ccci_h->channel;
	seq_num = ccci_h->seq_num;
	assert_bit = ccci_h->assert_bit;
	if (md->is_forced_assert == 0 &&
	    assert_bit && md->seq_nums[IN][channel] != 0 && ((seq_num - md->seq_nums[IN][channel]) & 0x7FFF) != 1) {
		CCCI_ERR_MSG(md->index, CORE, "channel %d seq number out-of-order %d->%d\n",
			     channel, seq_num, md->seq_nums[IN][channel]);
		md->ops->dump_info(md, DUMP_FLAG_CLDMA, NULL, qno);
		md->ops->force_assert(md, CCIF_INTR_SEQ);
	} else {
		/* CCCI_INF_MSG(md->index, CORE, "ch %d seq %d->%d %d\n",
			      channel, md->seq_nums[IN][channel], seq_num, assert_bit); */
		md->seq_nums[IN][channel] = seq_num;
	}
#endif
}
static inline void ccci_channel_update_packet_counter(struct ccci_modem *md, struct ccci_header *ccci_h)
{
	if ((ccci_h->channel & 0xFF) < CCCI_MAX_CH_NUM)
		md->logic_ch_pkt_cnt[ccci_h->channel]++;
}
static inline void ccci_channel_dump_packet_counter(struct ccci_modem *md)
{
	CCCI_DBG_MSG(md->index, CORE, "traffic(ch): tx:[%d]%ld, [%d]%ld, [%d]%ld rx:[%d]%ld, [%d]%ld, [%d]%ld\n",
		     CCCI_PCM_TX, md->logic_ch_pkt_cnt[CCCI_PCM_TX],
		     CCCI_UART2_TX, md->logic_ch_pkt_cnt[CCCI_UART2_TX],
		     CCCI_FS_TX, md->logic_ch_pkt_cnt[CCCI_FS_TX],
		     CCCI_PCM_RX, md->logic_ch_pkt_cnt[CCCI_PCM_RX],
		     CCCI_UART2_RX, md->logic_ch_pkt_cnt[CCCI_UART2_RX], CCCI_FS_RX, md->logic_ch_pkt_cnt[CCCI_FS_RX]);
	CCCI_INF_MSG(md->index, CORE,
		     "traffic(net): tx: [%d]%ld %ld, [%d]%ld %ld, [%d]%ld %ld, rx:[%d]%ld, [%d]%ld, [%d]%ld\n",
		     CCCI_CCMNI1_TX, md->logic_ch_pkt_pre_cnt[CCCI_CCMNI1_TX], md->logic_ch_pkt_cnt[CCCI_CCMNI1_TX],
		     CCCI_CCMNI2_TX, md->logic_ch_pkt_pre_cnt[CCCI_CCMNI2_TX], md->logic_ch_pkt_cnt[CCCI_CCMNI2_TX],
		     CCCI_CCMNI3_TX, md->logic_ch_pkt_pre_cnt[CCCI_CCMNI3_TX], md->logic_ch_pkt_cnt[CCCI_CCMNI3_TX],
		     CCCI_CCMNI1_RX, md->logic_ch_pkt_cnt[CCCI_CCMNI1_RX],
		     CCCI_CCMNI2_RX, md->logic_ch_pkt_cnt[CCCI_CCMNI2_RX],
		     CCCI_CCMNI3_RX, md->logic_ch_pkt_cnt[CCCI_CCMNI3_RX]);
}
#define PORT_TXQ_INDEX(p) ((p)->modem->md_state == EXCEPTION ? (p)->txq_exp_index : (p)->txq_index)
#define PORT_RXQ_INDEX(p) ((p)->modem->md_state == EXCEPTION ? (p)->rxq_exp_index : (p)->rxq_index)
/*
 * If send_request returns 0, it is the modem driver's duty to free the request, and the caller should NOT
 * reference the request any more. If it returns an error, the caller is responsible for freeing the request.
 */
static inline int ccci_port_send_request(struct ccci_port *port, struct ccci_request *req)
{
	struct ccci_modem *md = port->modem;
	struct ccci_header *ccci_h = (struct ccci_header *)req->skb->data;

	md->logic_ch_pkt_pre_cnt[ccci_h->channel]++;
	return md->ops->send_request(md, PORT_TXQ_INDEX(port), req, NULL);
}
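/*
 * Caller-side sketch of the ownership rule above (illustrative only): on a
 * 0 return the request belongs to the modem driver; on an error the caller
 * still owns it and must release it, e.g. via ccci_free_req().
 *
 *	int ret = ccci_port_send_request(port, req);
 *
 *	if (ret < 0) {
 *		req->policy = FREE;	// caller's choice: also free the skb
 *		ccci_free_req(req);
 *	}
 */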
/*
 * caller should lock with port->rx_req_lock
 */
static inline int ccci_port_ask_more_request(struct ccci_port *port)
{
	struct ccci_modem *md = port->modem;
	int ret;

	if (port->flags & PORT_F_RX_FULLED)
		ret = md->ops->give_more(port->modem, PORT_RXQ_INDEX(port));
	else
		ret = -1;
	return ret;
}
/* structure initialization */
static inline void ccci_port_struct_init(struct ccci_port *port, struct ccci_modem *md)
{
	INIT_LIST_HEAD(&port->rx_req_list);
	spin_lock_init(&port->rx_req_lock);
	INIT_LIST_HEAD(&port->entry);
	init_waitqueue_head(&port->rx_wq);
	port->rx_length = 0;
	port->tx_busy_count = 0;
	port->rx_busy_count = 0;
	atomic_set(&port->usage_cnt, 0);
	port->modem = md;
	wake_lock_init(&port->rx_wakelock, WAKE_LOCK_SUSPEND, port->name);
}
/*
 * only used while allocating the buffer pool; should NOT be used after a request has been allocated
 */
static inline void ccci_request_struct_init(struct ccci_request *req)
{
	memset(req, 0, sizeof(struct ccci_request));
	req->state = IDLE;
	req->policy = FREE;
	INIT_LIST_HEAD(&req->entry);
}
#ifdef FEATURE_MD_GET_CLIB_TIME
extern volatile int current_time_zone;
#endif
extern void __iomem *md1_excp_smem_vir;
extern unsigned int md1_excp_smem__size;
struct ccci_modem *ccci_allocate_modem(int private_size);
int ccci_register_modem(struct ccci_modem *modem);
int ccci_register_dev_node(const char *name, int major_id, int minor);
struct ccci_port *ccci_get_port_for_node(int major, int minor);
int ccci_send_msg_to_md(struct ccci_modem *md, CCCI_CH ch, CCCI_MD_MSG msg, u32 resv, int blocking);
int ccci_send_virtual_md_msg(struct ccci_modem *md, CCCI_CH ch, CCCI_MD_MSG msg, u32 resv);
struct ccci_modem *ccci_get_modem_by_id(int md_id);
int exec_ccci_kern_func_by_md_id(int md_id, unsigned int id, char *buf, unsigned int len);
void ccci_dump_log_history(struct ccci_modem *md, int dump_multi_rec, int tx_queue_num, int rx_queue_num);
void ccci_dump_log_add(struct ccci_modem *md, DIRECTION dir, int queue_index, struct ccci_header *msg, int is_dropped);
/* common sub-systems */
extern int ccci_subsys_bm_init(void);
extern int ccci_subsys_sysfs_init(void);
extern int ccci_subsys_dfo_init(void);
/* per-modem sub-systems */
extern int ccci_subsys_char_init(struct ccci_modem *md);
extern void md_ex_monitor_func(unsigned long data);
extern void md_ex_monitor2_func(unsigned long data);
extern void md_bootup_timeout_func(unsigned long data);
extern void md_status_poller_func(unsigned long data);
extern void md_status_timeout_func(unsigned long data);
extern void ccci_subsys_kernel_init(void);
extern int ccci_update_rf_desense(struct ccci_modem *md, int rf_desense);
/*
 * If recv_request returns 0 or -CCCI_ERR_DROP_PACKET, it is the port's duty to free the request, and the caller
 * should NOT reference the request any more. If it returns any other error, the caller is responsible for
 * freeing the request.
 */
extern int ccci_port_recv_request(struct ccci_modem *md, struct ccci_request *req, struct sk_buff *skb);
int register_smem_sub_region_mem_func(int md_id, smem_sub_region_cb_t pfunc, int region_id);
#endif /* __CCCI_CORE_H__ */