ccci_platform.c

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#ifdef CONFIG_OF
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#endif
#include <mt-plat/mt_ccci_common.h>
#include <mt-plat/mtk_meminfo.h>
#include "ccci_config.h"
#include "ccci_core.h"
#include "ccci_debug.h"
#include "ccci_bm.h"
#include "ccci_platform.h"
#ifdef FEATURE_USING_4G_MEMORY_API
#include <mach/memory.h>
#endif
#ifdef ENABLE_EMI_PROTECTION
#include <mach/emi_mpu.h>
#endif

#define TAG "plat"

static int is_4g_memory_size_support(void)
{
#ifdef FEATURE_USING_4G_MEMORY_API
	return enable_4G();
#else
	return 0;
#endif
}
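/*
 * Note (inferred from the callers below, not from the enable_4G() API itself):
 * when the platform reports 4 GB memory mode, the MPU/remap code treats the
 * kernel base as 0 and only masks physical addresses to 32 bits, instead of
 * using get_phys_offset() / subtracting KERN_EMI_BASE.
 */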
/* =================================================== */
/* MPU region definitions                               */
/* =================================================== */
#ifdef CONFIG_ARCH_MT6735M
#define MPU_REGION_ID_SEC_OS 0
#define MPU_REGION_ID_ATF 1
#define MPU_REGION_ID_MD1_SEC_SMEM 2
#define MPU_REGION_ID_MD1_ROM 3
#define MPU_REGION_ID_MD1_DSP 4
#define MPU_REGION_ID_MD1_SMEM 4
#define MPU_REGION_ID_MD1_RW 5
#define MPU_REGION_ID_CONNSYS 6
#define MPU_REGION_ID_AP 7
/* //////////////////////////// domain order: (D3(MM), D2(CONNSYS), D1(MD), D0(AP)) */
#define MPU_ACCESS_PERMISSON_CLEAR SET_ACCESS_PERMISSON(NO_PROTECTION, NO_PROTECTION, \
	NO_PROTECTION, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_AP_MD1_RO_ATTR SET_ACCESS_PERMISSON(NO_PROTECTION, NO_PROTECTION, \
	SEC_R_NSEC_R, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_AP_ATTR SET_ACCESS_PERMISSON(NO_PROTECTION, FORBIDDEN, \
	FORBIDDEN, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_MD1_ROM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	SEC_R_NSEC_R, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD1_RW_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, FORBIDDEN)
#define MPU_ACCESS_PERMISSON_MD1_SMEM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_MD1_DSP_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	SEC_R_NSEC_R, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD1_DSP_CL_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, SEC_R_NSEC_R)
#else
#define MPU_REGION_ID_SEC_OS 0
#define MPU_REGION_ID_ATF 1
/* #define MPU_REGION_ID_MD32_SMEM 2 */
#define MPU_REGION_ID_MD1_SEC_SMEM 3
#define MPU_REGION_ID_MD2_SEC_SMEM 4
#define MPU_REGION_ID_MD1_ROM 5
#define MPU_REGION_ID_MD1_DSP 6
#define MPU_REGION_ID_MD2_ROM 7
#define MPU_REGION_ID_MD2_RW 8
#define MPU_REGION_ID_MD1_SMEM 9
#define MPU_REGION_ID_MD2_LG3G 10
#define MPU_REGION_ID_MD3_ROM 7
#define MPU_REGION_ID_MD3_RW 8
#define MPU_REGION_ID_MD3_SMEM 10
#define MPU_REGION_ID_WIFI_EMI_FW 12
#define MPU_REGION_ID_WMT 13
#define MPU_REGION_ID_MD1_RW 14
#define MPU_REGION_ID_AP 15
/* ///////////////// domain order: (D7, D6(MFG), D5(MD2), D4(MM), D3(MD32), D2(CONN), D1(MD1), D0(AP)) */
#define MPU_ACCESS_PERMISSON_CLEAR SET_ACCESS_PERMISSON(NO_PROTECTION, NO_PROTECTION, \
	NO_PROTECTION, NO_PROTECTION, NO_PROTECTION, NO_PROTECTION, NO_PROTECTION, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_AP_MD1_RO_ATTR SET_ACCESS_PERMISSON(NO_PROTECTION, NO_PROTECTION, \
	NO_PROTECTION, NO_PROTECTION, NO_PROTECTION, NO_PROTECTION, SEC_R_NSEC_R, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_AP_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, NO_PROTECTION, \
	FORBIDDEN, NO_PROTECTION, FORBIDDEN, FORBIDDEN, FORBIDDEN, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_MD1_ROM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, SEC_R_NSEC_R, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD1_RW_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, NO_PROTECTION, FORBIDDEN)
#define MPU_ACCESS_PERMISSON_MD1_SMEM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, NO_PROTECTION, NO_PROTECTION)
#define MPU_ACCESS_PERMISSON_MD1_DSP_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, SEC_R_NSEC_R, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD1_DSP_CL_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, NO_PROTECTION, FORBIDDEN)
#define MPU_ACCESS_PERMISSON_MD3_ROM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD3_RW_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, SEC_R_NSEC_R)
#define MPU_ACCESS_PERMISSON_MD3_SMEM_ATTR SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, \
	NO_PROTECTION, FORBIDDEN, FORBIDDEN, FORBIDDEN, FORBIDDEN, NO_PROTECTION)
#endif
unsigned long infra_ao_base;
unsigned long dbgapb_base;

/* -- MD1 Bank 0 */
#define MD1_BANK0_MAP0 ((unsigned int *)(infra_ao_base + 0x300))
#define MD1_BANK0_MAP1 ((unsigned int *)(infra_ao_base + 0x304))
/* -- MD1 Bank 4 */
#define MD1_BANK4_MAP0 ((unsigned int *)(infra_ao_base + 0x308))
#define MD1_BANK4_MAP1 ((unsigned int *)(infra_ao_base + 0x30C))
/* -- MD2 Bank 0 */
#define MD2_BANK0_MAP0 ((unsigned int *)(infra_ao_base + 0x310))
#define MD2_BANK0_MAP1 ((unsigned int *)(infra_ao_base + 0x314))
/* -- MD2 Bank 4 */
#define MD2_BANK4_MAP0 ((unsigned int *)(infra_ao_base + 0x318))
#define MD2_BANK4_MAP1 ((unsigned int *)(infra_ao_base + 0x31C))
/* -- MD3 Bank 0 */
#define MD3_BANK0_MAP0 ((unsigned int *)(infra_ao_base + 0x310))
#define MD3_BANK0_MAP1 ((unsigned int *)(infra_ao_base + 0x314))
/* -- MD3 Bank 4 */
#define MD3_BANK4_MAP0 ((unsigned int *)(infra_ao_base + 0x318))
#define MD3_BANK4_MAP1 ((unsigned int *)(infra_ao_base + 0x31C))
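/*
 * Note (inferred from the remap helpers below, not from a register manual):
 * each MDx_BANKn_MAPm register in INFRACFG_AO appears to describe four 32 MB
 * segments of the corresponding 256 MB modem bank, so MAP0 plus MAP1 cover a
 * full bank. MD2 and MD3 use the same register offsets, presumably because
 * only one of them is present on a given platform.
 */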
void ccci_clear_md_region_protection(struct ccci_modem *md)
{
#ifdef ENABLE_EMI_PROTECTION
	unsigned int rom_mem_mpu_id, rw_mem_mpu_id;

	switch (md->index) {
	case MD_SYS1:
		rom_mem_mpu_id = MPU_REGION_ID_MD1_ROM;
		rw_mem_mpu_id = MPU_REGION_ID_MD1_RW;
		break;
#ifndef CONFIG_ARCH_MT6735M
	case MD_SYS2:
		rom_mem_mpu_id = MPU_REGION_ID_MD2_ROM;
		rw_mem_mpu_id = MPU_REGION_ID_MD2_RW;
		break;
	case MD_SYS3:
		rom_mem_mpu_id = MPU_REGION_ID_MD3_ROM;
		rw_mem_mpu_id = MPU_REGION_ID_MD3_RW;
		break;
#endif
	default:
		CCCI_INF_MSG(md->index, TAG, "[error]MD ID invalid when clear MPU protect\n");
		return;
	}

	CCCI_INF_MSG(md->index, TAG, "Clear MPU protect MD ROM region<%d>\n", rom_mem_mpu_id);
	emi_mpu_set_region_protection(0,	/* START_ADDR */
				      0,	/* END_ADDR */
				      rom_mem_mpu_id,	/* region */
				      MPU_ACCESS_PERMISSON_CLEAR);
	CCCI_INF_MSG(md->index, TAG, "Clear MPU protect MD R/W region<%d>\n", rw_mem_mpu_id);
	emi_mpu_set_region_protection(0,	/* START_ADDR */
				      0,	/* END_ADDR */
				      rw_mem_mpu_id,	/* region */
				      MPU_ACCESS_PERMISSON_CLEAR);
#endif
}
void ccci_clear_dsp_region_protection(struct ccci_modem *md)
{
#ifdef ENABLE_EMI_PROTECTION
	unsigned int dsp_mem_mpu_id;

	switch (md->index) {
	case MD_SYS1:
		dsp_mem_mpu_id = MPU_REGION_ID_MD1_DSP;
		break;
	default:
		CCCI_INF_MSG(md->index, TAG, "[error]MD ID invalid when clear MPU protect\n");
		return;
	}

	CCCI_INF_MSG(md->index, TAG, "Clear MPU protect DSP ROM region<%d>\n", dsp_mem_mpu_id);
	emi_mpu_set_region_protection(0,	/* START_ADDR */
				      0,	/* END_ADDR */
				      dsp_mem_mpu_id,	/* region */
				      MPU_ACCESS_PERMISSON_CLEAR);
#endif
}
/*
 * For some unknown reason on MT6582 and MT6572, the MD reads AP memory during
 * boot-up, so we set the AP region as MD read-only at first and re-set it to
 * protected after the MD has booted up. This function should be called right
 * before sending runtime data.
 */
void ccci_set_ap_region_protection(struct ccci_modem *md)
{
#ifdef ENABLE_EMI_PROTECTION
	unsigned int ap_mem_mpu_id, ap_mem_mpu_attr;
	phys_addr_t kernel_base;
	phys_addr_t dram_size;

	if (is_4g_memory_size_support())
		kernel_base = 0;
	else
		kernel_base = get_phys_offset();
#ifdef ENABLE_DRAM_API
	dram_size = get_max_DRAM_size();
#else
	dram_size = 256 * 1024 * 1024;
#endif
	ap_mem_mpu_id = MPU_REGION_ID_AP;
	ap_mem_mpu_attr = MPU_ACCESS_PERMISSON_AP_ATTR;
#if 1
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect AP region<%d:%08x:%08x> %x\n",
		     ap_mem_mpu_id, (unsigned int)kernel_base, (unsigned int)(kernel_base + dram_size - 1),
		     ap_mem_mpu_attr);
	emi_mpu_set_region_protection((unsigned int)kernel_base,
				      (unsigned int)(kernel_base + dram_size - 1), ap_mem_mpu_id, ap_mem_mpu_attr);
#endif
#endif
}
void ccci_set_dsp_region_protection(struct ccci_modem *md, int loaded)
{
#ifdef ENABLE_EMI_PROTECTION
	unsigned int dsp_mem_mpu_id, dsp_mem_mpu_attr;
	unsigned int dsp_mem_phy_start, dsp_mem_phy_end;

	switch (md->index) {
	case MD_SYS1:
		dsp_mem_mpu_id = MPU_REGION_ID_MD1_DSP;
		if (!loaded)
			dsp_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_DSP_ATTR;
		else
			dsp_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_DSP_CL_ATTR;
		break;
	default:
		CCCI_ERR_MSG(md->index, CORE, "[error]invalid when MPU protect\n");
		return;
	}
#ifndef ENABLE_DSP_SMEM_SHARE_MPU_REGION
	dsp_mem_phy_start = (unsigned int)md->mem_layout.dsp_region_phy;
	dsp_mem_phy_end = ((dsp_mem_phy_start + md->mem_layout.dsp_region_size + 0xFFFF) & (~0xFFFF)) - 0x1;
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect DSP region<%d:%08x:%08x> %x\n",
		     dsp_mem_mpu_id, dsp_mem_phy_start, dsp_mem_phy_end, dsp_mem_mpu_attr);
	emi_mpu_set_region_protection(dsp_mem_phy_start, dsp_mem_phy_end, dsp_mem_mpu_id, dsp_mem_mpu_attr);
#else
	if (!loaded) {
		dsp_mem_phy_start = (unsigned int)md->mem_layout.dsp_region_phy;
		dsp_mem_phy_end = ((dsp_mem_phy_start + md->mem_layout.dsp_region_size + 0xFFFF) & (~0xFFFF)) - 0x1;
		CCCI_INF_MSG(md->index, TAG, "MPU Start protect DSP region<%d:%08x:%08x> %x\n",
			     dsp_mem_mpu_id, dsp_mem_phy_start, dsp_mem_phy_end, dsp_mem_mpu_attr);
		emi_mpu_set_region_protection(dsp_mem_phy_start, dsp_mem_phy_end, dsp_mem_mpu_id, dsp_mem_mpu_attr);
	} else {
		unsigned int rom_mem_phy_start, rom_mem_phy_end;
		unsigned int shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_id, shr_mem_mpu_attr;
		unsigned int rw_mem_phy_start, rw_mem_phy_end, rw_mem_mpu_id, rw_mem_mpu_attr;

		switch (md->index) {
		case MD_SYS1:
			rw_mem_mpu_id = MPU_REGION_ID_MD1_RW;
			shr_mem_mpu_id = MPU_REGION_ID_MD1_SMEM;
			rw_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_RW_ATTR;
			shr_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_SMEM_ATTR;
			break;
		default:
			CCCI_ERR_MSG(md->index, CORE, "[error]invalid when MPU protect\n");
			return;
		}
		rom_mem_phy_start = (unsigned int)md->mem_layout.md_region_phy;
		rom_mem_phy_end = ((rom_mem_phy_start + md->img_info[IMG_MD].size + 0xFFFF) & (~0xFFFF)) - 0x1;
		rw_mem_phy_start = rom_mem_phy_end + 0x1;
		rw_mem_phy_end = rom_mem_phy_start + md->mem_layout.md_region_size - 0x1;
		shr_mem_phy_start = (unsigned int)md->mem_layout.smem_region_phy;
		shr_mem_phy_end = ((shr_mem_phy_start + md->mem_layout.smem_region_size + 0xFFFF) & (~0xFFFF)) - 0x1;
		CCCI_INF_MSG(md->index, TAG, "After DSP: MPU Start protect MD Share region<%d:%08x:%08x> %x\n",
			     shr_mem_mpu_id, shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_attr);
		emi_mpu_set_region_protection(shr_mem_phy_start,	/* START_ADDR */
					      shr_mem_phy_end,	/* END_ADDR */
					      shr_mem_mpu_id,	/* region */
					      shr_mem_mpu_attr);
		CCCI_INF_MSG(md->index, TAG, "After DSP: MPU Start protect MD R/W region<%d:%08x:%08x> %x\n",
			     rw_mem_mpu_id, rw_mem_phy_start, rw_mem_phy_end, rw_mem_mpu_attr);
		emi_mpu_set_region_protection(rw_mem_phy_start,	/* START_ADDR */
					      rw_mem_phy_end,	/* END_ADDR */
					      rw_mem_mpu_id,	/* region */
					      rw_mem_mpu_attr);
	}
#endif
#endif
}
void ccci_set_mem_access_protection(struct ccci_modem *md)
{
#ifdef ENABLE_EMI_PROTECTION
	unsigned int shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_id, shr_mem_mpu_attr;
	unsigned int rom_mem_phy_start, rom_mem_phy_end, rom_mem_mpu_id, rom_mem_mpu_attr;
	unsigned int rw_mem_phy_start, rw_mem_phy_end, rw_mem_mpu_id, rw_mem_mpu_attr;
	unsigned int ap_mem_mpu_id, ap_mem_mpu_attr;
	struct ccci_image_info *img_info;
	struct ccci_mem_layout *md_layout;
	phys_addr_t kernel_base;
	phys_addr_t dram_size;

	switch (md->index) {
	case MD_SYS1:
		img_info = &md->img_info[IMG_MD];
		md_layout = &md->mem_layout;
		rom_mem_mpu_id = MPU_REGION_ID_MD1_ROM;
		rw_mem_mpu_id = MPU_REGION_ID_MD1_RW;
		shr_mem_mpu_id = MPU_REGION_ID_MD1_SMEM;
		rom_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_ROM_ATTR;
		rw_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_RW_ATTR;
		shr_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD1_SMEM_ATTR;
		break;
#ifndef CONFIG_ARCH_MT6735M
	case MD_SYS3:
		img_info = &md->img_info[IMG_MD];
		md_layout = &md->mem_layout;
		rom_mem_mpu_id = MPU_REGION_ID_MD3_ROM;
		rw_mem_mpu_id = MPU_REGION_ID_MD3_RW;
		shr_mem_mpu_id = MPU_REGION_ID_MD3_SMEM;
		rom_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD3_ROM_ATTR;
		rw_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD3_RW_ATTR;
		shr_mem_mpu_attr = MPU_ACCESS_PERMISSON_MD3_SMEM_ATTR;
		break;
#endif
	default:
		CCCI_ERR_MSG(md->index, CORE, "[error]invalid when MPU protect\n");
		return;
	}

	if (is_4g_memory_size_support())
		kernel_base = 0;
	else
		kernel_base = get_phys_offset();
#ifdef ENABLE_DRAM_API
	dram_size = get_max_DRAM_size();
#else
	dram_size = 256 * 1024 * 1024;
#endif
	ap_mem_mpu_id = MPU_REGION_ID_AP;
	ap_mem_mpu_attr = MPU_ACCESS_PERMISSON_AP_MD1_RO_ATTR;
	/*
	 * If we set start=0x0, end=0x10000, the actual protected area will be 0x0-0x1FFFF.
	 * Here we use 64 KB alignment; the MPU actually requires only 32 KB alignment since MT6582,
	 * but this works. We assume emi_mpu_set_region_protection will round the end address
	 * down to 64 KB alignment.
	 */
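	/*
	 * Worked example with hypothetical numbers: for start = 0x40000000 and
	 * size = 0x123456, end = ((0x40000000 + 0x123456 + 0xFFFF) & ~0xFFFF) - 1
	 * = 0x4012FFFF, i.e. the region is rounded up to the next 64 KB boundary
	 * and expressed as an inclusive end address.
	 */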
	rom_mem_phy_start = (unsigned int)md_layout->md_region_phy;
	rom_mem_phy_end = ((rom_mem_phy_start + img_info->size + 0xFFFF) & (~0xFFFF)) - 0x1;
	rw_mem_phy_start = rom_mem_phy_end + 0x1;
	rw_mem_phy_end = rom_mem_phy_start + md_layout->md_region_size - 0x1;
#ifdef ENABLE_DSP_SMEM_SHARE_MPU_REGION
	rw_mem_phy_end += md_layout->smem_region_size;
#endif
	shr_mem_phy_start = (unsigned int)md_layout->smem_region_phy;
	shr_mem_phy_end = ((shr_mem_phy_start + md_layout->smem_region_size + 0xFFFF) & (~0xFFFF)) - 0x1;
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect MD ROM region<%d:%08x:%08x> %x, invalid_map=0x%llx\n",
		     rom_mem_mpu_id, rom_mem_phy_start, rom_mem_phy_end, rom_mem_mpu_attr,
		     (unsigned long long)md->invalid_remap_base);
	emi_mpu_set_region_protection(rom_mem_phy_start,	/* START_ADDR */
				      rom_mem_phy_end,	/* END_ADDR */
				      rom_mem_mpu_id,	/* region */
				      rom_mem_mpu_attr);
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect MD R/W region<%d:%08x:%08x> %x\n",
		     rw_mem_mpu_id, rw_mem_phy_start, rw_mem_phy_end, rw_mem_mpu_attr);
	emi_mpu_set_region_protection(rw_mem_phy_start,	/* START_ADDR */
				      rw_mem_phy_end,	/* END_ADDR */
				      rw_mem_mpu_id,	/* region */
				      rw_mem_mpu_attr);
#ifndef ENABLE_DSP_SMEM_SHARE_MPU_REGION
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect MD Share region<%d:%08x:%08x> %x\n",
		     shr_mem_mpu_id, shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_attr);
	emi_mpu_set_region_protection(shr_mem_phy_start,	/* START_ADDR */
				      shr_mem_phy_end,	/* END_ADDR */
				      shr_mem_mpu_id,	/* region */
				      shr_mem_mpu_attr);
#endif
	/* This part needs to be moved to common code */
#if 1
	CCCI_INF_MSG(md->index, TAG, "MPU Start protect AP region<%d:%08x:%08x> %x\n",
		     ap_mem_mpu_id, (unsigned int)kernel_base, (unsigned int)(kernel_base + dram_size - 1),
		     ap_mem_mpu_attr);
	emi_mpu_set_region_protection((unsigned int)kernel_base, (unsigned int)(kernel_base + dram_size - 1),
				      ap_mem_mpu_id, ap_mem_mpu_attr);
#endif
#endif
}
#ifdef ENABLE_DSP_SMEM_SHARE_MPU_REGION
void ccci_set_exp_region_protection(struct ccci_modem *md)
{
	unsigned int shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_id, shr_mem_mpu_attr;

	shr_mem_phy_start = (unsigned int)md->mem_layout.smem_region_phy;
	shr_mem_phy_end = ((shr_mem_phy_start + md->mem_layout.smem_region_size + 0xFFFF) & (~0xFFFF)) - 0x1;
	shr_mem_mpu_id = MPU_REGION_ID_MD1_SMEM;
	shr_mem_mpu_attr = SET_ACCESS_PERMISSON(FORBIDDEN, FORBIDDEN, NO_PROTECTION, NO_PROTECTION);
	CCCI_INF_MSG(md->index, TAG, "After EE: MPU Start protect MD Share region<%d:%08x:%08x> %x\n",
		     shr_mem_mpu_id, shr_mem_phy_start, shr_mem_phy_end, shr_mem_mpu_attr);
	emi_mpu_set_region_protection(shr_mem_phy_start,	/* START_ADDR */
				      shr_mem_phy_end,	/* END_ADDR */
				      shr_mem_mpu_id,	/* region */
				      shr_mem_mpu_attr);
}
#endif
/* This function has been phased out. */
int set_ap_smem_remap(struct ccci_modem *md, phys_addr_t src, phys_addr_t des)
{
	unsigned int remap1_val = 0;
	unsigned int remap2_val = 0;
	static int smem_remapped;

	if (!smem_remapped) {
		smem_remapped = 1;
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + (((INVALID_ADDR >> 16) | 1 << 8) & 0xFF00)
		    + (((INVALID_ADDR >> 8) | 1 << 16) & 0xFF0000)
		    + (((INVALID_ADDR >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = (((INVALID_ADDR >> 24) | 0x1) & 0xFF)
		    + (((INVALID_ADDR >> 16) | 1 << 8) & 0xFF00)
		    + (((INVALID_ADDR >> 8) | 1 << 16) & 0xFF0000)
		    + (((INVALID_ADDR >> 0) | 1 << 24) & 0xFF000000);
		CCCI_INF_MSG(md->index, TAG, "AP Smem remap: [%llx]->[%llx](%08x:%08x)\n", (unsigned long long)des,
			     (unsigned long long)src, remap1_val, remap2_val);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, AP_BANK4_MAP0);
		mt_reg_sync_writel(remap2_val, AP_BANK4_MAP1);
		mt_reg_sync_writel(remap2_val, AP_BANK4_MAP1);	/* HW bug, write twice to activate the setting */
#endif
	}
	return 0;
}
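/*
 * Note on the remap value layout (inferred from the expressions below, not
 * from a hardware manual): each 32-bit MAPx value appears to hold four
 * byte-sized fields, one per 32 MB segment of the bank. Bits [7:1] of each
 * byte look like physical address bits [31:25] of the segment's target, and
 * bit 0 of each byte looks like an enable/valid bit, so two MAPx registers
 * describe a full 256 MB bank. Segments that must not be reachable are
 * pointed at 'invalid' / INVALID_ADDR instead of real memory.
 */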
int set_md_smem_remap(struct ccci_modem *md, phys_addr_t src, phys_addr_t des, phys_addr_t invalid)
{
	unsigned int remap1_val = 0;
	unsigned int remap2_val = 0;

	if (is_4g_memory_size_support())
		des &= 0xFFFFFFFF;
	else
		des -= KERN_EMI_BASE;
	switch (md->index) {
	case MD_SYS1:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 1) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 2) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((des + 0x2000000 * 3) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((des + 0x2000000 * 4) >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 5) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 6) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((des + 0x2000000 * 7) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD1_BANK4_MAP0);
		mt_reg_sync_writel(remap2_val, MD1_BANK4_MAP1);
#endif
		break;
	case MD_SYS2:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 0) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 1) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 2) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((invalid + 0x2000000 * 3) >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 4) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 5) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 6) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD2_BANK4_MAP0);
		mt_reg_sync_writel(remap2_val, MD2_BANK4_MAP1);
#endif
		break;
	case MD_SYS3:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 0) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 1) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 2) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((invalid + 0x2000000 * 3) >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 4) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 5) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 6) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD3_BANK4_MAP0);
		mt_reg_sync_writel(remap2_val, MD3_BANK4_MAP1);
#endif
		break;
	default:
		break;
	}
	CCCI_INF_MSG(md->index, TAG, "MD Smem remap:[%llx]->[%llx](%08x:%08x), invalid_map=0x%llx\n",
		     (unsigned long long)des, (unsigned long long)src, remap1_val, remap2_val,
		     (unsigned long long)md->invalid_remap_base);
	return 0;
}
int set_md_rom_rw_mem_remap(struct ccci_modem *md, phys_addr_t src, phys_addr_t des, phys_addr_t invalid)
{
	unsigned int remap1_val = 0;
	unsigned int remap2_val = 0;

	if (is_4g_memory_size_support())
		des &= 0xFFFFFFFF;
	else
		des -= KERN_EMI_BASE;
	switch (md->index) {
	case MD_SYS1:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 1) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 2) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((des + 0x2000000 * 3) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((des + 0x2000000 * 4) >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 5) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 6) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((des + 0x2000000 * 7) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD1_BANK0_MAP0);
		mt_reg_sync_writel(remap2_val, MD1_BANK0_MAP1);
#endif
		break;
	case MD_SYS2:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 1) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 2) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 7) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((invalid + 0x2000000 * 8) >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 9) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 10) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 11) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD2_BANK0_MAP0);
		mt_reg_sync_writel(remap2_val, MD2_BANK0_MAP1);
#endif
		break;
	case MD_SYS3:
		remap1_val = (((des >> 24) | 0x1) & 0xFF)
		    + ((((des + 0x2000000 * 1) >> 16) | 1 << 8) & 0xFF00)
		    + ((((des + 0x2000000 * 2) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 7) >> 0) | 1 << 24) & 0xFF000000);
		remap2_val = ((((invalid + 0x2000000 * 8) >> 24) | 0x1) & 0xFF)
		    + ((((invalid + 0x2000000 * 9) >> 16) | 1 << 8) & 0xFF00)
		    + ((((invalid + 0x2000000 * 10) >> 8) | 1 << 16) & 0xFF0000)
		    + ((((invalid + 0x2000000 * 11) >> 0) | 1 << 24) & 0xFF000000);
#ifdef ENABLE_MEM_REMAP_HW
		mt_reg_sync_writel(remap1_val, MD3_BANK0_MAP0);
		mt_reg_sync_writel(remap2_val, MD3_BANK0_MAP1);
#endif
		break;
	default:
		break;
	}
	CCCI_INF_MSG(md->index, TAG, "MD ROM mem remap:[%llx]->[%llx](%08x:%08x)\n", (unsigned long long)des,
		     (unsigned long long)src, remap1_val, remap2_val);
	return 0;
}
void ccci_set_mem_remap(struct ccci_modem *md, unsigned long smem_offset, phys_addr_t invalid)
{
	unsigned long remainder;

	if (is_4g_memory_size_support())
		invalid &= 0xFFFFFFFF;
	else
		invalid -= KERN_EMI_BASE;
	md->invalid_remap_base = invalid;
	/* Set share memory remapping */
#if 0				/* no hardware AP remap after MT6592 */
	set_ap_smem_remap(md, 0x40000000, md->mem_layout.smem_region_phy_before_map);
	md->mem_layout.smem_region_phy = smem_offset + 0x40000000;
#endif
	/*
	 * Always remap only the one 32 MB slot where the share memory is located. smem_offset is the
	 * offset between the ROM start address (32 MB aligned) and the share memory start address.
	 * (AP-view smem address) - [(smem_region_phy) - (bank4 start address) - (non-32MB-aligned remainder)]
	 * = (MD-view smem address)
	 */
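	/*
	 * Worked example with hypothetical numbers: md_region_phy = 0x80000000 and
	 * smem_offset = 0x05A00000 give remainder = 0x01A00000, so the 32 MB slot
	 * starting at 0x84000000 is remapped to MD-view 0x40000000. Share memory at
	 * AP address 0x85A00000 is then seen by the MD at 0x41A00000, and
	 * smem_offset_AP_to_MD = 0x85A00000 - (0x01A00000 + 0x40000000) = 0x44000000.
	 */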
	remainder = smem_offset % 0x02000000;
	md->mem_layout.smem_offset_AP_to_MD = md->mem_layout.smem_region_phy - (remainder + 0x40000000);
	set_md_smem_remap(md, 0x40000000, md->mem_layout.md_region_phy + (smem_offset - remainder), invalid);
	CCCI_INF_MSG(md->index, TAG, "AP to MD share memory offset 0x%X", md->mem_layout.smem_offset_AP_to_MD);
	/* Set MD image and R/W runtime memory remapping */
	set_md_rom_rw_mem_remap(md, 0x00000000, md->mem_layout.md_region_phy, invalid);
}
/*
 * When the MD attaches its CodeViser debugger, this bit will be set, so CCCI should disable some
 * checks and operations, as the MD may not respond to us.
 */
unsigned int ccci_get_md_debug_mode(struct ccci_modem *md)
{
	unsigned int dbg_spare;
	static unsigned int debug_setting_flag;

	/* this function does NOT distinguish modem ID, which may be a risk point */
	if ((debug_setting_flag & DBG_FLAG_JTAG) == 0) {
		dbg_spare = ioread32((void __iomem *)(dbgapb_base + 0x10));
		if (dbg_spare & MD_DBG_JTAG_BIT) {
			CCCI_INF_MSG(md->index, TAG, "Jtag Debug mode(%08x)\n", dbg_spare);
			debug_setting_flag |= DBG_FLAG_JTAG;
			mt_reg_sync_writel(dbg_spare & (~MD_DBG_JTAG_BIT), (dbgapb_base + 0x10));
		}
	}
	return debug_setting_flag;
}
EXPORT_SYMBOL(ccci_get_md_debug_mode);
void ccci_get_platform_version(char *ver)
{
#ifdef ENABLE_CHIP_VER_CHECK
	sprintf(ver, "MT%04x_S%02x", get_chip_hw_ver_code(), (get_chip_hw_subcode() & 0xFF));
#else
	sprintf(ver, "MT6735_S00");
#endif
}
#ifdef FEATURE_LOW_BATTERY_SUPPORT
static int ccci_md_low_power_notify(struct ccci_modem *md, LOW_POEWR_NOTIFY_TYPE type, int level)
{
	unsigned int reserve = 0xFFFFFFFF;
	int ret = 0;

	CCCI_INF_MSG(md->index, TAG, "low power notification type=%d, level=%d\n", type, level);
	/*
	 * byte3    byte2    byte1    byte0
	 *    0       4G       3G       2G
	 */
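	/*
	 * For example, LOW_BATTERY_LEVEL_1/2 and BATTERY_PERCENT_LEVEL_1 below set
	 * reserve = 1 << 6 = 64, i.e. bit 6 of byte 0 (the 2G field).
	 */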
	switch (type) {
	case LOW_BATTERY:
		if (level == LOW_BATTERY_LEVEL_0)
			reserve = 0;	/* 0 */
		else if (level == LOW_BATTERY_LEVEL_1 || level == LOW_BATTERY_LEVEL_2)
			reserve = (1 << 6);	/* 64 */
		ret = ccci_send_msg_to_md(md, CCCI_SYSTEM_TX, MD_LOW_BATTERY_LEVEL, reserve, 1);
		if (ret)
			CCCI_ERR_MSG(md->index, TAG, "send low battery notification fail, ret=%d\n", ret);
		break;
	case BATTERY_PERCENT:
		if (level == BATTERY_PERCENT_LEVEL_0)
			reserve = 0;	/* 0 */
		else if (level == BATTERY_PERCENT_LEVEL_1)
			reserve = (1 << 6);	/* 64 */
		ret = ccci_send_msg_to_md(md, CCCI_SYSTEM_TX, MD_LOW_BATTERY_LEVEL, reserve, 1);
		if (ret)
			CCCI_ERR_MSG(md->index, TAG, "send battery percent notification fail, ret=%d\n", ret);
		break;
	default:
		break;
	}
	return ret;
}

static void ccci_md_low_battery_cb(LOW_BATTERY_LEVEL level)
{
	int idx = 0;
	struct ccci_modem *md;

	for (idx = 0; idx < MAX_MD_NUM; idx++) {
		md = ccci_get_modem_by_id(idx);
		if (md != NULL)
			ccci_md_low_power_notify(md, LOW_BATTERY, level);
	}
}

static void ccci_md_battery_percent_cb(BATTERY_PERCENT_LEVEL level)
{
	int idx = 0;
	struct ccci_modem *md;

	for (idx = 0; idx < MAX_MD_NUM; idx++) {
		md = ccci_get_modem_by_id(idx);
		if (md != NULL)
			ccci_md_low_power_notify(md, BATTERY_PERCENT, level);
	}
}
#endif
int ccci_platform_init(struct ccci_modem *md)
{
	return 0;
}

int ccci_plat_common_init(void)
{
	struct device_node *node;

	/* Get infra cfg ao base */
	node = of_find_compatible_node(NULL, NULL, "mediatek,INFRACFG_AO");
	infra_ao_base = (unsigned long)of_iomap(node, 0);
	CCCI_INF_MSG(-1, TAG, "infra_ao_base:0x%p\n", (void *)infra_ao_base);
	node = of_find_compatible_node(NULL, NULL, "mediatek,dbgapb_base");
	dbgapb_base = (unsigned long)of_iomap(node, 0);
	CCCI_INF_MSG(-1, TAG, "dbgapb_base:%pa\n", &dbgapb_base);
#ifdef FEATURE_LOW_BATTERY_SUPPORT
	register_low_battery_notify(&ccci_md_low_battery_cb, LOW_BATTERY_PRIO_MD);
	register_battery_percent_notify(&ccci_md_battery_percent_cb, BATTERY_PERCENT_PRIO_MD);
#endif
	return 0;
}