smi_common.c
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <aee.h>
/* Define SMI_INTERNAL_CCF_SUPPORT when CCF needs to be enabled */
#if !defined(CONFIG_MTK_CLKMGR)
#define SMI_INTERNAL_CCF_SUPPORT
#endif
#if defined(SMI_INTERNAL_CCF_SUPPORT)
#include <linux/clk.h>
/* for ccf clk CB */
#if defined(SMI_D1)
#include "clk-mt6735-pg.h"
#elif defined(SMI_J)
#include "clk-mt6755-pg.h"
#endif
/* notify clk is enabled/disabled for m4u */
#include "m4u.h"
#else
#include <mach/mt_clkmgr.h>
#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
#include <asm/io.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#if IS_ENABLED(CONFIG_COMPAT)
#include <linux/uaccess.h>
#include <linux/compat.h>
#endif
#include <mt_smi.h>
#include "smi_reg.h"
#include "smi_common.h"
#include "smi_debug.h"
#include "smi_info_util.h"
#include "smi_configuration.h"
#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
#include "mmdvfs_mgr.h"
#endif
#undef pr_fmt
#define pr_fmt(fmt) "[SMI]" fmt
#define SMI_LOG_TAG "SMI"
#define LARB_BACKUP_REG_SIZE 128
#define SMI_COMMON_BACKUP_REG_NUM 7
#define SF_HWC_PIXEL_MAX_NORMAL (1920 * 1080 * 7)
#define SF_HWC_PIXEL_MAX_VR (1920 * 1080 * 4 + 1036800) /* 4.5 FHD size */
#define SF_HWC_PIXEL_MAX_VP (1920 * 1080 * 7)
#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (1920 * 1080 * 1)
/* debug level */
static unsigned int smi_debug_level;
#define SMIDBG(level, x...) \
	do { \
		if (smi_debug_level >= (level)) \
			SMIMSG(x); \
	} while (0)
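/*
 * Usage note: SMIDBG(level, ...) only emits its message when the global
 * smi_debug_level is at or above `level`. A minimal sketch (hypothetical
 * call sites, not from this driver):
 *
 *   SMIDBG(1, "verbose path taken\n");   // printed once smi_debug_level >= 1
 *   SMIDBG(2, "very verbose detail\n");  // needs smi_debug_level >= 2
 */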
#define DEFINE_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = { \
	.attr = { \
		.name = #_name, \
		.mode = 0444, \
	}, \
	.show = _name##_show, \
}
#define DEFINE_ATTR_RW(_name) \
static struct kobj_attribute _name##_attr = { \
	.attr = { \
		.name = #_name, \
		.mode = 0644, \
	}, \
	.show = _name##_show, \
	.store = _name##_store, \
}
#define __ATTR_OF(_name) (&_name##_attr.attr)
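/*
 * Expansion sketch: for a hypothetical attribute `foo`, DEFINE_ATTR_RW(foo)
 * expects foo_show()/foo_store() to exist and produces a kobj_attribute
 * roughly equivalent to:
 *
 *   static struct kobj_attribute foo_attr = {
 *           .attr = { .name = "foo", .mode = 0644 },
 *           .show = foo_show,
 *           .store = foo_store,
 *   };
 *
 * __ATTR_OF(foo) then yields &foo_attr.attr for use in an attribute array.
 */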
struct SMI_struct {
	spinlock_t SMI_lock;
	unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT]; /* one bit represents one module */
};
static struct SMI_struct g_SMIInfo;
/* LARB BASE ADDRESS */
unsigned long gLarbBaseAddr[SMI_LARB_NR] = { 0 };
/* DT porting */
unsigned long smi_reg_base_common_ext = 0;
unsigned long smi_reg_base_barb0 = 0;
unsigned long smi_reg_base_barb1 = 0;
unsigned long smi_reg_base_barb2 = 0;
unsigned long smi_reg_base_barb3 = 0;
char *smi_get_region_name(unsigned int region_indx);
static struct smi_device *smi_dev;
static struct device *smiDeviceUevent;
static struct cdev *pSmiDev;
#define SMI_COMMON_REG_INDX 0
#define SMI_LARB0_REG_INDX 1
#define SMI_LARB1_REG_INDX 2
#define SMI_LARB2_REG_INDX 3
#define SMI_LARB3_REG_INDX 4
#if defined(SMI_D2)
#define SMI_REG_REGION_MAX 4
static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
	SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM
};
static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1 };
static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
	larb0_port_backup, larb1_port_backup, larb2_port_backup
};
#elif defined(SMI_D1)
#define SMI_REG_REGION_MAX 5
static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
	SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
};
static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 0, 1 };
static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
	larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup
};
#elif defined(SMI_D3)
#define SMI_REG_REGION_MAX 5
static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
	SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
};
static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1, 1 };
static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
	larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup
};
#elif defined(SMI_R)
#define SMI_REG_REGION_MAX 3
static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
	SMI_LARB1_PORT_NUM
};
static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2 };
static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
	larb0_port_backup, larb1_port_backup
};
#elif defined(SMI_J)
#define SMI_REG_REGION_MAX 5
static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
	SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
};
static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
static unsigned char larb_vc_setting[SMI_LARB_NR] = { 1, 2, 1, 1 };
static unsigned short int *larb_port_backup[SMI_LARB_NR] = { larb0_port_backup,
	larb1_port_backup, larb2_port_backup, larb3_port_backup
};
#endif
static unsigned long gSMIBaseAddrs[SMI_REG_REGION_MAX];
/* SMI COMMON register list to be backed up */
static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = { 0x100, 0x104,
	0x108, 0x10c, 0x110, 0x230, 0x234
};
static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
struct smi_device {
	struct device *dev;
	void __iomem *regs[SMI_REG_REGION_MAX];
#if defined(SMI_INTERNAL_CCF_SUPPORT)
	struct clk *smi_common_clk;
	struct clk *smi_larb0_clk;
	struct clk *img_larb2_clk;
	struct clk *vdec0_vdec_clk;
	struct clk *vdec1_larb_clk;
	struct clk *venc_larb_clk;
	struct clk *venc_venc_clk;
	struct clk *larb0_mtcmos;
	struct clk *larb1_mtcmos;
	struct clk *larb2_mtcmos;
	struct clk *larb3_mtcmos;
#endif
};
/* To keep the HW's init value */
static int is_default_value_saved;
static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
static unsigned int wifi_disp_transaction;
/* larb backup/restore */
#if defined(SMI_INTERNAL_CCF_SUPPORT)
static bool fglarbcallback;
#endif
/* tuning mode, 1 for register ioctl */
static unsigned int smi_tuning_mode;
#if defined(SMI_J)
static unsigned int disable_freq_hopping;
static unsigned int disable_freq_mux = 1;
static unsigned int force_max_mmsys_clk;
static unsigned int force_camera_hpm;
#endif
static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
static unsigned int *pLarbRegBackUp[SMI_LARB_NR];
static int g_bInited;
MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = {
	0, 0, {0, 0}, {0, 0}, {0, 0}, {0, 0}, 0, 0, 0,
	SF_HWC_PIXEL_MAX_NORMAL
};
char *smi_port_name[][21] = {
	{ /* 0 MMSYS */
	 "disp_ovl0", "disp_rdma0", "disp_rdma1", "disp_wdma0", "disp_ovl1",
	 "disp_rdma2", "disp_wdma1", "disp_od_r", "disp_od_w", "mdp_rdma0",
	 "mdp_rdma1", "mdp_wdma", "mdp_wrot0", "mdp_wrot1"},
	{ /* 1 VDEC */
	 "hw_vdec_mc_ext", "hw_vdec_pp_ext", "hw_vdec_ufo_ext", "hw_vdec_vld_ext",
	 "hw_vdec_vld2_ext", "hw_vdec_avc_mv_ext", "hw_vdec_pred_rd_ext",
	 "hw_vdec_pred_wr_ext", "hw_vdec_ppwrap_ext"},
	{ /* 2 ISP */
	 "imgo", "rrzo", "aao", "lcso", "esfko", "imgo_d", "lsci", "lsci_d", "bpci",
	 "bpci_d", "ufdi", "imgi", "img2o", "img3o", "vipi", "vip2i", "vip3i",
	 "lcei", "rb", "rp", "wr"},
	{ /* 3 VENC */
	 "venc_rcpu", "venc_rec", "venc_bsdma", "venc_sv_comv", "venc_rd_comv",
	 "jpgenc_bsdma", "remdc_sdma", "remdc_bsdma", "jpgenc_rdma", "jpgenc_sdma",
	 "jpgdec_wdma", "jpgdec_bsdma", "venc_cur_luma", "venc_cur_chroma",
	 "venc_ref_luma", "venc_ref_chroma", "remdc_wdma", "venc_nbm_rdma",
	 "venc_nbm_wdma"},
	{ /* 4 MJC */
	 "mjc_mv_rd", "mjc_mv_wr", "mjc_dma_rd", "mjc_dma_wr"}
};
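/*
 * Indexing note: smi_port_name[larb][port] maps a (larb, port) pair to a
 * human-readable master name, e.g. smi_port_name[0][9] is "mdp_rdma0" on
 * the MMSYS larb. Rows follow the larb numbering used throughout this file.
 */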
static unsigned long get_register_base(int i);
#if defined(SMI_INTERNAL_CCF_SUPPORT)
static struct clk *get_smi_clk(char *smi_clk_name);
#endif
#if IS_ENABLED(CONFIG_COMPAT)
static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define MTK_SMI_COMPAT_ioctl NULL
#endif
/* Use this function to get the base address of a larb register region, */
/* to support error checking */
unsigned long get_larb_base_addr(int larb_id)
{
	if (larb_id >= SMI_LARB_NR || larb_id < 0)
		return SMI_ERROR_ADDR;
	else
		return gLarbBaseAddr[larb_id];
}
/* 0 for common, 1 for larb0, 2 for larb1... */
unsigned long get_smi_base_addr(int larb_id)
{
	if (larb_id >= SMI_LARB_NR || larb_id < 0)
		return SMI_ERROR_ADDR;
	else
		return gSMIBaseAddrs[larb_id];
}
EXPORT_SYMBOL(get_smi_base_addr);
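/*
 * Usage sketch (hypothetical caller, not part of this driver): both lookup
 * helpers return SMI_ERROR_ADDR for an out-of-range index, so callers should
 * check the result before touching registers:
 *
 *   unsigned long base = get_larb_base_addr(larb_id);
 *
 *   if (base == SMI_ERROR_ADDR)
 *           return -EINVAL;
 *   regval = M4U_ReadReg32(base, SMI_LARB_CON);
 */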
#if defined(SMI_INTERNAL_CCF_SUPPORT)
static struct clk *get_smi_clk(char *smi_clk_name)
{
	struct clk *smi_clk_ptr = NULL;

	smi_clk_ptr = devm_clk_get(smi_dev->dev, smi_clk_name);
	if (IS_ERR(smi_clk_ptr)) {
		SMIMSG("cannot get %s\n", smi_clk_name);
		smi_clk_ptr = NULL;
	}
	return smi_clk_ptr;
}
static void smi_prepare_clk(struct clk *smi_clk, char *name)
{
	if (smi_clk != NULL) {
		int ret = 0;

		ret = clk_prepare(smi_clk);
		if (ret)
			SMIMSG("clk_prepare return error %d, %s\n", ret, name);
	} else {
		SMIMSG("clk_prepare error, smi_clk can't be NULL, %s\n", name);
	}
}
static void smi_enable_clk(struct clk *smi_clk, char *name)
{
	if (smi_clk != NULL) {
		int ret = 0;

		ret = clk_enable(smi_clk);
		if (ret)
			SMIMSG("clk_enable return error %d, %s\n", ret, name);
	} else {
		SMIMSG("clk_enable error, smi_clk can't be NULL, %s\n", name);
	}
}
static void smi_unprepare_clk(struct clk *smi_clk, char *name)
{
	if (smi_clk != NULL)
		clk_unprepare(smi_clk);
	else
		SMIMSG("smi_unprepare error, smi_clk can't be NULL, %s\n", name);
}
static void smi_disable_clk(struct clk *smi_clk, char *name)
{
	if (smi_clk != NULL)
		clk_disable(smi_clk);
	else
		SMIMSG("smi_disable error, smi_clk can't be NULL, %s\n", name);
}
/* end MTCMOS */
#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
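/*
 * CCF pairing note: clk_prepare() may sleep while clk_enable() is atomic,
 * so the wrappers above are meant to be used in matched pairs, e.g. (sketch,
 * assuming a valid clk pointer):
 *
 *   smi_prepare_clk(clk, "caller");   // sleepable context only
 *   smi_enable_clk(clk, "caller");    // safe in atomic context
 *   ...access registers...
 *   smi_disable_clk(clk, "caller");
 *   smi_unprepare_clk(clk, "caller"); // sleepable context only
 */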
static int larb_clock_enable(int larb_id, int enable_mtcmos)
{
#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING)
	char name[30];

	sprintf(name, "smi+%d", larb_id);
	switch (larb_id) {
#if !defined(SMI_INTERNAL_CCF_SUPPORT)
	case 0:
		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
		enable_clock(MT_CG_DISP0_SMI_LARB0, name);
		break;
	case 1:
		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
#if defined(SMI_R)
		enable_clock(MT_CG_LARB1_SMI_CKPDN, name);
#else
		enable_clock(MT_CG_VDEC1_LARB, name);
#endif
		break;
	case 2:
#if !defined(SMI_R)
		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
		enable_clock(MT_CG_IMAGE_LARB2_SMI, name);
#endif
		break;
	case 3:
		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
#if defined(SMI_D1)
		enable_clock(MT_CG_VENC_LARB, name);
#elif defined(SMI_D3)
		enable_clock(MT_CG_VENC_VENC, name);
#endif
		break;
#else
	case 0:
		if (enable_mtcmos)
			smi_enable_clk(smi_dev->larb0_mtcmos, name);
		smi_enable_clk(smi_dev->smi_common_clk, name);
		smi_enable_clk(smi_dev->smi_larb0_clk, name);
		break;
	case 1:
		if (enable_mtcmos) {
			smi_enable_clk(smi_dev->larb0_mtcmos, name);
			smi_enable_clk(smi_dev->larb1_mtcmos, name);
		}
		smi_enable_clk(smi_dev->smi_common_clk, name);
		smi_enable_clk(smi_dev->vdec1_larb_clk, name);
		break;
	case 2:
		if (enable_mtcmos) {
			smi_enable_clk(smi_dev->larb0_mtcmos, name);
			smi_enable_clk(smi_dev->larb2_mtcmos, name);
		}
		smi_enable_clk(smi_dev->smi_common_clk, name);
		smi_enable_clk(smi_dev->img_larb2_clk, name);
		break;
	case 3:
		if (enable_mtcmos) {
			smi_enable_clk(smi_dev->larb0_mtcmos, name);
			smi_enable_clk(smi_dev->larb3_mtcmos, name);
		}
		smi_enable_clk(smi_dev->smi_common_clk, name);
#if defined(SMI_D1)
		smi_enable_clk(smi_dev->venc_larb_clk, name);
#elif defined(SMI_J)
		smi_enable_clk(smi_dev->venc_venc_clk, name);
#endif
		break;
#endif
	default:
		break;
	}
#endif
	return 0;
}
static int larb_clock_prepare(int larb_id, int enable_mtcmos)
{
#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING) && defined(SMI_INTERNAL_CCF_SUPPORT)
	char name[30];

	sprintf(name, "smi+%d", larb_id);
	switch (larb_id) {
	case 0:
		/* must enable MTCMOS before clk */
		/* common MTCMOS is controlled together with larb0_MTCMOS */
		if (enable_mtcmos)
			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
		smi_prepare_clk(smi_dev->smi_common_clk, name);
		smi_prepare_clk(smi_dev->smi_larb0_clk, name);
		break;
	case 1:
		if (enable_mtcmos) {
			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
			smi_prepare_clk(smi_dev->larb1_mtcmos, name);
		}
		smi_prepare_clk(smi_dev->smi_common_clk, name);
		smi_prepare_clk(smi_dev->vdec1_larb_clk, name);
		break;
	case 2:
		if (enable_mtcmos) {
			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
			smi_prepare_clk(smi_dev->larb2_mtcmos, name);
		}
		smi_prepare_clk(smi_dev->smi_common_clk, name);
		smi_prepare_clk(smi_dev->img_larb2_clk, name);
		break;
	case 3:
		if (enable_mtcmos) {
			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
			smi_prepare_clk(smi_dev->larb3_mtcmos, name);
		}
		smi_prepare_clk(smi_dev->smi_common_clk, name);
#if defined(SMI_D1)
		smi_prepare_clk(smi_dev->venc_larb_clk, name);
#elif defined(SMI_J)
		smi_prepare_clk(smi_dev->venc_venc_clk, name);
#endif
		break;
	default:
		break;
	}
#endif
	return 0;
}
static int larb_clock_disable(int larb_id, int enable_mtcmos)
{
#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING)
	char name[30];

	sprintf(name, "smi+%d", larb_id);
	switch (larb_id) {
#if !defined(SMI_INTERNAL_CCF_SUPPORT)
	case 0:
		disable_clock(MT_CG_DISP0_SMI_LARB0, name);
		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
		break;
	case 1:
#if defined(SMI_R)
		disable_clock(MT_CG_LARB1_SMI_CKPDN, name);
#else
		disable_clock(MT_CG_VDEC1_LARB, name);
#endif
		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
		break;
	case 2:
#if !defined(SMI_R)
		disable_clock(MT_CG_IMAGE_LARB2_SMI, name);
		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
#endif
		break;
	case 3:
#if defined(SMI_D1)
		disable_clock(MT_CG_VENC_LARB, name);
#elif defined(SMI_D3)
		disable_clock(MT_CG_VENC_VENC, name);
#endif
		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
		break;
#else
	case 0:
		smi_disable_clk(smi_dev->smi_larb0_clk, name);
		smi_disable_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos)
			smi_disable_clk(smi_dev->larb0_mtcmos, name);
		break;
	case 1:
		smi_disable_clk(smi_dev->vdec1_larb_clk, name);
		smi_disable_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_disable_clk(smi_dev->larb1_mtcmos, name);
			smi_disable_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
	case 2:
		smi_disable_clk(smi_dev->img_larb2_clk, name);
		smi_disable_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_disable_clk(smi_dev->larb2_mtcmos, name);
			smi_disable_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
	case 3:
#if defined(SMI_D1)
		smi_disable_clk(smi_dev->venc_larb_clk, name);
#elif defined(SMI_J)
		smi_disable_clk(smi_dev->venc_venc_clk, name);
#endif
		smi_disable_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_disable_clk(smi_dev->larb3_mtcmos, name);
			smi_disable_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
#endif
	default:
		break;
	}
#endif
	return 0;
}
static int larb_clock_unprepare(int larb_id, int enable_mtcmos)
{
#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING) && defined(SMI_INTERNAL_CCF_SUPPORT)
	char name[30];

	sprintf(name, "smi+%d", larb_id);
	switch (larb_id) {
	case 0:
		/* unprepare clk before MTCMOS, the reverse of the prepare order */
		/* common MTCMOS is controlled together with larb0_MTCMOS */
		smi_unprepare_clk(smi_dev->smi_larb0_clk, name);
		smi_unprepare_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos)
			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
		break;
	case 1:
		smi_unprepare_clk(smi_dev->vdec1_larb_clk, name);
		smi_unprepare_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_unprepare_clk(smi_dev->larb1_mtcmos, name);
			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
	case 2:
		smi_unprepare_clk(smi_dev->img_larb2_clk, name);
		smi_unprepare_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_unprepare_clk(smi_dev->larb2_mtcmos, name);
			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
	case 3:
#if defined(SMI_D1)
		smi_unprepare_clk(smi_dev->venc_larb_clk, name);
#elif defined(SMI_J)
		smi_unprepare_clk(smi_dev->venc_venc_clk, name);
#endif
		smi_unprepare_clk(smi_dev->smi_common_clk, name);
		if (enable_mtcmos) {
			smi_unprepare_clk(smi_dev->larb3_mtcmos, name);
			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
		}
		break;
	default:
		break;
	}
#endif
	return 0;
}
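/*
 * Pairing note: larb_clock_prepare()/larb_clock_unprepare() must bracket
 * larb_clock_enable()/larb_clock_disable(), mirroring the CCF prepare/enable
 * split. smi_bwc_config() below follows this pattern: prepare all larbs
 * outside the spinlock, enable/disable inside it, and unprepare afterwards.
 */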
static void backup_smi_common(void)
{
	int i;

	for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
		g_smi_common_backup[i] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
			(unsigned long)g_smi_common_backup_reg_offset[i]);
	}
}
static void restore_smi_common(void)
{
	int i;

	for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
		M4U_WriteReg32(SMI_COMMON_EXT_BASE,
			       (unsigned long)g_smi_common_backup_reg_offset[i],
			       g_smi_common_backup[i]);
	}
}
static void backup_larb_smi(int index)
{
	int port_index = 0;
	unsigned short int *backup_ptr = NULL;
	unsigned long larb_base = 0;
	unsigned long larb_offset = 0x200;
	int total_port_num = 0;

	/* boundary check for larb_port_num and larb_port_backup access */
	if (index < 0 || index >= SMI_LARB_NR)
		return;
	larb_base = gLarbBaseAddr[index];
	total_port_num = larb_port_num[index];
	backup_ptr = larb_port_backup[index];
	/* boundary check for port value access */
	if (total_port_num <= 0 || backup_ptr == NULL)
		return;
	for (port_index = 0; port_index < total_port_num; port_index++) {
		*backup_ptr = (unsigned short int)(M4U_ReadReg32(larb_base, larb_offset));
		backup_ptr++;
		larb_offset += 4;
	}
	/* backup smi common along with larb0; the smi common clk is guaranteed to be on when processing larbs */
	if (index == 0)
		backup_smi_common();
}
static void restore_larb_smi(int index)
{
	int port_index = 0;
	unsigned short int *backup_ptr = NULL;
	unsigned long larb_base = 0;
	unsigned long larb_offset = 0x200;
	unsigned int backup_value = 0;
	int total_port_num = 0;

	/* boundary check for larb_port_num and larb_port_backup access */
	if (index < 0 || index >= SMI_LARB_NR)
		return;
	larb_base = gLarbBaseAddr[index];
	total_port_num = larb_port_num[index];
	backup_ptr = larb_port_backup[index];
	/* boundary check for port value access */
	if (total_port_num <= 0 || backup_ptr == NULL)
		return;
	/* restore smi common along with larb0; the smi common clk is guaranteed to be on when processing larbs */
	if (index == 0)
		restore_smi_common();
	for (port_index = 0; port_index < total_port_num; port_index++) {
		backup_value = *backup_ptr;
		M4U_WriteReg32(larb_base, larb_offset, backup_value);
		backup_ptr++;
		larb_offset += 4;
	}
	/* we do not backup 0x20 because it is a fixed setting */
	M4U_WriteReg32(larb_base, 0x20, larb_vc_setting[index]);
	/* turn off EMI empty OSTD double, fixed setting */
	M4U_WriteReg32(larb_base, 0x2c, 4);
}
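/*
 * Layout note: the backup/restore loops above assume each larb keeps one
 * 32-bit OSTD setting per port starting at offset 0x200, i.e. port p lives
 * at larb_base + 0x200 + 4 * p, with only the low 16 bits preserved in the
 * unsigned short backup arrays.
 */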
static int larb_reg_backup(int larb)
{
	unsigned int *pReg = pLarbRegBackUp[larb];
	unsigned long larb_base = gLarbBaseAddr[larb];

	*(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
	backup_larb_smi(larb);
	if (0 == larb)
		g_bInited = 0;
	return 0;
}
static int smi_larb_init(unsigned int larb)
{
	unsigned int regval = 0;
	unsigned int regval1 = 0;
	unsigned int regval2 = 0;
	unsigned long larb_base = get_larb_base_addr(larb);

	/* The clock manager has already enabled the LARB clock before the restore
	 * callback is invoked, and disables it after the callback returns.
	 * OSTD has to be enabled before the engine starts. */
	regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
	/* TODO: FIX ME */
	/* regval1 = M4U_ReadReg32(larb_base, SMI_LARB_MON_BUS_REQ0); */
	/* regval2 = M4U_ReadReg32(larb_base, SMI_LARB_MON_BUS_REQ1); */
	if (0 == regval) {
		SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
		M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
	} else {
		SMIMSG("Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n",
		       larb_base, regval, regval1, regval2);
		if (smi_debug_level >= 1) {
			SMIERR("DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
			       larb_base, regval, regval1, regval2);
		} else {
			dump_stack();
		}
	}
	restore_larb_smi(larb);
	return 0;
}
int larb_reg_restore(int larb)
{
	unsigned long larb_base = SMI_ERROR_ADDR;
	unsigned int regval = 0;
	unsigned int *pReg = NULL;

	larb_base = get_larb_base_addr(larb);
	/* The assigned larb doesn't exist */
	if (larb_base == SMI_ERROR_ADDR) {
		SMIMSG("Can't find the base address for Larb%d\n", larb);
		return 0;
	}
	if (larb >= SMI_LARB_NR || larb < 0) {
		SMIMSG("Can't find the backup register value for Larb%d\n", larb);
		return 0;
	}
	pReg = pLarbRegBackUp[larb];
	SMIDBG(1, "+larb_reg_restore(), larb_idx=%d\n", larb);
	SMIDBG(1, "m4u part restore, larb_idx=%d\n", larb);
	/* warning: larb_con is controlled by set/clr */
	regval = *(pReg++);
	M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
	M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
	smi_larb_init(larb);
	return 0;
}
/* callback after larb clock is enabled */
#if !defined(SMI_INTERNAL_CCF_SUPPORT)
void on_larb_power_on(struct larb_monitor *h, int larb_idx)
{
	larb_reg_restore(larb_idx);
}
/* callback before larb clock is disabled */
void on_larb_power_off(struct larb_monitor *h, int larb_idx)
{
	larb_reg_backup(larb_idx);
}
#endif /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
#if defined(SMI_INTERNAL_CCF_SUPPORT)
void on_larb_power_on_with_ccf(int larb_idx)
{
	/* MTCMOS is already enabled; only enable clk here to set register values */
	larb_clock_prepare(larb_idx, 0);
	larb_clock_enable(larb_idx, 0);
	larb_reg_restore(larb_idx);
	larb_clock_disable(larb_idx, 0);
	larb_clock_unprepare(larb_idx, 0);
}
void on_larb_power_off_with_ccf(int larb_idx)
{
	/* enable clk here to read back correct register values */
	larb_clock_prepare(larb_idx, 0);
	larb_clock_enable(larb_idx, 0);
	larb_reg_backup(larb_idx);
	larb_clock_disable(larb_idx, 0);
	larb_clock_unprepare(larb_idx, 0);
}
#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
#if defined(SMI_J)
static void DCM_enable(void)
{
	M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x300, 0x1 + (0x78 << 1) + (0x4 << 8));
	M4U_WriteReg32(LARB0_BASE, 0x14, (0x7 << 8) + (0xf << 4));
	M4U_WriteReg32(LARB1_BASE, 0x14, (0x7 << 8) + (0xf << 4));
	M4U_WriteReg32(LARB2_BASE, 0x14, (0x7 << 8) + (0xf << 4));
	M4U_WriteReg32(LARB3_BASE, 0x14, (0x7 << 8) + (0xf << 4));
}
#endif
/* Fake mode check, e.g. WFD */
static int fake_mode_handling(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
{
	if (p_conf->scenario == SMI_BWC_SCEN_WFD) {
		if (p_conf->b_on_off) {
			wifi_disp_transaction = 1;
			SMIMSG("Enable WFD in profile: %d\n", smi_profile);
		} else {
			wifi_disp_transaction = 0;
			SMIMSG("Disable WFD in profile: %d\n", smi_profile);
		}
		return 1;
	} else {
		return 0;
	}
}
static int ovl_limit_uevent(int bwc_scenario, int ovl_pixel_limit)
{
	int err = 0;
	char *envp[3];
	char scenario_buf[32] = "";
	char ovl_limit_buf[32] = "";

	snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
	snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
	envp[0] = scenario_buf;
	envp[1] = ovl_limit_buf;
	envp[2] = NULL;
	if (smiDeviceUevent != NULL) {
		err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
		SMIMSG("Notify OVL limitation=%d, SCEN=%d", ovl_pixel_limit, bwc_scenario);
	}
	if (err < 0)
		SMIMSG("[%s] kobject_uevent_env error = %d\n", __func__, err);
	return err;
}
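/*
 * Event payload sketch: userspace listening on this device receives a
 * KOBJ_CHANGE uevent whose environment carries the two strings built above,
 * e.g. "SCEN=4" and "HWOVL=14515200" (values are illustrative only).
 */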
#if defined(SMI_INTERNAL_CCF_SUPPORT)
static unsigned int smiclk_subsys_2_larb(enum subsys_id sys)
{
	unsigned int i4larbid = 0;

	switch (sys) {
	case SYS_DIS:
		i4larbid = 0; /* 0 & 4 are disp */
		break;
	case SYS_VDE:
		i4larbid = 1;
		break;
	case SYS_ISP:
		i4larbid = 2;
		break;
	case SYS_VEN:
		i4larbid = 3;
		break;
	default:
		i4larbid = SMI_LARB_NR;
		break;
	}
	return i4larbid;
}
static void smiclk_subsys_after_on(enum subsys_id sys)
{
	unsigned int i4larbid = smiclk_subsys_2_larb(sys);

	if (!fglarbcallback) {
		SMIDBG(1, "no need to restore in cb\n");
		return;
	}
	if (i4larbid < SMI_LARB_NR) {
		on_larb_power_on_with_ccf(i4larbid);
#if defined(SMI_D1)
		/* inform m4u to restore register value */
		m4u_larb_backup((int)i4larbid);
#endif
	} else {
		SMIDBG(1, "no larb to restore for sys %d, larb %u\n", sys, i4larbid);
	}
}
static void smiclk_subsys_before_off(enum subsys_id sys)
{
	unsigned int i4larbid = smiclk_subsys_2_larb(sys);

	if (!fglarbcallback) {
		SMIDBG(1, "no need to backup in cb\n");
		return;
	}
	if (i4larbid < SMI_LARB_NR) {
		on_larb_power_off_with_ccf(i4larbid);
#if defined(SMI_D1)
		/* inform m4u to backup register value */
		m4u_larb_restore((int)i4larbid);
#endif
	} else {
		SMIDBG(1, "no larb to backup for sys %d, larb %u\n", sys, i4larbid);
	}
}
struct pg_callbacks smi_clk_subsys_handle = {
	.before_off = smiclk_subsys_before_off,
	.after_on = smiclk_subsys_after_on
};
#endif
static int smi_bwc_config(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
{
	int i;
	int result = 0;
	unsigned int u4Concurrency = 0;
	MTK_SMI_BWC_SCEN eFinalScen;
	static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;

	if (smi_tuning_mode == 1) {
		SMIMSG("Don't change profile in tuning mode");
		return 0;
	}
	if ((SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario)) {
		SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n", p_conf->scenario);
		return -1;
	}
#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
	if (p_conf->b_on_off) {
		/* set mmdvfs step according to certain scenarios */
		mmdvfs_notify_scenario_enter(p_conf->scenario);
	} else {
		/* set mmdvfs step to default after the scenario exits */
		mmdvfs_notify_scenario_exit(p_conf->scenario);
	}
#endif
	spin_lock(&g_SMIInfo.SMI_lock);
	result = fake_mode_handling(p_conf, pu4LocalCnt);
	spin_unlock(&g_SMIInfo.SMI_lock);
	/* Fake mode is not a real SMI profile, so we need to return here */
	if (result == 1)
		return 0;
#if defined(SMI_INTERNAL_CCF_SUPPORT)
	/* prepare larb clks here because clk_prepare cannot be called inside a spinlock */
	for (i = 0; i < SMI_LARB_NR; i++)
		larb_clock_prepare(i, 1);
#endif
	spin_lock(&g_SMIInfo.SMI_lock);
	if (p_conf->b_on_off) {
		/* turn on certain scenario */
		g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
		if (NULL != pu4LocalCnt)
			pu4LocalCnt[p_conf->scenario] += 1;
	} else {
		/* turn off certain scenario */
		if (0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]) {
			SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
			       p_conf->scenario, g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
		} else {
			g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
		}
		if (NULL != pu4LocalCnt) {
			if (0 == pu4LocalCnt[p_conf->scenario]) {
				SMIMSG("Process : %s did too many turning off for local SMI profile:%d,%d\n",
				       current->comm, p_conf->scenario,
				       pu4LocalCnt[p_conf->scenario]);
			} else {
				pu4LocalCnt[p_conf->scenario] -= 1;
			}
		}
	}
	for (i = 0; i < SMI_BWC_SCEN_CNT; i++) {
		if (g_SMIInfo.pu4ConcurrencyTable[i])
			u4Concurrency |= (1 << i);
	}
#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
	/* notify mmdvfs concurrency */
	mmdvfs_notify_scenario_concurrency(u4Concurrency);
#endif
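	/*
	 * Resolve the final scenario by fixed priority:
	 * MM_GPU > ICFP > VSS > VR_SLOW > VR > VP > SWDEC_VP > VENC > NORMAL.
	 */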
	if ((1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_MM_GPU;
	else if ((1 << SMI_BWC_SCEN_ICFP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_ICFP;
	else if ((1 << SMI_BWC_SCEN_VSS) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VSS;
	else if ((1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VR_SLOW;
	else if ((1 << SMI_BWC_SCEN_VR) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VR;
	else if ((1 << SMI_BWC_SCEN_VP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VP;
	else if ((1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
	else if ((1 << SMI_BWC_SCEN_VENC) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VENC;
	else
		eFinalScen = SMI_BWC_SCEN_NORMAL;
	if (ePreviousFinalScen != eFinalScen) {
		ePreviousFinalScen = eFinalScen;
	} else {
		SMIMSG("Scen equal %d, don't change\n", eFinalScen);
		spin_unlock(&g_SMIInfo.SMI_lock);
#if defined(SMI_INTERNAL_CCF_SUPPORT)
		/* unprepare larb clock */
		for (i = 0; i < SMI_LARB_NR; i++)
			larb_clock_unprepare(i, 1);
#endif
		return 0;
	}
	/* enable larb clock */
	for (i = 0; i < SMI_LARB_NR; i++)
		larb_clock_enable(i, 1);
	smi_profile = eFinalScen;
	smi_bus_regs_setting(smi_profile,
			     smi_profile_config[smi_profile].setting);
	/* Bandwidth Limiter */
	switch (eFinalScen) {
	case SMI_BWC_SCEN_VP:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
		break;
	case SMI_BWC_SCEN_SWDEC_VP:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
		break;
	case SMI_BWC_SCEN_ICFP:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_ICFP");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
		break;
	case SMI_BWC_SCEN_VR:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
		break;
	case SMI_BWC_SCEN_VR_SLOW:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR_SLOW");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	case SMI_BWC_SCEN_VENC:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	case SMI_BWC_SCEN_NORMAL:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	case SMI_BWC_SCEN_MM_GPU:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	default:
		SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	}
	/* disable larb clock */
	for (i = 0; i < SMI_LARB_NR; i++)
		larb_clock_disable(i, 1);
	spin_unlock(&g_SMIInfo.SMI_lock);
#if defined(SMI_INTERNAL_CCF_SUPPORT)
	/* unprepare larb clock */
	for (i = 0; i < SMI_LARB_NR; i++)
		larb_clock_unprepare(i, 1);
#endif
	ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
	/* Force 30 fps in VR slow motion; the disp driver's set-fps APIs take a
	 * mutex, so call them only when necessary. */
	{
		static unsigned int current_fps;

		if ((eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30)) {
			/* force 30 fps in VR slow motion profile */
			primary_display_force_set_vsync_fps(30);
			current_fps = 30;
			SMIMSG("[SMI_PROFILE] set 30 fps\n");
		} else if ((eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30)) {
			/* back to normal fps */
			current_fps = primary_display_get_fps();
			primary_display_force_set_vsync_fps(current_fps);
			SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
		}
	}
	SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
	       (p_conf->b_on_off ? "on" : "off"), eFinalScen,
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
	return 0;
}
#if !defined(SMI_INTERNAL_CCF_SUPPORT)
struct larb_monitor larb_monitor_handler = {
	.level = LARB_MONITOR_LEVEL_HIGH,
	.backup = on_larb_power_off,
	.restore = on_larb_power_on
};
#endif /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
int smi_common_init(void)
{
	int i;
#if defined(SMI_INTERNAL_CCF_SUPPORT)
	struct pg_callbacks *pold = NULL;
#endif
#if defined(SMI_J)
	/* enable DCM */
	DCM_enable();
#endif
	SMIMSG("Enter smi_common_init\n");
	for (i = 0; i < SMI_LARB_NR; i++) {
		pLarbRegBackUp[i] = kzalloc(LARB_BACKUP_REG_SIZE, GFP_KERNEL);
		if (pLarbRegBackUp[i] == NULL)
			SMIERR("pLarbRegBackUp kzalloc fail %d\n", i);
	}
	/*
	 * make sure all larb power is on before we register the callback func.
	 * then, when larb power is first off, the default register values will be backed up.
	 */
	for (i = 0; i < SMI_LARB_NR; i++) {
		larb_clock_prepare(i, 1);
		larb_clock_enable(i, 1);
	}
	/* keep default HW value */
	save_default_common_val(&is_default_value_saved, default_val_smi_l1arb);
	/* set non-constant variables */
	smi_set_nonconstant_variable();
	/* apply init setting after kernel boot */
	SMIMSG("Apply SMI init setting\n");
	smi_bus_regs_setting(SMI_BWC_SCEN_NORMAL, smi_profile_config[SMI_BWC_SCEN_NORMAL].setting);
#if defined(SMI_INTERNAL_CCF_SUPPORT)
	fglarbcallback = true;
	pold = register_pg_callback(&smi_clk_subsys_handle);
	if (pold)
		SMIERR("smi reg clk cb call fail\n");
	else
		SMIMSG("smi reg clk cb call success\n");
#else /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
	register_larb_monitor(&larb_monitor_handler);
#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
	for (i = 0; i < SMI_LARB_NR; i++) {
		larb_clock_disable(i, 1);
		larb_clock_unprepare(i, 1);
	}
	return 0;
}
static int smi_open(struct inode *inode, struct file *file)
{
	file->private_data = kmalloc_array(SMI_BWC_SCEN_CNT, sizeof(unsigned int), GFP_ATOMIC);
	if (NULL == file->private_data) {
		SMIMSG("Not enough memory for SMI open operation\n");
		return -ENOMEM;
	}
	memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
	return 0;
}
static int smi_release(struct inode *inode, struct file *file)
{
#if 0
	unsigned long u4Index = 0;
	unsigned long u4AssignCnt = 0;
	unsigned long *pu4Cnt = (unsigned long *)file->private_data;
	MTK_SMI_BWC_CONFIG config;

	for (; u4Index < SMI_BWC_SCEN_CNT; u4Index += 1) {
		if (pu4Cnt[u4Index]) {
			SMIMSG("Process:%s does not turn off BWC properly , force turn off %d\n",
			       current->comm, u4Index);
			u4AssignCnt = pu4Cnt[u4Index];
			config.b_on_off = 0;
			config.scenario = (MTK_SMI_BWC_SCEN) u4Index;
			do {
				smi_bwc_config(&config, pu4Cnt);
			} while (0 < u4AssignCnt);
		}
	}
#endif
	if (NULL != file->private_data) {
		kfree(file->private_data);
		file->private_data = NULL;
	}
	return 0;
}
static long smi_ioctl(struct file *pFile, unsigned int cmd, unsigned long param)
{
	int ret = 0;
	/* unsigned long *pu4Cnt = (unsigned long *)pFile->private_data; */

	switch (cmd) {
	/* disable reg access ioctl by default for possible security holes */
	/* TBD: check valid SMI register range */
	case MTK_IOC_SMI_BWC_CONFIG:{
		MTK_SMI_BWC_CONFIG cfg;

		ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_CONFIG));
		if (ret) {
			SMIMSG("SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
			return -EFAULT;
		}
		ret = smi_bwc_config(&cfg, NULL);
		break;
	}
	/* GMP start */
	case MTK_IOC_SMI_BWC_INFO_SET:{
		smi_set_mm_info_ioctl_wrapper(pFile, cmd, param);
		break;
	}
	case MTK_IOC_SMI_BWC_INFO_GET:{
		smi_get_mm_info_ioctl_wrapper(pFile, cmd, param);
		break;
	}
	/* GMP end */
	case MTK_IOC_SMI_DUMP_LARB:{
		unsigned int larb_index;

		ret = copy_from_user(&larb_index, (void *)param, sizeof(unsigned int));
		if (ret)
			return -EFAULT;
		smi_dumpLarb(larb_index);
	}
		break;
	case MTK_IOC_SMI_DUMP_COMMON:{
		unsigned int arg;

		ret = copy_from_user(&arg, (void *)param, sizeof(unsigned int));
		if (ret)
			return -EFAULT;
		smi_dumpCommon();
	}
		break;
#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
	case MTK_IOC_MMDVFS_CMD:
	{
		MTK_MMDVFS_CMD mmdvfs_cmd;

		if (copy_from_user(&mmdvfs_cmd, (void *)param, sizeof(MTK_MMDVFS_CMD)))
			return -EFAULT;
		mmdvfs_handle_cmd(&mmdvfs_cmd);
		if (copy_to_user((void *)param, (void *)&mmdvfs_cmd, sizeof(MTK_MMDVFS_CMD)))
			return -EFAULT;
	}
	break;
#endif
	default:
		return -1;
	}
	return ret;
}
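/*
 * Userspace sketch (hypothetical, assuming a /dev/MTK_SMI node created from
 * the device registered in smi_probe() and the ioctl definitions exported
 * via mt_smi.h):
 *
 *   int fd = open("/dev/MTK_SMI", O_RDWR);
 *   MTK_SMI_BWC_CONFIG cfg = { .scenario = SMI_BWC_SCEN_VP, .b_on_off = 1 };
 *
 *   if (fd >= 0 && ioctl(fd, MTK_IOC_SMI_BWC_CONFIG, &cfg) < 0)
 *           perror("MTK_IOC_SMI_BWC_CONFIG");
 */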
  1155. static const struct file_operations smiFops = {
  1156. .owner = THIS_MODULE,
  1157. .open = smi_open,
  1158. .release = smi_release,
  1159. .unlocked_ioctl = smi_ioctl,
  1160. .compat_ioctl = MTK_SMI_COMPAT_ioctl,
  1161. };
  1162. #if defined(SMI_J)
  1163. /*
  1164. static int smi_sel;
  1165. static ssize_t smi_sel_show(struct kobject *kobj, struct kobj_attribute *attr,
  1166. char *buf)
  1167. {
  1168. char *p = buf;
  1169. p += sprintf(p, "%d\n", smi_sel);
  1170. return p - buf;
  1171. }
  1172. static ssize_t smi_sel_store(struct kobject *kobj, struct kobj_attribute *attr,
  1173. const char *buf, size_t count)
  1174. {
  1175. int val;
  1176. if (sscanf(buf, "%d", &val) != 1)
  1177. return -EPERM;
  1178. smi_sel = val;
  1179. return count;
  1180. }
  1181. static ssize_t smi_dbg_show(struct kobject *kobj, struct kobj_attribute *attr,
  1182. char *buf)
  1183. {
  1184. if (smi_sel >= 0 && smi_sel < SMI_LARB_NR)
  1185. smi_dumpLarb(smi_sel);
  1186. else if (smi_sel == 999)
  1187. smi_dumpCommon();
  1188. return 0;
  1189. }
  1190. DEFINE_ATTR_RW(smi_sel);
  1191. DEFINE_ATTR_RO(smi_dbg);
  1192. static struct attribute *smi_attrs[] = {__ATTR_OF(smi_sel), __ATTR_OF(smi_dbg),
  1193. NULL, };
  1194. static struct attribute_group smi_attr_group = {.name = "smi", .attrs =
  1195. smi_attrs, };
  1196. */
  1197. #endif
  1198. static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
  1199. static inline int smi_register(void)
  1200. {
  1201. if (alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI")) {
  1202. SMIERR("Allocate device No. failed");
  1203. return -EAGAIN;
  1204. }
  1205. /* Allocate driver */
  1206. pSmiDev = cdev_alloc();
  1207. if (NULL == pSmiDev) {
  1208. unregister_chrdev_region(smiDevNo, 1);
  1209. SMIERR("Allocate mem for kobject failed");
  1210. return -ENOMEM;
  1211. }
  1212. /* Attatch file operation. */
  1213. cdev_init(pSmiDev, &smiFops);
  1214. pSmiDev->owner = THIS_MODULE;
  1215. /* Add to system */
  1216. if (cdev_add(pSmiDev, smiDevNo, 1)) {
  1217. SMIERR("Attatch file operation failed");
  1218. unregister_chrdev_region(smiDevNo, 1);
  1219. return -EAGAIN;
  1220. }
  1221. return 0;
  1222. }
  1223. static unsigned long get_register_base(int i)
  1224. {
  1225. unsigned long pa_value = 0;
  1226. unsigned long va_value = 0;
  1227. va_value = gSMIBaseAddrs[i];
  1228. pa_value = virt_to_phys((void *)va_value);
  1229. return pa_value;
  1230. }
  1231. void register_base_dump(void)
  1232. {
  1233. int i = 0;
  1234. for (i = 0; i < SMI_REG_REGION_MAX; i++) {
  1235. SMIMSG("REG BASE:%s-->VA=0x%lx,PA=0x%lx\n",
  1236. smi_get_region_name(i), gSMIBaseAddrs[i], get_register_base(i));
  1237. }
  1238. }
  1239. static struct class *pSmiClass;
  1240. static int smi_probe(struct platform_device *pdev)
  1241. {
  1242. int i;
  1243. static unsigned int smi_probe_cnt;
  1244. struct device *smiDevice = NULL;
  1245. SMIMSG("Enter smi_probe\n");
  1246. /* Debug only */
  1247. if (smi_probe_cnt != 0) {
  1248. SMIERR("Only support 1 SMI driver probed\n");
  1249. return 0;
  1250. }
  1251. smi_probe_cnt++;
  1252. SMIMSG("Allocate smi_dev space\n");
  1253. smi_dev = kmalloc(sizeof(struct smi_device), GFP_KERNEL);
  1254. if (smi_dev == NULL) {
  1255. SMIERR("Unable to allocate memory for smi driver\n");
  1256. return -ENOMEM;
  1257. }
  1258. if (NULL == pdev) {
  1259. SMIERR("platform data missed\n");
  1260. return -ENXIO;
  1261. }
  1262. /* Keep the device structure */
  1263. smi_dev->dev = &pdev->dev;
  1264. /* Map registers */
  1265. for (i = 0; i < SMI_REG_REGION_MAX; i++) {
  1266. SMIMSG("Save region: %d\n", i);
  1267. smi_dev->regs[i] = (void *)of_iomap(pdev->dev.of_node, i);
  1268. if (!smi_dev->regs[i]) {
  1269. SMIERR("Unable to ioremap registers, of_iomap fail, i=%d\n", i);
  1270. return -ENOMEM;
  1271. }
  1272. /* Record the register base in global variable */
  1273. gSMIBaseAddrs[i] = (unsigned long)(smi_dev->regs[i]);
  1274. SMIMSG("DT, i=%d, region=%s, map_addr=0x%p, reg_pa=0x%lx\n", i,
  1275. smi_get_region_name(i), smi_dev->regs[i], get_register_base(i));
  1276. }
  1277. #if defined(SMI_INTERNAL_CCF_SUPPORT)
  1278. smi_dev->smi_common_clk = get_smi_clk("smi-common");
  1279. smi_dev->smi_larb0_clk = get_smi_clk("smi-larb0");
  1280. smi_dev->img_larb2_clk = get_smi_clk("img-larb2");
  1281. #if defined(SMI_D1)
  1282. smi_dev->vdec0_vdec_clk = get_smi_clk("vdec0-vdec");
  1283. #endif
  1284. smi_dev->vdec1_larb_clk = get_smi_clk("vdec1-larb");
  1285. smi_dev->venc_larb_clk = get_smi_clk("venc-larb");
  1286. #if defined(SMI_J)
  1287. smi_dev->venc_venc_clk = get_smi_clk("venc-venc");
  1288. #endif
  1289. /* MTCMOS */
  1290. smi_dev->larb1_mtcmos = get_smi_clk("mtcmos-vde");
  1291. smi_dev->larb3_mtcmos = get_smi_clk("mtcmos-ven");
  1292. smi_dev->larb2_mtcmos = get_smi_clk("mtcmos-isp");
  1293. smi_dev->larb0_mtcmos = get_smi_clk("mtcmos-dis");
  1294. #endif
  1295. SMIMSG("Execute smi_register\n");
  1296. if (smi_register()) {
  1297. dev_err(&pdev->dev, "register char failed\n");
  1298. return -EAGAIN;
  1299. }
  1300. pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
  1301. if (IS_ERR(pSmiClass)) {
  1302. int ret = PTR_ERR(pSmiClass);
  1303. SMIERR("Unable to create class, err = %d", ret);
  1304. return ret;
  1305. }
  1306. SMIMSG("Create davice\n");
  1307. smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
  1308. smiDeviceUevent = smiDevice;
  1309. SMIMSG("SMI probe done.\n");
#if defined(SMI_D2)
	/* To adapt the legacy code */
	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
	smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
	/* smi_reg_base_barb4 = gSMIBaseAddrs[SMI_LARB4_REG_INDX]; */
	gLarbBaseAddr[0] = LARB0_BASE;
	gLarbBaseAddr[1] = LARB1_BASE;
	gLarbBaseAddr[2] = LARB2_BASE;
#elif defined(SMI_D1) || defined(SMI_D3) || defined(SMI_J)
	/* To adapt the legacy code; the SMI_D1/SMI_D3 and SMI_J layouts are identical */
	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
	smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
	smi_reg_base_barb3 = gSMIBaseAddrs[SMI_LARB3_REG_INDX];
	gLarbBaseAddr[0] = LARB0_BASE;
	gLarbBaseAddr[1] = LARB1_BASE;
	gLarbBaseAddr[2] = LARB2_BASE;
	gLarbBaseAddr[3] = LARB3_BASE;
#else
	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
	gLarbBaseAddr[0] = LARB0_BASE;
	gLarbBaseAddr[1] = LARB1_BASE;
#endif
	SMIMSG("Execute smi_common_init\n");
	smi_common_init();
	return 0;
}

char *smi_get_region_name(unsigned int region_indx)
{
	switch (region_indx) {
	case SMI_COMMON_REG_INDX:
		return "smi_common";
	case SMI_LARB0_REG_INDX:
		return "larb0";
	case SMI_LARB1_REG_INDX:
		return "larb1";
	case SMI_LARB2_REG_INDX:
		return "larb2";
	case SMI_LARB3_REG_INDX:
		return "larb3";
	default:
		SMIMSG("invalid region id=%u", region_indx);
		return "unknown";
	}
}

static int smi_remove(struct platform_device *pdev)
{
	/* Tear down in the reverse order of creation */
	device_destroy(pSmiClass, smiDevNo);
	class_destroy(pSmiClass);
	cdev_del(pSmiDev);
	unregister_chrdev_region(smiDevNo, 1);
	return 0;
}

static int smi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

static int smi_resume(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id smi_of_ids[] = {
	{ .compatible = "mediatek,smi_common", },
	{}
};

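/*
 * A minimal, hypothetical device-tree node matched by the table above;
 * the unit address and reg ranges here are illustrative only and are not
 * taken from this file (the actual layout is platform-specific):
 *
 *	smi_common: smi@14022000 {
 *		compatible = "mediatek,smi_common";
 *		reg = <0 0x14022000 0 0x1000>;
 *	};
 */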
static struct platform_driver smiDrv = {
	.probe = smi_probe,
	.remove = smi_remove,
	.suspend = smi_suspend,
	.resume = smi_resume,
	.driver = {
		.name = "MTK_SMI",
		.owner = THIS_MODULE,
		.of_match_table = smi_of_ids,
	},
};

static int __init smi_init(void)
{
	SMIMSG("smi_init enter\n");
	spin_lock_init(&g_SMIInfo.SMI_lock);
#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
	/* MMDVFS init */
	mmdvfs_init(&g_smi_bwc_mm_info);
#endif
	memset(g_SMIInfo.pu4ConcurrencyTable, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
	/*
	 * Inform the kernel about the function to be called when hardware
	 * matching MTK_SMI has been found.
	 */
	SMIMSG("register platform driver\n");
	if (platform_driver_register(&smiDrv)) {
		SMIERR("failed to register SMI platform driver");
		return -ENODEV;
	}
	SMIMSG("exit smi_init\n");
	return 0;
}

static void __exit smi_exit(void)
{
	platform_driver_unregister(&smiDrv);
}

void smi_client_status_change_notify(int module, int mode)
{
}

#if defined(SMI_J)
MTK_SMI_BWC_SCEN smi_get_current_profile(void)
{
	return (MTK_SMI_BWC_SCEN)smi_profile;
}
#endif

#if IS_ENABLED(CONFIG_COMPAT)
/*
 * 32-bit process ioctl support.  This is prepared for future extension,
 * since the sizes of the 32-bit and 64-bit SMI parameter structures are
 * currently the same.
 */
struct MTK_SMI_COMPAT_BWC_CONFIG {
	compat_int_t scenario;
	compat_int_t b_on_off;	/* 0: exit this scenario, 1: enter this scenario */
};

struct MTK_SMI_COMPAT_BWC_INFO_SET {
	compat_int_t property;
	compat_int_t value1;
	compat_int_t value2;
};

struct MTK_SMI_COMPAT_BWC_MM_INFO {
	compat_uint_t flag;	/* Reserved */
	compat_int_t concurrent_profile;
	compat_int_t sensor_size[2];
	compat_int_t video_record_size[2];
	compat_int_t display_size[2];
	compat_int_t tv_out_size[2];
	compat_int_t fps;
	compat_int_t video_encode_codec;
	compat_int_t video_decode_codec;
	compat_int_t hw_ovl_limit;
};

#define COMPAT_MTK_IOC_SMI_BWC_CONFIG	MTK_IOW(24, struct MTK_SMI_COMPAT_BWC_CONFIG)
#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET	MTK_IOWR(28, struct MTK_SMI_COMPAT_BWC_INFO_SET)
#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET	MTK_IOWR(29, struct MTK_SMI_COMPAT_BWC_MM_INFO)

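/*
 * The ioctl command value encodes the argument size, so a COMPAT_* number
 * equals its native MTK_IOC_* counterpart exactly when the compat and
 * native structure layouts match; the handler below relies on this to
 * forward such requests without any translation.
 */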
static int compat_get_smi_bwc_config_struct(struct MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
					    MTK_SMI_BWC_CONFIG __user *data)
{
	compat_int_t i;
	int err;

	/* The A32 and A64 int sizes are equal, so no actual conversion is needed here */
	err = get_user(i, &(data32->scenario));
	err |= put_user(i, &(data->scenario));
	err |= get_user(i, &(data32->b_on_off));
	err |= put_user(i, &(data->b_on_off));
	return err;
}

static int compat_get_smi_bwc_mm_info_set_struct(struct MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
						 MTK_SMI_BWC_INFO_SET __user *data)
{
	compat_int_t i;
	int err;

	/* The A32 and A64 int sizes are equal, so no actual conversion is needed here */
	err = get_user(i, &(data32->property));
	err |= put_user(i, &(data->property));
	err |= get_user(i, &(data32->value1));
	err |= put_user(i, &(data->value1));
	err |= get_user(i, &(data32->value2));
	err |= put_user(i, &(data->value2));
	return err;
}

static int compat_get_smi_bwc_mm_info_struct(struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
					     MTK_SMI_BWC_MM_INFO __user *data)
{
	compat_uint_t u;
	compat_int_t i;
	compat_int_t p[2];
	int err;

	/* The A32 and A64 int sizes are equal, so no actual conversion is needed here */
	err = get_user(u, &(data32->flag));
	err |= put_user(u, &(data->flag));
	err |= get_user(i, &(data32->concurrent_profile));
	err |= put_user(i, &(data->concurrent_profile));
	err |= copy_from_user(p, &(data32->sensor_size), sizeof(p));
	err |= copy_to_user(&(data->sensor_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->video_record_size), sizeof(p));
	err |= copy_to_user(&(data->video_record_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->display_size), sizeof(p));
	err |= copy_to_user(&(data->display_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->tv_out_size), sizeof(p));
	err |= copy_to_user(&(data->tv_out_size), p, sizeof(p));
	err |= get_user(i, &(data32->fps));
	err |= put_user(i, &(data->fps));
	err |= get_user(i, &(data32->video_encode_codec));
	err |= put_user(i, &(data->video_encode_codec));
	err |= get_user(i, &(data32->video_decode_codec));
	err |= put_user(i, &(data->video_decode_codec));
	err |= get_user(i, &(data32->hw_ovl_limit));
	err |= put_user(i, &(data->hw_ovl_limit));
	return err;
}

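/*
 * Reverse direction of compat_get_smi_bwc_mm_info_struct(): copy the
 * native MM-info buffer back into the caller's 32-bit layout after the
 * native ioctl handler has filled it in.
 */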
static int compat_put_smi_bwc_mm_info_struct(struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
					     MTK_SMI_BWC_MM_INFO __user *data)
{
	compat_uint_t u;
	compat_int_t i;
	compat_int_t p[2];
	int err;

	/* The A32 and A64 int sizes are equal, so no actual conversion is needed here */
	err = get_user(u, &(data->flag));
	err |= put_user(u, &(data32->flag));
	err |= get_user(i, &(data->concurrent_profile));
	err |= put_user(i, &(data32->concurrent_profile));
	err |= copy_from_user(p, &(data->sensor_size), sizeof(p));
	err |= copy_to_user(&(data32->sensor_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->video_record_size), sizeof(p));
	err |= copy_to_user(&(data32->video_record_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->display_size), sizeof(p));
	err |= copy_to_user(&(data32->display_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->tv_out_size), sizeof(p));
	err |= copy_to_user(&(data32->tv_out_size), p, sizeof(p));
	err |= get_user(i, &(data->fps));
	err |= put_user(i, &(data32->fps));
	err |= get_user(i, &(data->video_encode_codec));
	err |= put_user(i, &(data32->video_encode_codec));
	err |= get_user(i, &(data->video_decode_codec));
	err |= put_user(i, &(data32->video_decode_codec));
	err |= get_user(i, &(data->hw_ovl_limit));
	err |= put_user(i, &(data32->hw_ovl_limit));
	return err;
}

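/*
 * Compat ioctl entry point.  Where the compat and native command values
 * match, the request is forwarded to the native handler unchanged;
 * otherwise a native-layout copy is staged with compat_alloc_user_space()
 * (which carves the buffer out of the user stack) and the translated
 * pointer is passed through instead.
 */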
static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret;

	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
		return -ENOTTY;

	switch (cmd) {
	case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG) {
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			struct MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
			MTK_SMI_BWC_CONFIG __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_config_struct(data32, data);
			if (err)
				return err;
			ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
							 (unsigned long)data);
			return ret;
		}
	}
	case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET) {
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			struct MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
			MTK_SMI_BWC_INFO_SET __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
			if (err)
				return err;
			return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
							  (unsigned long)data);
		}
	}
	case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET) {
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
			MTK_SMI_BWC_MM_INFO __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_mm_info_struct(data32, data);
			if (err)
				return err;
			ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
							 (unsigned long)data);
			err = compat_put_smi_bwc_mm_info_struct(data32, data);
			if (err)
				return err;
			return ret;
		}
	}
	case MTK_IOC_SMI_DUMP_LARB:
	case MTK_IOC_SMI_DUMP_COMMON:
	case MTK_IOC_MMDVFS_CMD:
		/* Same layout on A32 and A64; forward with a converted pointer */
		return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif /* CONFIG_COMPAT */

#if defined(SMI_J)
int is_mmdvfs_freq_hopping_disabled(void)
{
	return disable_freq_hopping;
}

int is_mmdvfs_freq_mux_disabled(void)
{
	return disable_freq_mux;
}

int is_force_max_mmsys_clk(void)
{
	return force_max_mmsys_clk;
}

int is_force_camera_hpm(void)
{
	return force_camera_hpm;
}

/*
 * SMI_J registers smi_init via subsys_initcall(); guard module_init() so
 * the same init routine is not registered twice on those builds.
 */
subsys_initcall(smi_init);
module_param_named(disable_freq_hopping, disable_freq_hopping, uint, S_IRUGO | S_IWUSR);
module_param_named(disable_freq_mux, disable_freq_mux, uint, S_IRUGO | S_IWUSR);
module_param_named(force_max_mmsys_clk, force_max_mmsys_clk, uint, S_IRUGO | S_IWUSR);
module_param_named(force_camera_hpm, force_camera_hpm, uint, S_IRUGO | S_IWUSR);
#else
module_init(smi_init);
#endif
module_exit(smi_exit);
module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);

MODULE_DESCRIPTION("MTK SMI driver");
MODULE_AUTHOR("Kendrick Hsu <kendrick.hsu@mediatek.com>");
MODULE_LICENSE("GPL");