smi_variant.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760
  1. #include <linux/of.h>
  2. #include <linux/of_irq.h>
  3. #include <linux/of_address.h>
  4. #include <linux/kobject.h>
  5. #include <linux/uaccess.h>
  6. #include <linux/module.h>
  7. #include <linux/platform_device.h>
  8. #include <linux/cdev.h>
  9. #include <linux/mm.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/slab.h>
  12. #include <linux/clk.h>
  13. #include <linux/io.h>
  14. #include <linux/ioctl.h>
  15. #include <linux/fs.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/of_address.h>
  18. #include <linux/of_irq.h>
  19. #include <linux/of_platform.h>
  20. #if IS_ENABLED(CONFIG_COMPAT)
  21. #include <linux/uaccess.h>
  22. #include <linux/compat.h>
  23. #endif
  24. #include "mt_smi.h"
  25. #include "smi_reg.h"
  26. #include "smi_common.h"
  27. #include "smi_debug.h"
  28. #include "smi_priv.h"
  29. #include "m4u.h"
  30. /*#include "mmdvfs_mgr.h"*/
  31. #define SMI_LOG_TAG "SMI"
  32. #define LARB_BACKUP_REG_SIZE 128
  33. #ifdef MT73
  34. #define SMI_COMMON_BACKUP_REG_NUM 10
  35. /* SMI COMMON register list to be backuped */
  36. static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = {
  37. 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x220, 0x230, 0x234, 0x238
  38. };
  39. #elif defined MT27
  40. /*
  41. * MT8127 do not have the following register, offset(0x220, 0x238),
  42. * which are SMI_BUS_SEL and SMI_FIFO2_TH, so do not backup them.
  43. */
  44. #define SMI_COMMON_BACKUP_REG_NUM 8
  45. static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = {
  46. 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x230, 0x234
  47. };
  48. #endif
  49. #define SF_HWC_PIXEL_MAX_NORMAL (2560 * 1600 * 7)
  50. #define SF_HWC_PIXEL_MAX_VR (2560 * 1600 * 7)
  51. #define SF_HWC_PIXEL_MAX_VP (2560 * 1600 * 7)
  52. #define SF_HWC_PIXEL_MAX_ALWAYS_GPU (2560 * 1600 * 1)
  53. #define SMIDBG(level, x...) \
  54. do { if (smi_debug_level >= (level))\
  55. SMIMSG(x);\
  56. } while (0)
  57. struct SMI_struct {
  58. spinlock_t SMI_lock;
  59. /*one bit represent one module */
  60. unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT];
  61. };
  62. static struct SMI_struct g_SMIInfo;
  63. static struct device *smiDeviceUevent;
  64. static bool fglarbcallback; /*larb backuprestore */
  65. struct mtk_smi_data *smi_data;
  66. static struct cdev *pSmiDev;
  67. static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
  68. /* To keep the HW's init value*/
  69. static bool is_default_value_saved;
  70. static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
  71. static unsigned int wifi_disp_transaction;
  72. /* debug level */
  73. static unsigned int smi_debug_level;
  74. /* tuning mode, 1 for register ioctl */
  75. static unsigned int smi_tuning_mode;
  76. static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
  77. static unsigned int *pLarbRegBackUp[SMI_LARB_NR];
  78. static int g_bInited;
  79. static MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = { 0, 0, {0, 0}, {0, 0},
  80. {0, 0}, {0, 0}, 0, 0, 0,
  81. SF_HWC_PIXEL_MAX_NORMAL
  82. };
  83. struct mtk_smi_common {
  84. void __iomem *base;
  85. struct clk *clk_apb;
  86. struct clk *clk_smi;
  87. };
  88. struct mtk_smi_larb {
  89. void __iomem *base;
  90. struct clk *clk_apb;
  91. struct clk *clk_smi;
  92. struct device *smi;
  93. };
  94. static void smi_dumpLarb(unsigned int index);
  95. static void smi_dumpCommon(void);
  96. static int _mtk_smi_larb_get(struct device *larbdev, bool pm);
  97. static void _mtk_smi_larb_put(struct device *larbdev, bool pm);
  98. #if IS_ENABLED(CONFIG_COMPAT)
  99. static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
  100. #else
  101. #define MTK_SMI_COMPAT_ioctl NULL
  102. #endif
  103. /* Use this function to get base address of Larb resgister
  104. * to support error checking
  105. */
  106. static unsigned long get_larb_base_addr(int larb_id)
  107. {
  108. if (larb_id >= SMI_LARB_NR || larb_id < 0 || !smi_data)
  109. return SMI_ERROR_ADDR;
  110. else
  111. return smi_data->larb_base[larb_id];
  112. }
/* Public (non-static) wrapper so other modules can look up a LARB's
 * register base; returns SMI_ERROR_ADDR on a bad index (see
 * get_larb_base_addr()).
 */
unsigned long mtk_smi_larb_get_base(int larbid)
{
	return get_larb_base_addr(larbid);
}
  117. static unsigned int smi_get_larb_index(struct device *dev)
  118. {
  119. unsigned int idx;
  120. for (idx = 0; idx < smi_data->larb_nr; idx++) {
  121. if (smi_data->larb[idx] == dev)
  122. break;
  123. }
  124. return idx;
  125. }
  126. int mtk_smi_larb_clock_on(int larbid, bool pm)
  127. {
  128. if (!smi_data || larbid < 0 || larbid >= smi_data->larb_nr)
  129. return -EINVAL;
  130. return _mtk_smi_larb_get(smi_data->larb[larbid], pm);
  131. }
  132. void mtk_smi_larb_clock_off(int larbid, bool pm)
  133. {
  134. if (!smi_data || larbid < 0 || larbid >= smi_data->larb_nr)
  135. return;
  136. _mtk_smi_larb_put(smi_data->larb[larbid], pm);
  137. }
  138. static void backup_smi_common(void)
  139. {
  140. int i;
  141. for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
  142. g_smi_common_backup[i] =
  143. M4U_ReadReg32(SMI_COMMON_EXT_BASE,
  144. (unsigned long)g_smi_common_backup_reg_offset[i]);
  145. }
  146. }
  147. static void restore_smi_common(void)
  148. {
  149. int i;
  150. for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
  151. M4U_WriteReg32(SMI_COMMON_EXT_BASE,
  152. (unsigned long)g_smi_common_backup_reg_offset[i],
  153. g_smi_common_backup[i]);
  154. }
  155. }
  156. static void backup_larb_smi(int index)
  157. {
  158. int port_index = 0;
  159. unsigned short int *backup_ptr = NULL;
  160. unsigned long larb_base = get_larb_base_addr(index);
  161. unsigned long larb_offset = 0x200;
  162. int total_port_num = 0;
  163. /* boundary check for larb_port_num and larb_port_backup access */
  164. if (index < 0 || index >= SMI_LARB_NR)
  165. return;
  166. total_port_num = smi_data->smi_priv->larb_port_num[index];
  167. backup_ptr = smi_data->larb_port_backup + index*SMI_LARB_PORT_NR_MAX;
  168. /* boundary check for port value access */
  169. if (total_port_num <= 0 || backup_ptr == NULL)
  170. return;
  171. for (port_index = 0; port_index < total_port_num; port_index++) {
  172. *backup_ptr = (unsigned short int)(M4U_ReadReg32(larb_base, larb_offset));
  173. backup_ptr++;
  174. larb_offset += 4;
  175. }
  176. /* backup smi common along with larb0,
  177. * smi common clk is guaranteed to be on when processing larbs */
  178. if (index == 0)
  179. backup_smi_common();
  180. }
  181. static void restore_larb_smi(int index)
  182. {
  183. int port_index = 0;
  184. unsigned short int *backup_ptr = NULL;
  185. unsigned long larb_base = get_larb_base_addr(index);
  186. unsigned long larb_offset = 0x200;
  187. unsigned int backup_value = 0;
  188. int total_port_num = 0;
  189. /* boundary check for larb_port_num and larb_port_backup access */
  190. if (index < 0 || index >= SMI_LARB_NR)
  191. return;
  192. total_port_num = smi_data->smi_priv->larb_port_num[index];
  193. backup_ptr = smi_data->larb_port_backup + index*SMI_LARB_PORT_NR_MAX;
  194. /* boundary check for port value access */
  195. if (total_port_num <= 0 || backup_ptr == NULL)
  196. return;
  197. /* restore smi common along with larb0,
  198. * smi common clk is guaranteed to be on when processing larbs */
  199. if (index == 0)
  200. restore_smi_common();
  201. for (port_index = 0; port_index < total_port_num; port_index++) {
  202. backup_value = *backup_ptr;
  203. M4U_WriteReg32(larb_base, larb_offset, backup_value);
  204. backup_ptr++;
  205. larb_offset += 4;
  206. }
  207. #ifndef MT27
  208. /* we do not backup 0x20 because it is a fixed setting */
  209. M4U_WriteReg32(larb_base, 0x20, smi_data->smi_priv->larb_vc_setting[index]);
  210. #endif
  211. /* turn off EMI empty OSTD dobule, fixed setting */
  212. M4U_WriteReg32(larb_base, 0x2c, 4);
  213. }
  214. static int larb_reg_backup(int larb)
  215. {
  216. unsigned int *pReg = pLarbRegBackUp[larb];
  217. unsigned long larb_base = get_larb_base_addr(larb);
  218. *(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
  219. /* *(pReg++) = M4U_ReadReg32(larb_base, SMI_SHARE_EN); */
  220. /* *(pReg++) = M4U_ReadReg32(larb_base, SMI_ROUTE_SEL); */
  221. backup_larb_smi(larb);
  222. if (0 == larb)
  223. g_bInited = 0;
  224. #ifndef MT27
  225. m4u_larb_backup_sec(larb);
  226. #endif
  227. return 0;
  228. }
/* Re-initialise one LARB after its clock/power comes back: enable the
 * per-port OSTD limiters while the LARB is idle, then restore the
 * backed-up port settings via restore_larb_smi().  Always returns 0.
 */
static int smi_larb_init(unsigned int larb)
{
	unsigned int regval = 0;
	/* regval1/regval2 are only reported in the busy log below; the
	 * reads that would fill them are commented out (todo) */
	unsigned int regval1 = 0;
	unsigned int regval2 = 0;
	unsigned long larb_base = get_larb_base_addr(larb);
	/* Clock manager enable LARB clock before call back restore already,
	 * it will be disabled after restore call back returns.
	 * Got to enable OSTD before engine starts. */
	regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
	/*todo */
	/* regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0); */
	/* regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1); */
	/* STAT == 0 means the LARB is idle and OSTD can be enabled safely */
	if (0 == regval) {
		SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
		M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
	} else {
		/* LARB busy: skip OSTD enable and dump state for debugging */
		SMIMSG("Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n", larb_base,
		       regval, regval1, regval2);
		smi_dumpDebugMsg();
		if (smi_debug_level >= 1) {
			SMIERR("DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
			       larb_base, regval, regval1, regval2);
		} else {
			dump_stack();
		}
	}
	restore_larb_smi(larb);
	return 0;
}
  259. int larb_reg_restore(int larb)
  260. {
  261. unsigned long larb_base = SMI_ERROR_ADDR;
  262. unsigned int regval = 0;
  263. unsigned int *pReg = NULL;
  264. larb_base = get_larb_base_addr(larb);
  265. /* The larb assign doesn't exist */
  266. if (larb_base == SMI_ERROR_ADDR) {
  267. SMIMSG("Can't find the base address for Larb%d\n", larb);
  268. return 0;
  269. }
  270. pReg = pLarbRegBackUp[larb];
  271. SMIDBG(1, "+larb_reg_restore(), larb_idx=%d\n", larb);
  272. SMIDBG(1, "m4u part restore, larb_idx=%d\n", larb);
  273. /*warning: larb_con is controlled by set/clr */
  274. regval = *(pReg++);
  275. M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
  276. M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
  277. /*M4U_WriteReg32(larb_base, SMI_SHARE_EN, *(pReg++) ); */
  278. /*M4U_WriteReg32(larb_base, SMI_ROUTE_SEL, *(pReg++) ); */
  279. smi_larb_init(larb);
  280. #ifndef MT27
  281. m4u_larb_restore_sec(larb);
  282. #endif
  283. return 0;
  284. }
  285. /* Fake mode check, e.g. WFD */
  286. static int fake_mode_handling(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
  287. {
  288. if (p_conf->scenario == SMI_BWC_SCEN_WFD) {
  289. if (p_conf->b_on_off) {
  290. wifi_disp_transaction = 1;
  291. SMIMSG("Enable WFD in profile: %d\n", smi_profile);
  292. } else {
  293. wifi_disp_transaction = 0;
  294. SMIMSG("Disable WFD in profile: %d\n", smi_profile);
  295. }
  296. return 1;
  297. } else {
  298. return 0;
  299. }
  300. }
  301. static int ovl_limit_uevent(int bwc_scenario, int ovl_pixel_limit)
  302. {
  303. int err = 0;
  304. char *envp[3];
  305. char scenario_buf[32] = "";
  306. char ovl_limit_buf[32] = "";
  307. /* scenario_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); */
  308. /* ovl_limit_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); */
  309. snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
  310. snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
  311. envp[0] = scenario_buf;
  312. envp[1] = ovl_limit_buf;
  313. envp[2] = NULL;
  314. if (pSmiDev != NULL) {
  315. /* err = kobject_uevent_env(&(pSmiDev->kobj), KOBJ_CHANGE, envp); */
  316. /* use smi_data->dev.lobj instead */
  317. /* err = kobject_uevent_env(&(smi_data->dev->kobj), KOBJ_CHANGE, envp); */
  318. /* user smiDeviceUevent->kobj instead */
  319. err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
  320. SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit, bwc_scenario);
  321. }
  322. /* kfree(scenario_buf); */
  323. /* kfree(ovl_limit_buf); */
  324. if (err < 0)
  325. SMIMSG(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err);
  326. return err;
  327. }
/* Core bandwidth-control entry point: reference-count the requested
 * scenario on/off, derive the single highest-priority active scenario,
 * and program the matching register profile through the per-chip
 * smi_priv callbacks.  Returns 0 on success, -1 on a bad scenario.
 *
 * pu4LocalCnt, when non-NULL, is the caller's per-process scenario
 * reference table (mirrors the global one for leak detection).
 *
 * Locking: g_SMIInfo.SMI_lock protects the concurrency table and the
 * profile switch; all LARB clocks are turned on around the register
 * programming and turned off again on every exit path below.
 */
static int smi_bwc_config(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
{
	int i;
	int result = 0;
	unsigned int u4Concurrency = 0;
	MTK_SMI_BWC_SCEN eFinalScen;
	/* remembers the last applied profile across calls; SMI_BWC_SCEN_CNT
	 * means "none applied yet" */
	static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
	struct mtk_smi_priv *smicur = (struct mtk_smi_priv *)smi_data->smi_priv;

	if (smi_tuning_mode == 1) {
		SMIMSG("Doesn't change profile in tunning mode");
		return 0;
	}
	spin_lock(&g_SMIInfo.SMI_lock);
	result = fake_mode_handling(p_conf, pu4LocalCnt);
	spin_unlock(&g_SMIInfo.SMI_lock);
	/* Fake mode is not a real SMI profile, so we need to return here */
	if (result == 1)
		return 0;
	if ((SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario)) {
		SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n", p_conf->scenario);
		return -1;
	}
#if 0
	if (p_conf->b_on_off) {
		/* set mmdvfs step according to certain scenarios */
		mmdvfs_notify_scenario_enter(p_conf->scenario);
	} else {
		/* set mmdvfs step to default after the scenario exits */
		mmdvfs_notify_scenario_exit(p_conf->scenario);
	}
#endif
	/* turn on larb clock so the profile registers can be written */
	for (i = 0; i < SMI_LARB_NR; i++)
		mtk_smi_larb_clock_on(i, true);
	spin_lock(&g_SMIInfo.SMI_lock);
	if (p_conf->b_on_off) {
		/* turn on certain scenario */
		g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
		if (NULL != pu4LocalCnt)
			pu4LocalCnt[p_conf->scenario] += 1;
	} else {
		/* turn off certain scenario; refuse to underflow the count */
		if (0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]) {
			SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
			       p_conf->scenario, g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
		} else {
			g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
		}
		if (NULL != pu4LocalCnt) {
			if (0 == pu4LocalCnt[p_conf->scenario]) {
				SMIMSG
				    ("Process : %s did too many turning off for local SMI profile:%d,%d\n",
				     current->comm, p_conf->scenario,
				     pu4LocalCnt[p_conf->scenario]);
			} else {
				pu4LocalCnt[p_conf->scenario] -= 1;
			}
		}
	}
	/* collapse the reference counts into a bitmask of active scenarios */
	for (i = 0; i < SMI_BWC_SCEN_CNT; i++) {
		if (g_SMIInfo.pu4ConcurrencyTable[i])
			u4Concurrency |= (1 << i);
	}
	/* priority ladder: pick the most demanding active scenario.
	 * NOTE(review): ICFP maps to the VR profile, not a dedicated one —
	 * presumably intentional; confirm with the chip BWC spec. */
	if ((1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_MM_GPU;
	else if ((1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VR_SLOW;
	else if ((1 << SMI_BWC_SCEN_VR) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VR;
	else if ((1 << SMI_BWC_SCEN_ICFP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VR;
	else if ((1 << SMI_BWC_SCEN_VP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VP;
	else if ((1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
	else if ((1 << SMI_BWC_SCEN_VENC) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_VENC;
	else if ((1 << SMI_BWC_SCEN_HDMI) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_HDMI;
	else if ((1 << SMI_BWC_SCEN_HDMI4K) & u4Concurrency)
		eFinalScen = SMI_BWC_SCEN_HDMI4K;
	else
		eFinalScen = SMI_BWC_SCEN_NORMAL;
	/* no profile change needed: skip register programming entirely */
	if (ePreviousFinalScen == eFinalScen) {
		SMIMSG("Scen equal%d,don't change\n", eFinalScen);
		goto err_clkoff;
	} else {
		ePreviousFinalScen = eFinalScen;
	}
	smi_profile = eFinalScen;
	/* Bandwidth Limiter: program the per-chip register profile */
	switch (eFinalScen) {
	case SMI_BWC_SCEN_VP:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
		smicur->vp_setting(smi_data);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
		break;
	case SMI_BWC_SCEN_SWDEC_VP:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
		smicur->vp_setting(smi_data);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
		break;
	case SMI_BWC_SCEN_VR:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
		smicur->vr_setting(smi_data);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
		break;
	case SMI_BWC_SCEN_VR_SLOW:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
		smi_profile = SMI_BWC_SCEN_VR_SLOW;
		smicur->vr_setting(smi_data);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
		break;
	case SMI_BWC_SCEN_VENC:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
		smicur->vr_setting(smi_data);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	case SMI_BWC_SCEN_NORMAL:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		smicur->init_setting(smi_data, &is_default_value_saved,
				     default_val_smi_l1arb, smi_data->larb_nr);
		break;
	case SMI_BWC_SCEN_MM_GPU:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		smicur->init_setting(smi_data, &is_default_value_saved,
				     default_val_smi_l1arb, smi_data->larb_nr);
		break;
	case SMI_BWC_SCEN_HDMI:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_HDMI");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		smicur->hdmi_setting(smi_data);
		break;
	case SMI_BWC_SCEN_HDMI4K:
		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_HDMI4K");
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		smicur->hdmi_4k_setting(smi_data);
		break;
	default:
		SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
		smicur->init_setting(smi_data, &is_default_value_saved,
				     default_val_smi_l1arb, smi_data->larb_nr);
		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
		break;
	}
	spin_unlock(&g_SMIInfo.SMI_lock);
	/*turn off larb clock */
	for (i = 0; i < SMI_LARB_NR; i++)
		mtk_smi_larb_clock_off(i, true);
	/* Since send uevent may trigger sleeping, we must send the event after releasing spin lock */
	ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
#ifndef MT27
	/* force 30 fps in VR slow motion, because disp driver set fps apis got mutex,
	 * call these APIs only when necessary */
	{
		static unsigned int current_fps;

		if ((eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30)) {
			/* force 30 fps in VR slow motion profile */
			primary_display_force_set_vsync_fps(30);
			current_fps = 30;
			SMIMSG("[SMI_PROFILE] set 30 fps\n");
		} else if ((eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30)) {
			/* back to normal fps */
			current_fps = primary_display_get_fps();
			primary_display_force_set_vsync_fps(current_fps);
			SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
		}
	}
#endif
	SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
	       (p_conf->b_on_off ? "on" : "off"), eFinalScen,
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
	return 0;
	/* early-out path: drop the lock and undo the clock enables above */
err_clkoff:
	spin_unlock(&g_SMIInfo.SMI_lock);
	/*turn off larb clock */
	for (i = 0; i < SMI_LARB_NR; i++)
		mtk_smi_larb_clock_off(i, true);
	return 0;
}
  521. /*
  522. const struct dev_pm_ops mtk_smi_pm_ops = {
  523. SET_SYSTEM_SLEEP_PM_OPS(smiclk_subsys_before_off, smiclk_subsys_after_on)
  524. };*/
  525. int smi_common_init(void)
  526. {
  527. int i;
  528. for (i = 0; i < SMI_LARB_NR; i++) {
  529. pLarbRegBackUp[i] = kmalloc(LARB_BACKUP_REG_SIZE, GFP_KERNEL | __GFP_ZERO);
  530. if (pLarbRegBackUp[i] == NULL)
  531. SMIERR("pLarbRegBackUp kmalloc fail %d\n", i);
  532. }
  533. for (i = 0; i < smi_data->larb_nr; i++)
  534. mtk_smi_larb_clock_on(i, true);
  535. /* apply init setting after kernel boot */
  536. smi_data->smi_priv->init_setting(smi_data, &is_default_value_saved,
  537. default_val_smi_l1arb, smi_data->larb_nr);
  538. fglarbcallback = true;
  539. for (i = smi_data->larb_nr; i >= 0; i--)
  540. mtk_smi_larb_clock_off(i, true);
  541. return 0;
  542. }
  543. static int smi_open(struct inode *inode, struct file *file)
  544. {
  545. file->private_data = kmalloc_array(SMI_BWC_SCEN_CNT, sizeof(unsigned int), GFP_ATOMIC);
  546. if (NULL == file->private_data) {
  547. SMIMSG("Not enough entry for DDP open operation\n");
  548. return -ENOMEM;
  549. }
  550. memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
  551. return 0;
  552. }
  553. static int smi_release(struct inode *inode, struct file *file)
  554. {
  555. if (NULL != file->private_data) {
  556. kfree(file->private_data);
  557. file->private_data = NULL;
  558. }
  559. return 0;
  560. }
  561. /* GMP start */
  562. void smi_bwc_mm_info_set(int property_id, long val1, long val2)
  563. {
  564. switch (property_id) {
  565. case SMI_BWC_INFO_CON_PROFILE:
  566. g_smi_bwc_mm_info.concurrent_profile = (int)val1;
  567. break;
  568. case SMI_BWC_INFO_SENSOR_SIZE:
  569. g_smi_bwc_mm_info.sensor_size[0] = val1;
  570. g_smi_bwc_mm_info.sensor_size[1] = val2;
  571. break;
  572. case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
  573. g_smi_bwc_mm_info.video_record_size[0] = val1;
  574. g_smi_bwc_mm_info.video_record_size[1] = val2;
  575. break;
  576. case SMI_BWC_INFO_DISP_SIZE:
  577. g_smi_bwc_mm_info.display_size[0] = val1;
  578. g_smi_bwc_mm_info.display_size[1] = val2;
  579. break;
  580. case SMI_BWC_INFO_TV_OUT_SIZE:
  581. g_smi_bwc_mm_info.tv_out_size[0] = val1;
  582. g_smi_bwc_mm_info.tv_out_size[1] = val2;
  583. break;
  584. case SMI_BWC_INFO_FPS:
  585. g_smi_bwc_mm_info.fps = (int)val1;
  586. break;
  587. case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
  588. g_smi_bwc_mm_info.video_encode_codec = (int)val1;
  589. break;
  590. case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
  591. g_smi_bwc_mm_info.video_decode_codec = (int)val1;
  592. break;
  593. }
  594. }
  595. /* GMP end */
/* ioctl dispatcher for /dev/MTK_SMI.
 * Returns 0 on success, -EFAULT on a bad user pointer, -1 on an
 * unknown command, or the smi_bwc_config() result for BWC_CONFIG.
 * All user buffers are copied with copy_from_user/copy_to_user before
 * use; `param` is a userspace pointer for every command.
 */
static long smi_ioctl(struct file *pFile, unsigned int cmd, unsigned long param)
{
	int ret = 0;
	/* unsigned long * pu4Cnt = (unsigned long *)pFile->private_data; */
	switch (cmd) {
	/* switch BWC scenario on/off */
	case MTK_IOC_SMI_BWC_CONFIG:
		{
			MTK_SMI_BWC_CONFIG cfg;

			ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_CONFIG));
			if (ret) {
				SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
				return -EFAULT;
			}
			ret = smi_bwc_config(&cfg, NULL);
		}
		break;
	/* GMP start */
	/* set one property of the shared MM info block */
	case MTK_IOC_SMI_BWC_INFO_SET:
		{
			MTK_SMI_BWC_INFO_SET cfg;
			/* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... start"); */
			ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_INFO_SET));
			if (ret) {
				SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_to_user failed: %d\n", ret);
				return -EFAULT;
			}
			/* Set the address to the value assigned by user space program */
			smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
			/* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish"); */
			break;
		}
	/* copy the whole MM info block out to userspace */
	case MTK_IOC_SMI_BWC_INFO_GET:
		{
			ret = copy_to_user((void *)param, (void *)&g_smi_bwc_mm_info,
					   sizeof(MTK_SMI_BWC_MM_INFO));
			if (ret) {
				SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n", ret);
				return -EFAULT;
			}
			/* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish"); */
			break;
		}
	/* GMP end */
	/* debug dump of one LARB's registers to the kernel log */
	case MTK_IOC_SMI_DUMP_LARB:
		{
			unsigned int larb_index;

			ret = copy_from_user(&larb_index, (void *)param, sizeof(unsigned int));
			if (ret)
				return -EFAULT;
			smi_dumpLarb(larb_index);
		}
		break;
	/* debug dump of the SMI common registers; the argument is read but
	 * unused beyond validating the user pointer */
	case MTK_IOC_SMI_DUMP_COMMON:
		{
			unsigned int arg;

			ret = copy_from_user(&arg, (void *)param, sizeof(unsigned int));
			if (ret)
				return -EFAULT;
			smi_dumpCommon();
		}
		break;
	/*case MTK_IOC_MMDVFS_CMD:
		{
			MTK_MMDVFS_CMD mmdvfs_cmd;

			if (copy_from_user(&mmdvfs_cmd, (void *)param, sizeof(MTK_MMDVFS_CMD)))
				return -EFAULT;
			mmdvfs_handle_cmd(&mmdvfs_cmd);
			if (copy_to_user
			    ((void *)param, (void *)&mmdvfs_cmd, sizeof(MTK_MMDVFS_CMD)))
				return -EFAULT;
			break;
		}*/
	default:
		/* NOTE(review): -1 (EPERM to userspace) — the conventional
		 * return for an unknown ioctl is -ENOTTY; kept as-is since
		 * callers may depend on it */
		return -1;
	}
	return ret;
}
/* File operations for the /dev/MTK_SMI character device. */
static const struct file_operations smiFops = {
	.owner = THIS_MODULE,
	.open = smi_open,
	.release = smi_release,
	.unlocked_ioctl = smi_ioctl,
	/* NULL when CONFIG_COMPAT is disabled (see the macro near the top) */
	.compat_ioctl = MTK_SMI_COMPAT_ioctl
};
/* Device number; re-assigned by alloc_chrdev_region() in smi_register(). */
static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
  681. static inline int smi_register(void)
  682. {
  683. if (alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI")) {
  684. SMIERR("Allocate device No. failed");
  685. return -EAGAIN;
  686. }
  687. /* Allocate driver */
  688. pSmiDev = cdev_alloc();
  689. if (NULL == pSmiDev) {
  690. unregister_chrdev_region(smiDevNo, 1);
  691. SMIERR("Allocate mem for kobject failed");
  692. return -ENOMEM;
  693. }
  694. /* Attatch file operation. */
  695. cdev_init(pSmiDev, &smiFops);
  696. pSmiDev->owner = THIS_MODULE;
  697. /* Add to system */
  698. if (cdev_add(pSmiDev, smiDevNo, 1)) {
  699. SMIERR("Attatch file operation failed");
  700. unregister_chrdev_region(smiDevNo, 1);
  701. return -EAGAIN;
  702. }
  703. return 0;
  704. }
/* Device class backing /sys/class/MTK_SMI; created in smi_dev_register(). */
static struct class *pSmiClass;
  706. static int smi_dev_register(void)
  707. {
  708. int ret;
  709. struct device *smiDevice = NULL;
  710. if (smi_register()) {
  711. pr_err("register SMI failed\n");
  712. return -EAGAIN;
  713. }
  714. pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
  715. if (IS_ERR(pSmiClass)) {
  716. ret = PTR_ERR(pSmiClass);
  717. SMIERR("Unable to create class, err = %d", ret);
  718. return ret;
  719. }
  720. smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
  721. smiDeviceUevent = smiDevice;
  722. return 0;
  723. }
  724. static int mtk_smi_common_get(struct device *smidev, bool pm)
  725. {
  726. struct mtk_smi_common *smipriv = dev_get_drvdata(smidev);
  727. int ret;
  728. if (pm) {
  729. ret = pm_runtime_get_sync(smidev);
  730. if (ret < 0)
  731. return ret;
  732. }
  733. ret = clk_prepare_enable(smipriv->clk_apb);
  734. if (ret) {
  735. dev_err(smidev, "Failed to enable the apb clock\n");
  736. goto err_put_pm;
  737. }
  738. ret = clk_prepare_enable(smipriv->clk_smi);
  739. if (ret) {
  740. dev_err(smidev, "Failed to enable the smi clock\n");
  741. goto err_disable_apb;
  742. }
  743. return ret;
  744. err_disable_apb:
  745. clk_disable_unprepare(smipriv->clk_apb);
  746. err_put_pm:
  747. if (pm)
  748. pm_runtime_put_sync(smidev);
  749. return ret;
  750. }
  751. static void mtk_smi_common_put(struct device *smidev, bool pm)
  752. {
  753. struct mtk_smi_common *smipriv = dev_get_drvdata(smidev);
  754. if (pm)
  755. pm_runtime_put_sync(smidev);
  756. clk_disable_unprepare(smipriv->clk_smi);
  757. clk_disable_unprepare(smipriv->clk_apb);
  758. }
  759. static int _mtk_smi_larb_get(struct device *larbdev, bool pm)
  760. {
  761. struct mtk_smi_larb *larbpriv = dev_get_drvdata(larbdev);
  762. int ret;
  763. ret = mtk_smi_common_get(larbpriv->smi, pm);
  764. if (ret)
  765. return ret;
  766. if (pm) {
  767. ret = pm_runtime_get_sync(larbdev);
  768. if (ret < 0)
  769. goto err_put_smicommon;
  770. }
  771. ret = clk_prepare_enable(larbpriv->clk_apb);
  772. if (ret) {
  773. dev_err(larbdev, "Failed to enable the apb clock\n");
  774. goto err_put_pm;
  775. }
  776. ret = clk_prepare_enable(larbpriv->clk_smi);
  777. if (ret) {
  778. dev_err(larbdev, "Failed to enable the smi clock\n");
  779. goto err_disable_apb;
  780. }
  781. return ret;
  782. err_disable_apb:
  783. clk_disable_unprepare(larbpriv->clk_apb);
  784. err_put_pm:
  785. if (pm)
  786. pm_runtime_put_sync(larbdev);
  787. err_put_smicommon:
  788. mtk_smi_common_put(larbpriv->smi, pm);
  789. return ret;
  790. }
  791. static void _mtk_smi_larb_put(struct device *larbdev, bool pm)
  792. {
  793. struct mtk_smi_larb *larbpriv = dev_get_drvdata(larbdev);
  794. clk_disable_unprepare(larbpriv->clk_smi);
  795. clk_disable_unprepare(larbpriv->clk_apb);
  796. if (pm)
  797. pm_runtime_put_sync(larbdev);
  798. mtk_smi_common_put(larbpriv->smi, pm);
  799. }
  800. /* The power is alway on during power-domain callback.*/
  801. static int mtk_smi_larb_runtime_suspend(struct device *dev)
  802. {
  803. unsigned int idx = smi_get_larb_index(dev);
  804. int ret;
  805. if (!fglarbcallback)
  806. return 0;
  807. if (idx >= SMI_LARB_NR)
  808. return 0;
  809. ret = _mtk_smi_larb_get(dev, false);
  810. if (ret) {
  811. dev_warn(dev, "runtime suspend clk-warn larb%d\n", idx);
  812. return 0;
  813. }
  814. larb_reg_backup(idx);
  815. _mtk_smi_larb_put(dev, false);
  816. dev_dbg(dev, "runtime suspend larb%d\n", idx);
  817. return 0;
  818. }
  819. static int mtk_smi_larb_runtime_resume(struct device *dev)
  820. {
  821. unsigned int idx = smi_get_larb_index(dev);
  822. int ret;
  823. if (!fglarbcallback)
  824. return 0;
  825. if (idx >= SMI_LARB_NR)
  826. return 0;
  827. ret = _mtk_smi_larb_get(dev, false);
  828. if (ret) {
  829. dev_warn(dev, "runtime resume clk-warn larb%d\n", idx);
  830. return 0;
  831. }
  832. larb_reg_restore(idx);
  833. _mtk_smi_larb_put(dev, false);
  834. dev_dbg(dev, "runtime resume larb%d\n", idx);
  835. return 0;
  836. }
  837. /* modify this to avoid build error when runtime_pm not configured */
/* Runtime-PM callbacks: back up / restore larb registers around power
 * gating (see the two callbacks above). */
static const struct dev_pm_ops mtk_smi_larb_ops = {
	.runtime_suspend = mtk_smi_larb_runtime_suspend,
	.runtime_resume = mtk_smi_larb_runtime_resume,
};
  842. static int mtk_smi_larb_probe(struct platform_device *pdev)
  843. {
  844. struct mtk_smi_larb *larbpriv;
  845. struct resource *res;
  846. struct device *dev = &pdev->dev;
  847. struct device_node *smi_node;
  848. struct platform_device *smi_pdev;
  849. int ret, larbid;
  850. if (!dev->pm_domain)
  851. return -EPROBE_DEFER;
  852. larbpriv = devm_kzalloc(dev, sizeof(*larbpriv), GFP_KERNEL);
  853. if (!larbpriv)
  854. return -ENOMEM;
  855. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  856. larbpriv->base = devm_ioremap_resource(dev, res);
  857. if (IS_ERR(larbpriv->base))
  858. return PTR_ERR(larbpriv->base);
  859. larbpriv->clk_apb = devm_clk_get(dev, "apb");
  860. if (IS_ERR(larbpriv->clk_apb))
  861. return PTR_ERR(larbpriv->clk_apb);
  862. larbpriv->clk_smi = devm_clk_get(dev, "smi");
  863. if (IS_ERR(larbpriv->clk_smi))
  864. return PTR_ERR(larbpriv->clk_smi);
  865. smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
  866. if (!smi_node)
  867. return -EINVAL;
  868. ret = of_property_read_u32(dev->of_node, "mediatek,larbid", &larbid);
  869. if (ret)
  870. return ret;
  871. smi_pdev = of_find_device_by_node(smi_node);
  872. of_node_put(smi_node);
  873. if (smi_pdev) {
  874. larbpriv->smi = &smi_pdev->dev;
  875. } else {
  876. dev_err(dev, "Failed to get the smi_common device\n");
  877. return -EINVAL;
  878. }
  879. smi_data->larb_base[larbid] = (unsigned long)larbpriv->base;
  880. smi_data->larb[larbid] = dev;
  881. smi_data->larb_nr++;
  882. SMIMSG("larb %d-cnt %d probe done\n", larbid, smi_data->larb_nr);
  883. pm_runtime_enable(dev);
  884. dev_set_drvdata(dev, larbpriv);
  885. return 0;
  886. }
/* Teardown: disable runtime PM; devm_* resources are released by the core.
 * NOTE(review): the smi_data->larb*[] entries are not cleared here — confirm
 * nothing dereferences them after unbind. */
static int mtk_smi_larb_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}
/* DT match table for local arbiters on mt8173 and mt8127. */
static const struct of_device_id mtk_smi_larb_of_ids[] = {
	{ .compatible = "mediatek,mt8173-smi-larb", },
	{ .compatible = "mediatek,mt8127-smi-larb", },
	{}
};
/* Platform driver for the local arbiters; PM ops only when CONFIG_PM to
 * avoid a build error in non-PM configs (see comment above the ops). */
static struct platform_driver mtk_smi_larb_driver = {
	.probe = mtk_smi_larb_probe,
	.remove = mtk_smi_larb_remove,
	.driver = {
		.name = "mtk-smi-larb",
		.of_match_table = mtk_smi_larb_of_ids,
#ifdef CONFIG_PM
		.pm = &mtk_smi_larb_ops,
#endif
	}
};
  908. static int mtk_smi_probe(struct platform_device *pdev)
  909. {
  910. struct device *dev = &pdev->dev;
  911. struct mtk_smi_common *smipriv;
  912. struct resource *res;
  913. if (!dev->pm_domain)
  914. return -EPROBE_DEFER;
  915. smipriv = devm_kzalloc(dev, sizeof(*smipriv), GFP_KERNEL);
  916. if (!smipriv)
  917. return -ENOMEM;
  918. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  919. smipriv->base = devm_ioremap_resource(dev, res);
  920. if (IS_ERR(smipriv->base))
  921. return PTR_ERR(smipriv->base);
  922. smipriv->clk_apb = devm_clk_get(dev, "apb");
  923. if (IS_ERR(smipriv->clk_apb))
  924. return PTR_ERR(smipriv->clk_apb);
  925. smipriv->clk_smi = devm_clk_get(dev, "smi");
  926. if (IS_ERR(smipriv->clk_smi))
  927. return PTR_ERR(smipriv->clk_smi);
  928. smi_data->smicommon = dev;
  929. smi_data->smi_common_base = (unsigned long)smipriv->base;
  930. pm_runtime_enable(dev);
  931. dev_set_drvdata(dev, smipriv);
  932. return 0;
  933. }
/* Teardown: disable runtime PM; devm_* resources are released by the core. */
static int mtk_smi_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}
/* DT match table for the SMI common block on mt8173 and mt8127. */
static const struct of_device_id mtk_smi_of_ids[] = {
	{ .compatible = "mediatek,mt8173-smi",},
	{ .compatible = "mediatek,mt8127-smi",},
	{}
};
/* Platform driver for the SMI common block. */
static struct platform_driver mtk_smi_driver = {
	.probe = mtk_smi_probe,
	.remove = mtk_smi_remove,
	.driver = {
		.name = "mtk-smi",
		.of_match_table = mtk_smi_of_ids,
	}
};
  952. static int __init smi_init(void)
  953. {
  954. int ret;
  955. smi_data = kzalloc(sizeof(*smi_data), GFP_KERNEL);
  956. if (smi_data == NULL) {
  957. SMIERR("Unable to allocate memory for smi driver");
  958. return -ENOMEM;
  959. }
  960. ret = platform_driver_register(&mtk_smi_driver);
  961. if (ret != 0) {
  962. pr_err("Failed to register SMI driver\n");
  963. return ret;
  964. }
  965. ret = platform_driver_register(&mtk_smi_larb_driver);
  966. if (ret != 0) {
  967. pr_err("Failed to register SMI-LARB driver\n");
  968. return ret;
  969. }
  970. ret = smi_dev_register();
  971. if (ret) {
  972. SMIMSG("register dev/smi failed\n");
  973. return ret;
  974. }
  975. memset(g_SMIInfo.pu4ConcurrencyTable, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
  976. spin_lock_init(&g_SMIInfo.SMI_lock);
  977. SMI_DBG_Init();
  978. #if defined MT73
  979. smi_data->smi_priv = &smi_mt8173_priv;
  980. #elif defined MT27
  981. smi_data->smi_priv = &smi_mt8127_priv;
  982. #endif
  983. SMIMSG("smi_init done\n");
  984. return 0;
  985. }
  986. static void __exit smi_exit(void)
  987. {
  988. platform_driver_unregister(&mtk_smi_driver);
  989. platform_driver_unregister(&mtk_smi_larb_driver);
  990. }
/* Late initcall: clock/MTCMOS setup must wait until the common clock
 * framework is ready, hence the separate late_initcall registration. */
static int __init smi_init_late(void)
{
	/*init clk/mtcmos should be late while ccf */
	SMIMSG("smi_init_late-\n");
	smi_common_init();
	return 0;
}
/*
 * Print a fixed set of SMI COMMON registers through SMIMSG.  The register
 * list differs per chip (MT73 = mt8173, MT27 = mt8127); offsets are raw
 * hardware offsets from SMI_COMMON_EXT_BASE.
 */
static void smi_dumpCommonDebugMsg(void)
{
	unsigned long u4Base;

	/* SMI COMMON dump */
	SMIMSG("===SMI common reg dump===\n");
	u4Base = SMI_COMMON_EXT_BASE;
	SMIMSG("[0x200,0x204,0x208]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x200),
	       M4U_ReadReg32(u4Base, 0x204), M4U_ReadReg32(u4Base, 0x208));
	SMIMSG("[0x20C,0x210,0x214]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x20C),
	       M4U_ReadReg32(u4Base, 0x210), M4U_ReadReg32(u4Base, 0x214));
#ifdef MT73
	SMIMSG("[0x220,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x220),
	       M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base,
	       0x238));
	SMIMSG("[0x400,0x404,0x408]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x400),
	       M4U_ReadReg32(u4Base, 0x404), M4U_ReadReg32(u4Base, 0x408));
#elif defined MT27
	SMIMSG("[0x218,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x218),
	       M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base,
	       0x238));
	SMIMSG("[0x400,0x404,]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x400),
	       M4U_ReadReg32(u4Base, 0x404));
#endif
	/* TBD: M4U should dump these, the offset of MT27 have been checked and same with the followings. */
	/*
	   For VA and PA check:
	   0x1000C5C0 , 0x1000C5C4, 0x1000C5C8, 0x1000C5CC, 0x1000C5D0
	   u4Base = SMI_COMMON_AO_BASE;
	   SMIMSG("===SMI always on reg dump===\n");
	   SMIMSG("[0x5C0,0x5C4,0x5C8]=[0x%x,0x%x,0x%x]\n" ,
	   M4U_ReadReg32(u4Base , 0x5C0),M4U_ReadReg32(u4Base , 0x5C4),
	   M4U_ReadReg32(u4Base , 0x5C8));
	   SMIMSG("[0x5CC,0x5D0]=[0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5CC),
	   M4U_ReadReg32(u4Base , 0x5D0));
	 */
}
/*
 * Print a fixed set of registers for one local arbiter via SMIMSG.
 * The offset list is chip-specific (MT73 vs MT27); on MT27 the per-larb
 * port table at 0x200.. is walked one word per larb slot.
 */
static void smi_dumpLarbDebugMsg(unsigned int u4Index)
{
	unsigned long u4Base;

	u4Base = get_larb_base_addr(u4Index);
	if (u4Base == SMI_ERROR_ADDR) {
		SMIMSG("Doesn't support reg dump for Larb%d\n", u4Index);
	} else {
		SMIMSG("===SMI LARB%d reg dump===\n", u4Index);
#ifdef MT73
		SMIMSG("[0x0,0x8,0x10]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
		       M4U_ReadReg32(u4Base, 0x8), M4U_ReadReg32(u4Base, 0x10));
		SMIMSG("[0x24,0x50,0x60]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x24),
		       M4U_ReadReg32(u4Base, 0x50), M4U_ReadReg32(u4Base, 0x60));
		SMIMSG("[0xa0,0xa4,0xa8]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xa0),
		       M4U_ReadReg32(u4Base, 0xa4), M4U_ReadReg32(u4Base, 0xa8));
		SMIMSG("[0xac,0xb0,0xb4]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xac),
		       M4U_ReadReg32(u4Base, 0xb0), M4U_ReadReg32(u4Base, 0xb4));
		SMIMSG("[0xb8,0xbc,0xc0]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xb8),
		       M4U_ReadReg32(u4Base, 0xbc), M4U_ReadReg32(u4Base, 0xc0));
		SMIMSG("[0xc8,0xcc]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xc8),
		       M4U_ReadReg32(u4Base, 0xcc));
#elif defined MT27
		{
			unsigned int u4Offset = 0;

			SMIMSG("[0x0,0x10,0x60]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
			       M4U_ReadReg32(u4Base, 0x10), M4U_ReadReg32(u4Base, 0x60));
			SMIMSG("[0x64,0x8c,0x450]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x64),
			       M4U_ReadReg32(u4Base, 0x8c), M4U_ReadReg32(u4Base, 0x450));
			SMIMSG("[0x454,0x600,0x604]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x454),
			       M4U_ReadReg32(u4Base, 0x600), M4U_ReadReg32(u4Base, 0x604));
			SMIMSG("[0x610,0x614]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x610),
			       M4U_ReadReg32(u4Base, 0x614));
			for (u4Offset = 0x200; u4Offset < 0x200 + SMI_LARB_NR * 4; u4Offset += 4)
				SMIMSG("[0x%x = 0x%x ]\n", u4Offset, M4U_ReadReg32(u4Base , u4Offset));
		}
#endif
	}
}
  1072. static void smi_dump_format(unsigned long base, unsigned int from, unsigned int to)
  1073. {
  1074. int i, j, left;
  1075. unsigned int value[8];
  1076. for (i = from; i <= to; i += 32) {
  1077. for (j = 0; j < 8; j++)
  1078. value[j] = M4U_ReadReg32(base, i + j * 4);
  1079. SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1], value[2], value[3],
  1080. value[4], value[5], value[6], value[7]);
  1081. }
  1082. left = ((from - to) / 4 + 1) % 8;
  1083. if (left) {
  1084. memset(value, 0, 8 * sizeof(unsigned int));
  1085. for (j = 0; j < left; j++)
  1086. value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
  1087. SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0], value[1],
  1088. value[2], value[3], value[4], value[5], value[6], value[7]);
  1089. }
  1090. }
  1091. static void smi_dumpLarb(unsigned int index)
  1092. {
  1093. unsigned long u4Base;
  1094. u4Base = get_larb_base_addr(index);
  1095. if (u4Base == SMI_ERROR_ADDR) {
  1096. SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
  1097. } else {
  1098. SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
  1099. smi_dump_format(u4Base, 0, 0x434);
  1100. smi_dump_format(u4Base, 0xF00, 0xF0C);
  1101. }
  1102. }
/* Dump the SMI COMMON register window [0x1A0, 0x418] via smi_dump_format(). */
static void smi_dumpCommon(void)
{
	SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
	smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x418);
}
  1108. void smi_dumpDebugMsg(void)
  1109. {
  1110. unsigned int u4Index;
  1111. /* SMI COMMON dump */
  1112. smi_dumpCommonDebugMsg();
  1113. /* dump all SMI LARB */
  1114. for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++)
  1115. smi_dumpLarbDebugMsg(u4Index);
  1116. }
/*
 * smi_debug_bus_hanging_detect - sample the SMI common and larb busy bits
 * five times and classify each selected larb into a status code (1..9) that
 * says whether a hang looks like an SMI problem or an engine problem.
 *
 * @larbs:     bitmask of larbs to check (tested via SMI_DGB_LARB_SELECT).
 * @show_dump: non-zero to also print the full register dumps per sample.
 *
 * Returns 1 if the pattern points at an SMI/MMU configuration issue
 * (status 2 with a non-zero MMU port config), otherwise 0.  Compiled to a
 * stub returning 0 when CONFIG_MTK_SMI_EXT is off (the trailing "return 0;"
 * also makes the body after #endif unreachable when it is on).
 */
int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump)
{
#ifdef CONFIG_MTK_SMI_EXT
	int i = 0;
	int dump_time = 0;
	int is_smi_issue = 0;
	int status_code = 0;
	/* Keep the dump result */
	unsigned char smi_common_busy_count = 0;
	/*volatile */ unsigned int reg_temp = 0;
	unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
	unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };

	/* dump resister and save resgister status */
	/* Sample 5 times: count how often the common block and each larb
	 * report busy. */
	for (dump_time = 0; dump_time < 5; dump_time++) {
		unsigned int u4Index = 0;

		/* NOTE(review): bit 30 of common reg 0x400 is treated as an
		 * "idle" flag (0 => busy) — confirm against the SMI datasheet. */
		reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x400);
		if ((reg_temp & (1 << 30)) == 0) {
			/* smi common is busy */
			smi_common_busy_count++;
		}
		/* Dump smi common regs */
		if (show_dump != 0)
			smi_dumpCommonDebugMsg();
		for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++) {
			unsigned long u4Base = get_larb_base_addr(u4Index);

			if (u4Base != SMI_ERROR_ADDR) {
				/* larb reg 0x0 non-zero => larb busy */
				reg_temp = M4U_ReadReg32(u4Base, 0x0);
				if (reg_temp != 0) {
					/* Larb is busy */
					smi_larb_busy_count[u4Index]++;
				}
				/* NOTE(review): 0xa0 is read as MMU port config;
				 * value is truncated to unsigned char here. */
				smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
				if (show_dump != 0)
					smi_dumpLarbDebugMsg(u4Index);
			}
		}
	}
	/* Show the checked result */
	for (i = 0; i < SMI_LARB_NR; i++) {	/* Check each larb */
		if (SMI_DGB_LARB_SELECT(larbs, i)) {
			/* larb i has been selected */
			/* Get status code */
			if (smi_larb_busy_count[i] == 5) {	/* The larb is always busy */
				if (smi_common_busy_count == 5) {	/* smi common is always busy */
					status_code = 1;
				} else if (smi_common_busy_count == 0) {	/* smi common is always idle */
					status_code = 2;
				} else {
					status_code = 5;	/* smi common is sometimes busy and idle */
				}
			} else if (smi_larb_busy_count[i] == 0) {	/* The larb is always idle */
				if (smi_common_busy_count == 5) {	/* smi common is always busy */
					status_code = 3;
				} else if (smi_common_busy_count == 0) {	/* smi common is always idle */
					status_code = 4;
				} else {
					status_code = 6;	/* smi common is sometimes busy and idle */
				}
			} else {	/* sometime the larb is busy */
				if (smi_common_busy_count == 5) {	/* smi common is always busy */
					status_code = 7;
				} else if (smi_common_busy_count == 0) {	/* smi common is always idle */
					status_code = 8;
				} else {
					status_code = 9;	/* smi common is sometimes busy and idle */
				}
			}
			/* Send the debug message according to the final result */
			switch (status_code) {
			case 1:
			case 3:
			case 5:
			case 7:
			case 8:
				SMIMSG
				    ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first",
				     i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
				SMIMSG
				    ("If the engine is waiting for Larb%ds' response, it needs SMI HW's check",
				     i);
				break;
			case 2:
				/* Larb busy while common idle: an MMU port
				 * misconfiguration is the only case flagged as
				 * an SMI issue. */
				if (smi_larb_mmu_status[i] == 0) {
					SMIMSG("Larb%d Busy=%d/5, Common Busy=%d/5,status=%d=>Check engine state first",
					       i, smi_larb_busy_count[i], smi_common_busy_count,
					       status_code);
					SMIMSG("If the engine is waiting for Larb%ds' response,it needs SMI HW's check",
					       i);
				} else {
					SMIMSG("Larb%d Busy=%d/5, Common Busy=%d/5, status=%d==>MMU port config error",
					       i, smi_larb_busy_count[i], smi_common_busy_count,
					       status_code);
					is_smi_issue = 1;
				}
				break;
			case 4:
			case 6:
			case 9:
				SMIMSG
				    ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue",
				     i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
				break;
			default:
				SMIMSG
				    ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown",
				     i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
				break;
			}
		}
	}
	return is_smi_issue;
#endif
	return 0;
}
  1231. #if IS_ENABLED(CONFIG_COMPAT)
  1232. /* 32 bits process ioctl support: */
  1233. /* This is prepared for the future extension since currently the sizes of 32 bits */
  1234. /* and 64 bits smi parameters are the same. */
/* 32-bit layout of MTK_SMI_BWC_CONFIG; field widths match the native
 * struct on A32/A64 (see the pass-through check in MTK_SMI_COMPAT_ioctl). */
typedef struct {
	compat_int_t scenario;
	compat_int_t b_on_off;	/* 0 : exit this scenario , 1 : enter this scenario */
} MTK_SMI_COMPAT_BWC_CONFIG;
/* 32-bit layout of MTK_SMI_BWC_INFO_SET: one property id plus two values. */
typedef struct {
	compat_int_t property;
	compat_int_t value1;
	compat_int_t value2;
} MTK_SMI_COMPAT_BWC_INFO_SET;
/* 32-bit layout of MTK_SMI_BWC_MM_INFO; the [2] members are width/height
 * pairs copied as 8-byte blocks by the compat marshalling helpers. */
typedef struct {
	compat_uint_t flag;	/* Reserved */
	compat_int_t concurrent_profile;
	compat_int_t sensor_size[2];
	compat_int_t video_record_size[2];
	compat_int_t display_size[2];
	compat_int_t tv_out_size[2];
	compat_int_t fps;
	compat_int_t video_encode_codec;
	compat_int_t video_decode_codec;
	compat_int_t hw_ovl_limit;
} MTK_SMI_COMPAT_BWC_MM_INFO;
/* Compat ioctl numbers built from the 32-bit struct sizes.  When a value
 * equals its native counterpart the compat handler forwards the call
 * unmodified (the struct layouts are identical). */
#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_COMPAT_BWC_CONFIG)
#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_COMPAT_BWC_INFO_SET)
#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_COMPAT_BWC_MM_INFO)
  1259. static int compat_get_smi_bwc_config_struct(MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
  1260. MTK_SMI_BWC_CONFIG __user *data)
  1261. {
  1262. compat_int_t i;
  1263. int err;
  1264. /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
  1265. err = get_user(i, &(data32->scenario));
  1266. err |= put_user(i, &(data->scenario));
  1267. err |= get_user(i, &(data32->b_on_off));
  1268. err |= put_user(i, &(data->b_on_off));
  1269. return err;
  1270. }
  1271. static int compat_get_smi_bwc_mm_info_set_struct(MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
  1272. MTK_SMI_BWC_INFO_SET __user *data)
  1273. {
  1274. compat_int_t i;
  1275. int err;
  1276. /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
  1277. err = get_user(i, &(data32->property));
  1278. err |= put_user(i, &(data->property));
  1279. err |= get_user(i, &(data32->value1));
  1280. err |= put_user(i, &(data->value1));
  1281. err |= get_user(i, &(data32->value2));
  1282. err |= put_user(i, &(data->value2));
  1283. return err;
  1284. }
/*
 * Repack a 32-bit userspace MTK_SMI_COMPAT_BWC_MM_INFO into the native
 * MTK_SMI_BWC_MM_INFO layout (also user space): scalars via
 * get_user/put_user, the [2]-sized pairs via copy_from_user/copy_to_user
 * through a kernel bounce buffer.  Returns 0 on success.
 * NOTE(review): get_user returns -EFAULT while copy_*_user returns a
 * not-copied byte count; OR-ing both means a failure may surface as a
 * positive value at the caller — confirm callers only test for non-zero.
 */
static int compat_get_smi_bwc_mm_info_struct(MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
					     MTK_SMI_BWC_MM_INFO __user *data)
{
	compat_uint_t u;
	compat_int_t i;
	compat_int_t p[2];
	int err;

	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
	err = get_user(u, &(data32->flag));
	err |= put_user(u, &(data->flag));
	err |= get_user(i, &(data32->concurrent_profile));
	err |= put_user(i, &(data->concurrent_profile));
	err |= copy_from_user(p, &(data32->sensor_size), sizeof(p));
	err |= copy_to_user(&(data->sensor_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->video_record_size), sizeof(p));
	err |= copy_to_user(&(data->video_record_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->display_size), sizeof(p));
	err |= copy_to_user(&(data->display_size), p, sizeof(p));
	err |= copy_from_user(p, &(data32->tv_out_size), sizeof(p));
	err |= copy_to_user(&(data->tv_out_size), p, sizeof(p));
	err |= get_user(i, &(data32->fps));
	err |= put_user(i, &(data->fps));
	err |= get_user(i, &(data32->video_encode_codec));
	err |= put_user(i, &(data->video_encode_codec));
	err |= get_user(i, &(data32->video_decode_codec));
	err |= put_user(i, &(data->video_decode_codec));
	err |= get_user(i, &(data32->hw_ovl_limit));
	err |= put_user(i, &(data->hw_ovl_limit));
	return err;
}
/*
 * Inverse of compat_get_smi_bwc_mm_info_struct(): copy the native
 * MTK_SMI_BWC_MM_INFO back into the caller's 32-bit layout after the
 * forwarded ioctl completes.  Returns 0 on success (same mixed
 * get_user/copy_*_user return-value caveat as the get helper).
 */
static int compat_put_smi_bwc_mm_info_struct(MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
					     MTK_SMI_BWC_MM_INFO __user *data)
{
	compat_uint_t u;
	compat_int_t i;
	compat_int_t p[2];
	int err;

	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
	err = get_user(u, &(data->flag));
	err |= put_user(u, &(data32->flag));
	err |= get_user(i, &(data->concurrent_profile));
	err |= put_user(i, &(data32->concurrent_profile));
	err |= copy_from_user(p, &(data->sensor_size), sizeof(p));
	err |= copy_to_user(&(data32->sensor_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->video_record_size), sizeof(p));
	err |= copy_to_user(&(data32->video_record_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->display_size), sizeof(p));
	err |= copy_to_user(&(data32->display_size), p, sizeof(p));
	err |= copy_from_user(p, &(data->tv_out_size), sizeof(p));
	err |= copy_to_user(&(data32->tv_out_size), p, sizeof(p));
	err |= get_user(i, &(data->fps));
	err |= put_user(i, &(data32->fps));
	err |= get_user(i, &(data->video_encode_codec));
	err |= put_user(i, &(data32->video_encode_codec));
	err |= get_user(i, &(data->video_decode_codec));
	err |= put_user(i, &(data32->video_decode_codec));
	err |= get_user(i, &(data->hw_ovl_limit));
	err |= put_user(i, &(data32->hw_ovl_limit));
	return err;
}
/*
 * MTK_SMI_COMPAT_ioctl - 32-bit compat ioctl entry point.
 *
 * When a compat command number equals its native counterpart (identical
 * struct layouts), the call is forwarded to unlocked_ioctl unchanged.
 * Otherwise the 32-bit struct is repacked into a native copy placed on the
 * user stack via compat_alloc_user_space() before forwarding; for INFO_GET
 * the result is marshalled back afterwards.  Dump/MMDVFS commands take no
 * struct conversion and pass straight through.
 */
long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	long ret;

	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
		return -ENOTTY;
	switch (cmd) {
	case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG) {
			/* Same layout on both ABIs: no conversion needed. */
			SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_CONFIG");
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
			MTK_SMI_BWC_CONFIG __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_config_struct(data32, data);
			if (err)
				return err;
			ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
							 (unsigned long)data);
			return ret;
		}
	}
	case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET) {
			SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_SET");
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
			MTK_SMI_BWC_INFO_SET __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
			if (err)
				return err;
			return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
							  (unsigned long)data);
		}
	}
	break;	/* unreachable: both branches above return */
	case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
	{
		if (COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET) {
			SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_GET");
			return filp->f_op->unlocked_ioctl(filp, cmd,
							  (unsigned long)compat_ptr(arg));
		} else {
			MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
			MTK_SMI_BWC_MM_INFO __user *data;
			int err;

			data32 = compat_ptr(arg);
			data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
			if (data == NULL)
				return -EFAULT;
			err = compat_get_smi_bwc_mm_info_struct(data32, data);
			if (err)
				return err;
			ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
							 (unsigned long)data);
			/* Copy the (possibly updated) info back to the
			 * caller's 32-bit struct. */
			err = compat_put_smi_bwc_mm_info_struct(data32, data);
			if (err)
				return err;
			return ret;
		}
	}
	break;	/* unreachable: both branches above return */
	case MTK_IOC_SMI_DUMP_LARB:
	case MTK_IOC_SMI_DUMP_COMMON:
	case MTK_IOC_MMDVFS_CMD:
		/* Argument is a plain 32-bit value or unused: pass through. */
		return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
  1429. #endif
module_init(smi_init);
module_exit(smi_exit);
/* Clock/MTCMOS init must run after the common clock framework is ready. */
late_initcall(smi_init_late);
/* Runtime-tunable knobs under /sys/module/<module>/parameters/. */
module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
MODULE_DESCRIPTION("MTK SMI driver");
MODULE_AUTHOR("Glory Hung<glory.hung@mediatek.com>");
MODULE_AUTHOR("Yong Wu<yong.wu@mediatek.com>");
MODULE_LICENSE("GPL");