tz_service.c

  1. #include <linux/kernel.h>
  2. #include <linux/slab.h>
  3. #include <linux/module.h>
  4. #include <linux/fs.h>
  5. #include <linux/platform_device.h>
  6. #include <linux/debugfs.h>
  7. #include <linux/cdev.h>
  8. #include <linux/uaccess.h>
  9. #include <linux/sched.h>
  10. #include <linux/list.h>
  11. #include <linux/semaphore.h>
  12. #include <linux/completion.h>
  13. #include <linux/io.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/wait.h>
  16. #include <asm/cacheflush.h>
  17. #include <linux/delay.h>
  18. #include <linux/irq.h>
  19. #include "teei_id.h"
  20. #include "nt_smc_call.h"
  21. #include "teei_debug.h"
  22. #include "tz_service.h"
  23. #include "teei_common.h"
  24. #include "../tz_vfs/VFS.h"
  25. #include <linux/of.h>
  26. #include <linux/of_irq.h>
  27. #include <linux/of_address.h>
  28. #include <linux/cpu.h>
  29. #define MAX_BUFF_SIZE (4096)
  30. #define NQ_SIZE (4096)
  31. #define CTL_BUFF_SIZE (4096)
  32. #define VDRV_MAX_SIZE (0x80000)
  33. #define MESSAGE_LENGTH (4096)
  34. #define NQ_VALID 1
  35. #define SWITCH_IRQ (282)
  36. #define F_CREATE_NQ_ID 0x01
  37. #define F_CREATE_CTL_ID 0x02
  38. #define F_CREATE_VDRV_ID 0x04
  39. #define MESSAGE_SIZE (4096)
  40. #define VALID_TYPE (1)
  41. #define INVALID_TYPE (0)
  42. #define FAST_CALL_TYPE (0x100)
  43. #define STANDARD_CALL_TYPE (0x200)
  44. #define TYPE_NONE (0x300)
  45. #define FAST_CREAT_NQ (0x40)
  46. #define FAST_ACK_CREAT_NQ (0x41)
  47. #define FAST_CREAT_VDRV (0x42)
  48. #define FAST_ACK_CREAT_VDRV (0x43)
  49. #define FAST_CREAT_SYS_CTL (0x44)
  50. #define FAST_ACK_CREAT_SYS_CTL (0x45)
  51. #define FAST_CREAT_FDRV (0x46)
  52. #define FAST_ACK_CREAT_FDRV (0x47)
  53. #define NQ_CALL_TYPE (0x60)
  54. #define VDRV_CALL_TYPE (0x61)
  55. #define SCHD_CALL_TYPE (0x62)
  56. #define FDRV_ACK_TYPE (0x63)
  57. #define STD_INIT_CONTEXT (0x80)
  58. #define STD_ACK_INIT_CONTEXT (0x81)
  59. #define STD_OPEN_SESSION (0x82)
  60. #define STD_ACK_OPEN_SESSION (0x83)
  61. #define STD_INVOKE_CMD (0x84)
  62. #define STD_ACK_INVOKE_CMD (0x85)
  63. #define STD_CLOSE_SESSION (0x86)
  64. #define STD_ACK_CLOSE_SESSION (0x87)
  65. #define STD_CLOSE_CONTEXT (0x88)
  66. #define STD_ACK_CLOSE_CONTEXT (0x89)
  67. #define FP_BUFF_SIZE (512 * 1024)
  68. #define FP_SYS_NO (100)
  69. #define START_STATUS (0)
  70. #define END_STATUS (1)
  71. #define VFS_SIZE (512 * 1024)
  72. #define CAPI_CALL 0x01
  73. #define FDRV_CALL 0x02
  74. #define BDRV_CALL 0x03
  75. #define SCHED_CALL 0x04
  77. #define VFS_SYS_NO 0x08
  78. #define REETIME_SYS_NO 0x07
  79. unsigned long message_buff = 0;
  80. unsigned long fdrv_message_buff = 0;
  81. unsigned long bdrv_message_buff = 0;
  82. unsigned long tlog_message_buff = 0;
  83. static unsigned long nt_t_buffer;
  84. unsigned long t_nt_buffer;
  85. static unsigned long sys_ctl_buffer;
  86. unsigned long fp_buff_addr = 0;
  87. extern int get_current_cpuid(void);
  88. #define NQ_BUFF_SIZE (4096)
  89. #define NQ_BLOCK_SIZE (32)
  90. #define BLOCK_MAX_COUNT (NQ_BUFF_SIZE / NQ_BLOCK_SIZE - 1)
  91. #define STD_NQ_ACK_ID 0x01
  92. #define TEE_NAME_SIZE (255)
  93. #define GLSCH_NEG (0x03)
  94. #define GLSCH_NONE (0x00)
  95. #define GLSCH_LOW (0x01)
  96. #define GLSCH_HIGH (0x02)
  97. struct NQ_head {
  98. unsigned int start_index;
  99. unsigned int end_index;
  100. unsigned int Max_count;
  101. unsigned char reserve[20];
  102. };
  103. struct NQ_entry {
  104. unsigned int valid_flag;
  105. unsigned int length;
  106. unsigned int buffer_addr;
  107. unsigned char reserve[20];
  108. };
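/*
 * Notify-queue layout (a reading of the code in this file, not a spec):
 * each 4 KB queue page starts with one 32-byte NQ_head block, followed
 * by BLOCK_MAX_COUNT (4096/32 - 1 = 127) NQ_entry slots of 32 bytes
 * each.  start_index and end_index advance modulo Max_count, giving a
 * simple ring buffer shared between NT_OS and T_OS; see add_nq_entry()
 * and get_nq_entry() further down.
 */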
  109. /******************************
  110. * Message header
  111. ******************************/
  112. struct message_head {
  113. unsigned int invalid_flag;
  114. unsigned int message_type;
  115. unsigned int child_type;
  116. unsigned int param_length;
  117. };
  118. struct fdrv_message_head {
  119. unsigned int driver_type;
  120. unsigned int fdrv_param_length;
  121. };
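/*
 * Shared command-buffer convention used by the fast/standard call
 * helpers below: a struct message_head (validity flag, call type,
 * sub-command, body length) is written at the start of message_buff,
 * the call-specific body follows it, and the whole MESSAGE_SIZE page
 * is flushed with Flush_Dcache_By_Area() before the SMC so T_OS sees
 * it.  The acknowledgement from T_OS is read back from the same page.
 */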
  122. /******************************
  123. * Fast call structures
  124. ******************************/
  125. struct create_NQ_struct {
  126. unsigned int n_t_nq_phy_addr;
  127. unsigned int n_t_size;
  128. unsigned int t_n_nq_phy_addr;
  129. unsigned int t_n_size;
  130. };
  131. struct create_vdrv_struct {
  132. unsigned int vdrv_type;
  133. unsigned int vdrv_phy_addr;
  134. unsigned int vdrv_size;
  135. };
  136. struct create_fdrv_struct {
  137. unsigned int fdrv_type;
  138. unsigned int fdrv_phy_addr;
  139. unsigned int fdrv_size;
  140. };
  141. struct create_sys_ctl_struct {
  142. unsigned int sys_ctl_phy_addr;
  143. unsigned int sys_ctl_size;
  144. };
  145. struct ack_fast_call_struct {
  146. int retVal;
  147. };
  148. /*********************************
  149. * Standard call structures
  150. *********************************/
  151. struct ack_vdrv_struct {
  152. unsigned int sysno;
  153. };
  154. struct fdrv_struct {
  155. unsigned int driver_id;
  156. };
  157. struct TEEI_printer_command {
  158. int func;
  159. int cmd_size;
  160. union func_arg {
  161. struct func_write {
  162. int length;
  163. int timeout;
  164. } func_write_args;
  165. } args;
  166. };
  167. union TEEI_printer_response {
  168. int value;
  169. };
  170. struct reetime_handle_struct {
  171. struct service_handler *handler;
  172. int retVal;
  173. };
  174. struct reetime_handle_struct reetime_handle_entry;
  175. struct vfs_handle_struct {
  176. struct service_handler *handler;
  177. int retVal;
  178. };
  179. struct vfs_handle_struct vfs_handle_entry;
  180. static int service_cmd_flag;
  181. static int smc_flag;
  182. static int sys_call_no;
  183. static long register_shared_param_buf(struct service_handler *handler);
  184. static int register_interrupt_handler(void);
  185. static int init_all_service_handlers(void);
  186. static int start_teei_service(void);
  187. static int printer_thread_function(unsigned long virt_addr, unsigned long para_vaddr, unsigned long buff_vaddr);
  188. unsigned char *printer_share_mem = NULL;
  189. EXPORT_SYMBOL_GPL(printer_share_mem);
  190. unsigned int printer_shmem_flags = 0;
  191. EXPORT_SYMBOL_GPL(printer_shmem_flags);
  192. unsigned char *daulOS_share_mem = NULL;
  193. EXPORT_SYMBOL_GPL(daulOS_share_mem);
  194. unsigned char *daulOS_VFS_share_mem = NULL;
  195. EXPORT_SYMBOL_GPL(daulOS_VFS_share_mem);
  196. unsigned char *vfs_flush_address = NULL;
  197. EXPORT_SYMBOL_GPL(vfs_flush_address);
  198. unsigned char *daulOS_VFS_write_share_mem = NULL;
  199. EXPORT_SYMBOL_GPL(daulOS_VFS_write_share_mem);
  200. unsigned char *daulOS_VFS_read_share_mem = NULL;
  201. EXPORT_SYMBOL_GPL(daulOS_VFS_read_share_mem);
  202. #define SHMEM_ENABLE 0
  203. #define SHMEM_DISABLE 1
  204. struct work_entry {
  205. int call_no;
  206. struct work_struct work;
  207. };
  208. struct load_soter_entry {
  209. unsigned long vfs_addr;
  210. struct work_struct work;
  211. };
  212. static struct load_soter_entry load_ent;
  213. struct workqueue_struct *secure_wq = NULL;
  214. static struct work_entry work_ent;
  215. struct timeval stime;
  216. struct timeval etime;
  217. int vfs_write_flag = 0;
  218. unsigned long teei_vfs_flag = 0;
  219. struct bdrv_call_struct {
  220. int bdrv_call_type;
  221. struct service_handler *handler;
  222. int retVal;
  223. };
  224. extern int add_work_entry(int work_type, unsigned long buff);
  225. #define printk(fmt, args...) printk("\033[;34m[TEEI][TZDriver]"fmt"\033[0m", ##args)
  226. /*add by microtrust*/
  227. static unsigned int get_master_cpu_id(unsigned int gic_irqs)
  228. {
  229. unsigned int i = gic_irqs >> 2;
  230. unsigned long gic_base = 0x10231000 + 0x800 + i * 4;
  231. void __iomem *dist_base = (void __iomem *)gic_base;
  232. unsigned int target_id;
  233. target_id = readl_relaxed(dist_base);
  234. target_id = target_id >> ((gic_irqs & 0x03) * 8);
  235. target_id = target_id & 0xff;
  236. return target_id;
  237. }
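/*
 * get_master_cpu_id() reads the GIC distributor's GICD_ITARGETSRn
 * register for the given interrupt (offset 0x800 + 4 * (irq / 4)) and
 * extracts the 8-bit CPU-target field for that IRQ.  The hard-coded
 * 0x10231000 base is assumed here to be directly CPU-addressable on
 * this platform; a generic driver would use a mapped distributor base
 * (e.g. obtained via ioremap()) rather than casting the raw address.
 */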
  238. /****************************************
  239. //extern u32 get_irq_target_microtrust(void);
  240. //~ static int get_current_cpuid(void)
  241. //~ {
  242. //~
  243. //~
  244. //~
  245. //~ #if 0
  246. //~ int cpu_id = get_irq_target_microtrust();
  247. //~ if (cpu_id == 0xff) {
  248. //~ printk("error cpu_id [0x%x]\n", cpu_id);
  249. //~ printk("error cpu_id [0x%x]\n", cpu_id);
  250. //~ printk("error cpu_id [0x%x]\n", cpu_id);
  251. //~
  252. //~ }
  253. //~ printk(" Get current cpu id [0x%x]\n", cpu_id);
  254. //~ return cpu_id;
  255. //~ #endif
  256. //~ static int cpuid = 4;
  257. //~ if(cpuid == 4)
  258. //~ cpuid = 5;
  259. //~ else
  260. //~ cpuid = 4;
  261. //~ return cpuid;
  262. //~
  263. //~ }
  264. */
  265. int cal_time(struct timeval start_time, struct timeval end_time)
  266. {
  267. int timestamp = 0;
  268. timestamp = (end_time.tv_sec - start_time.tv_sec) * 1000000 + (end_time.tv_usec - start_time.tv_usec);
  269. printk("function expend %d u-seconds! \n", timestamp);
  270. return timestamp;
  271. }
  272. static void secondary_teei_ack_invoke_drv(void *info)
  273. {
  274. n_ack_t_invoke_drv(0, 0, 0);
  275. return;
  276. }
  277. static void post_teei_ack_invoke_drv(int cpu_id)
  278. {
  279. get_online_cpus();
  280. smp_call_function_single(cpu_id,
  281. secondary_teei_ack_invoke_drv,
  282. NULL,
  283. 1);
  284. put_online_cpus();
  285. return;
  286. }
  287. static void teei_ack_invoke_drv(void)
  288. {
  289. int cpu_id = 0;
  290. #if 0
  291. int cpu_id = smp_processor_id();
  292. /* int cpu_id = raw_smp_processor_id(); */
  293. if (cpu_id != 0) {
  294. /* call the mb() */
  295. mb();
  296. post_teei_ack_invoke_drv(0); /* post it to primary */
  297. } else {
  298. /* printk("[%s][%d]\n", __func__, __LINE__); */
  299. n_ack_t_invoke_drv(0, 0, 0); /* called directly on primary core */
  300. }
  301. #else
  302. cpu_id = get_current_cpuid();
  303. post_teei_ack_invoke_drv(cpu_id);
  304. #endif
  305. return;
  306. }
  307. void tz_down(struct semaphore *sem)
  308. {
  309. unsigned long retVal = 1;
  310. do {
  311. retVal = down_trylock(sem);
  312. if (retVal == 0)
  313. break;
  314. else
  315. udelay(100);
  316. } while (true);
  317. }
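/*
 * tz_down() is a busy-wait acquire: it spins on down_trylock() with a
 * 100 us delay between attempts instead of sleeping on the semaphore,
 * apparently so it can be used from contexts where blocking in down()
 * is not wanted.
 */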
  318. /*****************************Drivers**************************************/
  319. static struct service_handler fiq_drivers;
  320. /* #define printk(fmt, args...) printk("\033[;34m[TEEI][TZDriver]"fmt"\033[0m", ##args) */
  321. static void fiq_drivers_init(struct service_handler *handler)
  322. {
  323. register_shared_param_buf(handler);
  324. }
  325. static void fiq_drivers_deinit(struct service_handler *handler)
  326. {
  327. return;
  328. }
  329. static int fiq_drivers_handle(struct service_handler *handler)
  330. {
  331. return 0;
  332. }
  333. /******************************TIME**************************************/
  334. #include <linux/time.h>
  335. void set_ack_vdrv_cmd(unsigned int sys_num)
  336. {
  337. if (boot_soter_flag == START_STATUS) {
  338. struct message_head msg_head;
  339. struct ack_vdrv_struct ack_body;
  340. memset(&msg_head, 0, sizeof(struct message_head));
  341. msg_head.invalid_flag = VALID_TYPE;
  342. msg_head.message_type = STANDARD_CALL_TYPE;
  343. msg_head.child_type = N_ACK_T_INVOKE_DRV;
  344. msg_head.param_length = sizeof(struct ack_vdrv_struct);
  345. ack_body.sysno = sys_num;
  346. memcpy(message_buff, &msg_head, sizeof(struct message_head));
  347. memcpy(message_buff + sizeof(struct message_head), &ack_body, sizeof(struct ack_vdrv_struct));
  348. Flush_Dcache_By_Area((unsigned long)message_buff, (unsigned long)message_buff + MESSAGE_SIZE);
  349. } else {
  350. *((int *)bdrv_message_buff) = sys_num;
  351. Flush_Dcache_By_Area((unsigned long)bdrv_message_buff, (unsigned long)bdrv_message_buff + MESSAGE_SIZE);
  352. }
  353. return;
  354. }
  355. static struct service_handler reetime;
  356. static void reetime_init(struct service_handler *handler)
  357. {
  358. register_shared_param_buf(handler);
  359. }
  360. static void reetime_deinit(struct service_handler *handler)
  361. {
  362. return;
  363. }
  364. int __reetime_handle(struct service_handler *handler)
  365. {
  366. struct timeval tv;
  367. void *ptr = NULL;
  368. int tv_sec;
  369. int tv_usec;
  370. do_gettimeofday(&tv);
  371. ptr = handler->param_buf;
  372. tv_sec = tv.tv_sec;
  373. *((int *)ptr) = tv_sec;
  374. tv_usec = tv.tv_usec;
  375. *((int *)ptr + 1) = tv_usec;
  376. Flush_Dcache_By_Area((unsigned long)handler->param_buf, (unsigned long)handler->param_buf + handler->size);
  377. set_ack_vdrv_cmd(handler->sysno);
  378. teei_vfs_flag = 0;
  379. /* down(&smc_lock); */
  380. #if 0
  381. teei_ack_invoke_drv();
  382. #else
  383. n_ack_t_invoke_drv(0, 0, 0);
  384. #endif
  385. return 0;
  386. }
  387. static void secondary_reetime_handle(void *info)
  388. {
  389. struct reetime_handle_struct *cd = (struct reetime_handle_struct *)info;
  390. /* with a rmb() */
  391. rmb();
  392. cd->retVal = __reetime_handle(cd->handler);
  393. /* with a wmb() */
  394. wmb();
  395. }
  396. static int reetime_handle(struct service_handler *handler)
  397. {
  398. int cpu_id = 0;
  399. int retVal = 0;
  400. struct bdrv_call_struct *reetime_bdrv_ent = NULL;
  401. down(&smc_lock);
  402. #if 0
  403. reetime_handle_entry.handler = handler;
  404. #else
  405. reetime_bdrv_ent = (struct bdrv_call_struct *)kmalloc(sizeof(struct bdrv_call_struct), GFP_KERNEL);
  406. reetime_bdrv_ent->handler = handler;
  407. reetime_bdrv_ent->bdrv_call_type = REETIME_SYS_NO;
  408. #endif
  409. /* with a wmb() */
  410. wmb();
  411. #if 0
  412. get_online_cpus();
  413. cpu_id = get_current_cpuid();
  414. smp_call_function_single(cpu_id, secondary_reetime_handle, (void *)(&reetime_handle_entry), 1);
  415. put_online_cpus();
  416. #else
  417. retVal = add_work_entry(BDRV_CALL, (unsigned long)reetime_bdrv_ent);
  418. if (retVal != 0) {
  419. up(&smc_lock);
  420. return retVal;
  421. }
  422. #endif
  423. rmb();
  424. #if 0
  425. return reetime_handle_entry.retVal;
  426. #else
  427. return 0;
  428. #endif
  429. }
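/*
 * Back-driver (bdrv) call path for the time service: reetime_handle()
 * wraps the handler in a bdrv_call_struct and queues it with
 * add_work_entry(BDRV_CALL, ...) while holding smc_lock; the reply is
 * produced later in __reetime_handle(), which writes tv_sec/tv_usec
 * into the shared param_buf, flushes it, and acknowledges T_OS through
 * set_ack_vdrv_cmd()/n_ack_t_invoke_drv().
 */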
  430. /******************************SOCKET**************************************/
  431. #include "SOCK.h"
  432. static struct service_handler socket;
  433. static unsigned long para_vaddr;
  434. static unsigned long buff_vaddr;
  435. static struct service_handler vfs_handler;
  436. #define SOCKET_BASE 0xFE021000
  437. #ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
  438. enum {
  439. FUNCTION_socket = 0x0,
  440. FUNCTION_connect = 0x04,
  441. FUNCTION_send = 0x08,
  442. FUNCTION_recv = 0x0C,
  443. FUNCTION_close = 0x20,
  444. SET_BUFFER_BASE = 0xF0,
  445. SET_PARAM_BASE = 0xF4,
  446. };
  447. static void socket_init(struct service_handler *handler)
  448. {
  449. unsigned long para_paddr = 0;
  450. unsigned long buff_paddr = 0;
  451. para_vaddr = (unsigned long)kmalloc(PAGE_SIZE, GFP_KERNEL);
  452. buff_vaddr = (unsigned long)kmalloc(PAGE_SIZE, GFP_KERNEL);
  453. #if 0 /* for qemu */
  454. para_paddr = virt_to_phys((void *)para_vaddr);
  455. buff_paddr = virt_to_phys((void *)buff_vaddr);
  456. writel(para_paddr, SOCKET_BASE + SET_PARAM_BASE);
  457. writel(buff_paddr, SOCKET_BASE + SET_BUFFER_BASE);
  458. #endif /* for qemu */
  459. register_shared_param_buf(handler);
  460. }
  461. static void socket_deinit(struct service_handler *handler)
  462. {
  463. kfree((void *)para_vaddr);
  464. kfree((void *)buff_vaddr);
  465. }
  466. static int socket_handle(struct service_handler *handler)
  467. {
  468. /* socket_thread_function((unsigned long)handler->param_buf, para_vaddr, buff_vaddr); */
  469. return 0;
  470. }
  471. /********************************************************************
  472. * VFS functions *
  473. ********************************************************************/
  474. int vfs_thread_function(unsigned long virt_addr, unsigned long para_vaddr, unsigned long buff_vaddr)
  475. {
  476. /*printk("=========================================================================\n");*/
  477. daulOS_VFS_share_mem = (unsigned char *)virt_addr;
  478. #ifdef VFS_RDWR_SEM
  479. up(&VFS_rd_sem);
  480. down_interruptible(&VFS_wr_sem);
  481. #else
  482. complete(&VFS_rd_comp);
  483. wait_for_completion_interruptible(&VFS_wr_comp);
  484. #endif
  485. /*printk("=============================2222222222222222222222222222222222============================================\n");*/
return 0;
  486. }
  487. static void vfs_init(struct service_handler *handler) /*! init service */
  488. {
  489. register_shared_param_buf(handler);
  490. vfs_flush_address = handler->param_buf;
  491. return;
  492. }
  493. static void vfs_deinit(struct service_handler *handler) /*! stop service */
  494. {
  495. return;
  496. }
  497. int __vfs_handle(struct service_handler *handler) /*! invoke handler */
  498. {
  499. /* vfs_thread_function(handler->param_buf, para_vaddr, buff_vaddr); */
  500. Flush_Dcache_By_Area((unsigned long)handler->param_buf, (unsigned long)handler->param_buf + handler->size);
  501. set_ack_vdrv_cmd(handler->sysno);
  502. teei_vfs_flag = 0;
  503. /* down(&smc_lock); */
  504. #if 0
  505. teei_ack_invoke_drv();
  506. #else
  507. n_ack_t_invoke_drv(0, 0, 0);
  508. #endif
  509. return 0;
  510. }
  511. static void secondary_vfs_handle(void *info)
  512. {
  513. struct vfs_handle_struct *cd = (struct vfs_handle_struct *)info;
  514. /* with a rmb() */
  515. rmb();
  516. cd->retVal = __vfs_handle(cd->handler);
  517. /* with a wmb() */
  518. wmb();
  519. }
  520. static int vfs_handle(struct service_handler *handler)
  521. {
  522. int cpu_id = 0;
  523. int retVal = 0;
  524. struct bdrv_call_struct *vfs_bdrv_ent = NULL;
  525. vfs_thread_function(handler->param_buf, para_vaddr, buff_vaddr);
  526. down(&smc_lock);
  527. #if 0
  528. vfs_handle_entry.handler = handler;
  529. #else
  530. vfs_bdrv_ent = (struct bdrv_call_struct *)kmalloc(sizeof(struct bdrv_call_struct), GFP_KERNEL);
  531. vfs_bdrv_ent->handler = handler;
  532. vfs_bdrv_ent->bdrv_call_type = VFS_SYS_NO;
  533. #endif
  534. /* with a wmb() */
  535. wmb();
  536. #if 0
  537. get_online_cpus();
  538. cpu_id = get_current_cpuid();
  539. smp_call_function_single(cpu_id, secondary_vfs_handle, (void *)(&vfs_handle_entry), 1);
  540. put_online_cpus();
  541. #else
  542. Flush_Dcache_By_Area((unsigned long)vfs_bdrv_ent, (unsigned long)vfs_bdrv_ent + sizeof(struct bdrv_call_struct));
  543. retVal = add_work_entry(BDRV_CALL, (unsigned long)vfs_bdrv_ent);
  544. if (retVal != 0) {
  545. up(&smc_lock);
  546. return retVal;
  547. }
  548. #endif
  549. rmb();
  550. #if 0
  551. return vfs_handle_entry.retVal;
  552. #else
  553. return 0;
  554. #endif
  555. }
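/*
 * The VFS service follows the same bdrv pattern as reetime, except that
 * vfs_thread_function() first hands the shared buffer to the tz_vfs
 * front end (the VFS_rd_comp/VFS_wr_comp completion pair) and blocks
 * until the normal-world file-system helper has processed the request;
 * presumably that helper is the component behind ../tz_vfs/VFS.h.
 */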
  556. /**********************************************************************
  557. * Printer functions *
  558. **********************************************************************/
  559. static struct service_handler printer_driver;
  560. static void printer_driver_init(struct service_handler *handler) /*init service */
  561. {
  562. register_shared_param_buf(handler);
  563. printer_share_mem = handler->param_buf;
  564. return;
  565. }
  566. static void printer_driver_deinit(struct service_handler *handler) /*! stop service */
  567. {
  568. return;
  569. }
  570. static int printer_driver_handle(struct service_handler *handler) /* invoke handler */
  571. {
  572. printer_thread_function(handler->param_buf, para_vaddr, buff_vaddr);
  573. return 0;
  574. }
  575. static int printer_thread_function(unsigned long virt_addr, unsigned long para_vaddr, unsigned long buff_vaddr)
  576. {
  577. int retVal = 0;
  578. int timeout = 0;
  579. struct TEEI_printer_command *command = NULL;
  580. #ifdef DEBUG
  581. int i = 0;
  582. for (i = 0; i < 100; i++)
  583. printk("param_buf[%d] = %d\n", i, *((unsigned char *)virt_addr + i));
  584. #endif
  585. printer_shmem_flags = SHMEM_ENABLE;
  586. command = (struct TEEI_printer_command *)virt_addr;
  587. timeout = command->args.func_write_args.timeout;
  588. /* up(&printer_rd_sem); */
  589. /* retVal = down_timeout(&printer_wr_sem, HZ*timeout); */
  590. if (retVal < 0) {
  591. union TEEI_printer_response t_response;
  592. t_response.value = -ETIME;
  593. /* retVal = down_trylock(&printer_rd_sem); */
  594. if (retVal == 0)
  595. printk("[SEM status] Printer App is not RUNNING\n");
  596. else if (retVal == 1)
  597. printk("[SEM status] BLUE Printer is not Ready\n");
  598. memcpy((void *)virt_addr, (void *)&t_response, sizeof(union TEEI_printer_response));
  599. }
  600. printer_shmem_flags = SHMEM_DISABLE;
return retVal;
  601. }
  602. /*****************************************************************************/
  603. static void secondary_invoke_fastcall(void *info)
  604. {
  605. n_invoke_t_fast_call(0, 0, 0);
  606. }
  607. static void invoke_fastcall(void)
  608. {
  609. int cpu_id = 0;
  610. get_online_cpus();
  611. cpu_id = get_current_cpuid();
  612. smp_call_function_single(cpu_id, secondary_invoke_fastcall, NULL, 1);
  613. put_online_cpus();
  614. }
  615. static long register_shared_param_buf(struct service_handler *handler)
  616. {
  617. long retVal = 0;
  618. unsigned long irq_flag = 0;
  619. struct message_head msg_head;
  620. struct create_vdrv_struct msg_body;
  621. struct ack_fast_call_struct msg_ack;
  622. if (message_buff == NULL) {
  623. printk("[%s][%d]: There is NO command buffer!.\n", __func__, __LINE__);
  624. return -EINVAL;
  625. }
  626. if (handler->size > VDRV_MAX_SIZE) {
  627. printk("[%s][%d]: The vDrv buffer is too large, DO NOT Allow to create it.\n", __FILE__, __LINE__);
  628. return -EINVAL;
  629. }
  630. handler->param_buf = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(handler->size, SZ_4K)));
  631. if (handler->param_buf == NULL) {
  632. printk("[%s][%d]: kmalloc vdrv_buffer failed.\n", __FILE__, __LINE__);
  633. return -ENOMEM;
  634. }
  635. memset(&msg_head, 0, sizeof(struct message_head));
  636. memset(&msg_body, 0, sizeof(struct create_vdrv_struct));
  637. memset(&msg_ack, 0, sizeof(struct ack_fast_call_struct));
  638. msg_head.invalid_flag = VALID_TYPE;
  639. msg_head.message_type = FAST_CALL_TYPE;
  640. msg_head.child_type = FAST_CREAT_VDRV;
  641. msg_head.param_length = sizeof(struct create_vdrv_struct);
  642. msg_body.vdrv_type = handler->sysno;
  643. msg_body.vdrv_phy_addr = virt_to_phys(handler->param_buf);
  644. msg_body.vdrv_size = handler->size;
  645. local_irq_save(irq_flag);
  646. /* Notify the T_OS that there is ctl_buffer to be created. */
  647. memcpy(message_buff, &msg_head, sizeof(struct message_head));
  648. memcpy(message_buff + sizeof(struct message_head), &msg_body, sizeof(struct create_vdrv_struct));
  649. Flush_Dcache_By_Area((unsigned long)message_buff, (unsigned long)message_buff + MESSAGE_SIZE);
  650. /* Call the smc_fast_call */
  651. /* n_invoke_t_fast_call(0, 0, 0); */
  652. down(&(boot_sema));
  653. down(&(smc_lock));
  654. /*down(&cpu_down_lock);*/
  655. invoke_fastcall();
  656. down(&(boot_sema));
  657. up(&(boot_sema));
  658. memcpy(&msg_head, message_buff, sizeof(struct message_head));
  659. memcpy(&msg_ack, message_buff + sizeof(struct message_head), sizeof(struct ack_fast_call_struct));
  660. local_irq_restore(irq_flag);
  661. /*up(&cpu_down_lock);*/
  662. /* Check the response from T_OS. */
  663. if ((msg_head.message_type == FAST_CALL_TYPE) && (msg_head.child_type == FAST_ACK_CREAT_VDRV)) {
  664. retVal = msg_ack.retVal;
  665. if (retVal == 0) {
  666. /* printk("[%s][%d]: %s end.\n", __FILE__, __LINE__, __func__); */
  667. return retVal;
  668. }
  669. } else {
  670. retVal = -EAGAIN;
  671. }
  672. /* Release the resource and return. */
  673. free_pages(handler->param_buf, get_order(ROUND_UP(handler->size, SZ_4K)));
  674. handler->param_buf = NULL;
  675. return retVal;
  676. }
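/*
 * Fast-call handshake used by register_shared_param_buf() and the other
 * create_* helpers: fill message_buff with a FAST_CALL_TYPE request,
 * flush the D-cache, take smc_lock, trigger the SMC on the TEE-owning
 * CPU via invoke_fastcall(), then wait on boot_sema until T_OS's
 * acknowledgement is signalled (the up() on boot_sema happens in the
 * SMC/IRQ glue outside this file) and parse the FAST_ACK_* reply that
 * T_OS wrote back into the same buffer.
 */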
  677. static void secondary_load_func(void *info)
  678. {
  679. Flush_Dcache_By_Area((unsigned long)boot_vfs_addr, (unsigned long)boot_vfs_addr + VFS_SIZE);
  680. printk("[%s][%d]: %s end.\n", __func__, __LINE__, __func__);
  681. n_ack_t_load_img(0, 0, 0);
  682. return ;
  683. }
  684. static void load_func(struct work_struct *entry)
  685. {
  686. int cpu_id = 0;
  687. vfs_thread_function(boot_vfs_addr, 0, 0);
  688. down(&smc_lock);
  689. get_online_cpus();
  690. cpu_id = get_current_cpuid();
  691. printk("[%s][%d]current cpu id[%d] \n", __func__, __LINE__, cpu_id);
  692. smp_call_function_single(cpu_id, secondary_load_func, NULL, 1);
  693. put_online_cpus();
  694. return;
  695. }
  696. static void work_func(struct work_struct *entry)
  697. {
  698. struct work_entry *md = container_of(entry, struct work_entry, work);
  699. int sys_call_num = md->call_no;
  700. if (sys_call_num == socket.sysno) {
  701. socket.handle(&socket);
  702. Flush_Dcache_By_Area(socket.param_buf, socket.param_buf + socket.size);
  703. } else if (sys_call_num == reetime.sysno) {
  704. reetime.handle(&reetime);
  705. Flush_Dcache_By_Area(reetime.param_buf, reetime.param_buf + reetime.size);
  706. } else if (sys_call_num == vfs_handler.sysno) {
  707. vfs_handler.handle(&vfs_handler);
  708. Flush_Dcache_By_Area(vfs_handler.param_buf, vfs_handler.param_buf + vfs_handler.size);
  709. } else if (sys_call_num == printer_driver.sysno) {
  710. printer_driver.handle(&printer_driver);
  711. Flush_Dcache_By_Area(printer_driver.param_buf, printer_driver.param_buf + printer_driver.size);
  712. }
  713. service_cmd_flag = 0;
  714. smc_flag = 1;
  715. return;
  716. }
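/*
 * work_func()/handle_dispatch() route a request to the handler whose
 * sysno matches the call number delivered by T_OS (1 = socket,
 * 7 = reetime, 8 = vfs, 9 = fiq, 10 = printer, as registered in
 * init_all_service_handlers()), then flush that handler's param_buf so
 * the secure side observes the result.
 */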
  717. void handle_dispatch(void)
  718. {
  719. if (sys_call_no == socket.sysno) {
  720. socket.handle(&socket);
  721. Flush_Dcache_By_Area(socket.param_buf, socket.param_buf + socket.size);
  722. } else if (sys_call_no == reetime.sysno) {
  723. reetime.handle(&reetime);
  724. Flush_Dcache_By_Area(reetime.param_buf, reetime.param_buf + reetime.size);
  725. } else if (sys_call_no == vfs_handler.sysno) {
  726. vfs_handler.handle(&vfs_handler);
  727. Flush_Dcache_By_Area(vfs_handler.param_buf, vfs_handler.param_buf + vfs_handler.size);
  728. }
  729. }
  730. static int do_service(void *p)
  731. {
  732. while (true) {
  733. if (service_cmd_flag == 1) {
  734. handle_dispatch();
  735. service_cmd_flag = 0;
  736. smc_flag = 1;
  737. } else
  738. schedule();
  739. }
  740. return 0;
  741. }
  742. static irqreturn_t drivers_interrupt(int irq, void *dummy)
  743. {
  744. unsigned int p0, p1, p2, p3, p4, p5, p6;
  745. if (p1 == 0xffff)
  746. generic_handle_irq(p2);
  747. else {
  748. work_ent.call_no = p1;
  749. INIT_WORK(&(work_ent.work), work_func);
  750. queue_work(secure_wq, &(work_ent.work));
  751. }
  752. return IRQ_HANDLED;
  753. }
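/*
 * drivers_interrupt() dispatches a secure-world notification: p1 is the
 * service call number (0xffff meaning a forwarded hardware IRQ whose
 * number is in p2).  As written, p0..p6 are never populated before use;
 * presumably they are meant to be filled from the SMC/notification
 * registers in the full driver.
 */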
  754. static int register_interrupt_handler(void)
  755. {
  756. int irq_no = 100;
  757. int ret = request_irq(irq_no, drivers_interrupt, 0, "tz_drivers_service", (void *)register_interrupt_handler);
  758. if (ret)
  759. TERR("ERROR for request_irq %d error code : %d ", irq_no, ret);
  760. else
  761. TINFO("request irq [ %d ] OK ", irq_no);
  762. return 0;
  763. }
  764. static int init_all_service_handlers(void)
  765. {
  766. socket.init = socket_init;
  767. socket.deinit = socket_deinit;
  768. socket.handle = socket_handle;
  769. socket.size = 0x80000;
  770. socket.sysno = 1;
  771. reetime.init = reetime_init;
  772. reetime.deinit = reetime_deinit;
  773. reetime.handle = reetime_handle;
  774. reetime.size = 0x1000;
  775. reetime.sysno = 7;
  776. vfs_handler.init = vfs_init;
  777. vfs_handler.deinit = vfs_deinit;
  778. vfs_handler.handle = vfs_handle;
  779. vfs_handler.size = 0x80000;
  780. vfs_handler.sysno = 8;
  781. fiq_drivers.init = fiq_drivers_init;
  782. fiq_drivers.deinit = fiq_drivers_deinit;
  783. fiq_drivers.handle = fiq_drivers_handle;
  784. fiq_drivers.size = 0x1000;
  785. fiq_drivers.sysno = 9;
  786. printer_driver.init = printer_driver_init;
  787. printer_driver.deinit = printer_driver_deinit;
  788. printer_driver.handle = printer_driver_handle;
  789. printer_driver.size = 0x1000;
  790. printer_driver.sysno = 10;
  791. /* socket.init(&socket); */
  792. printk("[%s][%d] begin to init reetime service!\n", __func__, __LINE__);
  793. reetime.init(&reetime);
  794. printk("[%s][%d] init reetime service successfully!\n", __func__, __LINE__);
  795. printk("[%s][%d] begin to init vfs service!\n", __func__, __LINE__);
  796. vfs_handler.init(&vfs_handler);
  797. printk("[%s][%d] init vfs service successfully!\n", __func__, __LINE__);
  798. /*
  799. fiq_drivers.init(&fiq_drivers);
  800. printer_driver.init(&printer_driver);
  801. */
  802. return 0;
  803. }
  804. /***********************************************************************
  805. create_notify_queue:
  806. Create the two way notify queues between T_OS and NT_OS.
  807. argument:
  808. size the notify queue size.
  809. return value:
  810. EINVAL invalid argument
  811. ENOMEM not enough memory
  812. EAGAIN The command ID in the response is NOT accordant to the request.
  813. ***********************************************************************/
  814. static long create_notify_queue(unsigned long msg_buff, unsigned long size)
  815. {
  816. long retVal = 0;
  817. unsigned long irq_flag = 0;
  818. struct message_head msg_head;
  819. struct create_NQ_struct msg_body;
  820. struct ack_fast_call_struct msg_ack;
  821. /* Check the argument */
  822. if (size > MAX_BUFF_SIZE) {
  823. printk("[%s][%d]: The NQ buffer size is too large, DO NOT Allow to create it.\n", __FILE__, __LINE__);
  824. retVal = -EINVAL;
  825. goto return_fn;
  826. }
  827. /* Create the double NQ buffer. */
  828. nt_t_buffer = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(size, SZ_4K)));
  829. if (nt_t_buffer == NULL) {
  830. printk("[%s][%d]: kmalloc nt_t_buffer failed.\n", __func__, __LINE__);
  831. retVal = -ENOMEM;
  832. goto return_fn;
  833. }
  834. t_nt_buffer = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(size, SZ_4K)));
  835. if (t_nt_buffer == NULL) {
  836. printk("[%s][%d]: kmalloc t_nt_buffer failed.\n", __func__, __LINE__);
  837. retVal = -ENOMEM;
  838. goto Destroy_nt_t_buffer;
  839. }
  840. memset(&msg_head, 0, sizeof(struct message_head));
  841. memset(&msg_body, 0, sizeof(struct create_NQ_struct));
  842. memset(&msg_ack, 0, sizeof(struct ack_fast_call_struct));
  843. msg_head.invalid_flag = VALID_TYPE;
  844. msg_head.message_type = FAST_CALL_TYPE;
  845. msg_head.child_type = FAST_CREAT_NQ;
  846. msg_head.param_length = sizeof(struct create_NQ_struct);
  847. msg_body.n_t_nq_phy_addr = virt_to_phys(nt_t_buffer);
  848. msg_body.n_t_size = size;
  849. msg_body.t_n_nq_phy_addr = virt_to_phys(t_nt_buffer);
  850. msg_body.t_n_size = size;
  851. local_irq_save(irq_flag);
  852. /* Notify the T_OS that there are two QN to be created. */
  853. memcpy(msg_buff, &msg_head, sizeof(struct message_head));
  854. memcpy(msg_buff + sizeof(struct message_head), &msg_body, sizeof(struct create_NQ_struct));
  855. Flush_Dcache_By_Area((unsigned long)msg_buff, (unsigned long)msg_buff + MESSAGE_SIZE);
  856. down(&(boot_sema));
  857. down(&(smc_lock));
  858. /*down(&cpu_down_lock);*/
  859. /* Call the smc_fast_call */
  860. /* n_invoke_t_fast_call(0, 0, 0); */
  861. invoke_fastcall();
  862. down(&(boot_sema));
  863. up(&(boot_sema));
  864. memcpy(&msg_head, msg_buff, sizeof(struct message_head));
  865. memcpy(&msg_ack, msg_buff + sizeof(struct message_head), sizeof(struct ack_fast_call_struct));
  866. local_irq_restore(irq_flag);
  867. /* Check the response from T_OS. */
  868. /*up(&cpu_down_lock);*/
  869. if ((msg_head.message_type == FAST_CALL_TYPE) && (msg_head.child_type == FAST_ACK_CREAT_NQ)) {
  870. retVal = msg_ack.retVal;
  871. if (retVal == 0)
  872. goto return_fn;
  873. else
  874. goto Destroy_t_nt_buffer;
  875. } else
  876. retVal = -EAGAIN;
  877. /* Release the resource and return. */
  878. Destroy_t_nt_buffer:
  879. free_pages(t_nt_buffer, get_order(ROUND_UP(size, SZ_4K)));
  880. Destroy_nt_t_buffer:
  881. free_pages(nt_t_buffer, get_order(ROUND_UP(size, SZ_4K)));
  882. return_fn:
  883. return retVal;
  884. }
  885. static long create_ctl_buffer(unsigned long msg_buff, unsigned long size)
  886. {
  887. long retVal = 0;
  888. unsigned long irq_flag = 0;
  889. struct message_head msg_head;
  890. struct create_sys_ctl_struct msg_body;
  891. struct ack_fast_call_struct msg_ack;
  892. /* Check the argument */
  893. if (size > MAX_BUFF_SIZE) {
  894. printk("[%s][%d]: The CTL buffer size is too large, DO NOT Allow to create it.\n", __FILE__, __LINE__);
  895. return -EINVAL;
  896. }
  897. /* Create the ctl_buffer. */
  898. sys_ctl_buffer = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(size, SZ_4K)));
  899. if (sys_ctl_buffer == NULL) {
  900. printk("[%s][%d]: kmalloc ctl_buffer failed.\n", __FILE__, __LINE__);
  901. return -ENOMEM;
  902. }
  903. memset(&msg_head, 0, sizeof(struct message_head));
  904. memset(&msg_body, 0, sizeof(struct create_sys_ctl_struct));
  905. memset(&msg_ack, 0, sizeof(struct ack_fast_call_struct));
  906. msg_head.invalid_flag = VALID_TYPE;
  907. msg_head.message_type = FAST_CALL_TYPE;
  908. msg_head.child_type = FAST_CREAT_SYS_CTL;
  909. msg_head.param_length = sizeof(struct create_sys_ctl_struct);
  910. msg_body.sys_ctl_phy_addr = virt_to_phys(sys_ctl_buffer);
  911. msg_body.sys_ctl_size = size;
  912. local_irq_save(irq_flag);
  913. /* Notify the T_OS that there is ctl_buffer to be created. */
  914. memcpy(msg_buff, &msg_head, sizeof(struct message_head));
  915. memcpy(msg_buff + sizeof(struct message_head), &msg_body, sizeof(struct create_sys_ctl_struct));
  916. Flush_Dcache_By_Area((unsigned long)msg_buff, (unsigned long)msg_buff + MESSAGE_SIZE);
  917. memcpy(&msg_head, msg_buff, sizeof(struct message_head));
  918. memcpy(&msg_ack, msg_buff + sizeof(struct message_head), sizeof(struct ack_fast_call_struct));
  919. local_irq_restore(irq_flag);
  920. /* Check the response from T_OS. */
  921. if ((msg_head.message_type == FAST_CALL_TYPE) && (msg_head.child_type == FAST_ACK_CREAT_SYS_CTL)) {
  922. retVal = msg_ack.retVal;
  923. if (retVal == 0) {
  924. /* printk("[%s][%d]: %s end.\n", __FILE__, __LINE__, __func__); */
  925. return retVal;
  926. }
  927. } else {
  928. retVal = -EAGAIN;
  929. }
  930. /* Release the resource and return. */
  931. free_pages(sys_ctl_buffer, get_order(ROUND_UP(size, SZ_4K)));
  932. /* printk("[%s][%d]: %s end.\n", __FILE__, __LINE__, __func__); */
  933. return retVal;
  934. }
  935. void NQ_init(unsigned long NQ_buff)
  936. {
  937. memset((char *)NQ_buff, 0, NQ_BUFF_SIZE);
  938. }
  939. long init_nq_head(unsigned char *buffer_addr)
  940. {
  941. struct NQ_head *temp_head = NULL;
  942. temp_head = (struct NQ_head *)buffer_addr;
  943. memset(temp_head, 0, NQ_BLOCK_SIZE);
  944. temp_head->start_index = 0;
  945. temp_head->end_index = 0;
  946. temp_head->Max_count = BLOCK_MAX_COUNT;
  947. Flush_Dcache_By_Area((unsigned long)temp_head, (unsigned long)temp_head + NQ_BLOCK_SIZE);
  948. return 0;
  949. }
  950. static __always_inline unsigned int get_end_index(struct NQ_head *nq_head)
  951. {
  952. if (nq_head->end_index == BLOCK_MAX_COUNT)
  953. return 1;
  954. else
  955. return nq_head->end_index + 1;
  956. }
  957. int add_nq_entry(unsigned char *command_buff, int command_length, int valid_flag)
  958. {
  959. struct NQ_head *temp_head = NULL;
  960. struct NQ_entry *temp_entry = NULL;
  961. temp_head = (struct NQ_head *)nt_t_buffer;
  962. if (temp_head->start_index == ((temp_head->end_index + 1) % temp_head->Max_count))
  963. return -ENOMEM;
  964. temp_entry = nt_t_buffer + NQ_BLOCK_SIZE + temp_head->end_index * NQ_BLOCK_SIZE;
  965. temp_entry->valid_flag = valid_flag;
  966. temp_entry->length = command_length;
  967. temp_entry->buffer_addr = command_buff;
  968. temp_head->end_index = (temp_head->end_index + 1) % temp_head->Max_count;
  969. Flush_Dcache_By_Area((unsigned long)nt_t_buffer, (unsigned long)(nt_t_buffer + NQ_BUFF_SIZE));
return 0;
  970. }
  971. unsigned char *get_nq_entry(unsigned char *buffer_addr)
  972. {
  973. struct NQ_head *temp_head = NULL;
  974. struct NQ_entry *temp_entry = NULL;
  975. temp_head = (struct NQ_head *)buffer_addr;
  976. if (temp_head->start_index == temp_head->end_index)
  977. return NULL;
  978. temp_entry = buffer_addr + NQ_BLOCK_SIZE + temp_head->start_index * NQ_BLOCK_SIZE;
  979. temp_head->start_index = (temp_head->start_index + 1) % temp_head->Max_count;
  980. Flush_Dcache_By_Area((unsigned long)buffer_addr, (unsigned long)temp_head + NQ_BUFF_SIZE);
  981. return temp_entry;
  982. }
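/*
 * get_nq_entry() pops the oldest entry from a queue and returns a
 * pointer to its 32-byte slot.  For the T_OS -> NT_OS queue the entry's
 * buffer_addr holds a physical address written by the secure side,
 * which get_response_smc_cmd() below converts back to a kernel virtual
 * pointer with phys_to_virt().
 */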
  983. static int create_nq_buffer(void)
  984. {
  985. int retVal = 0;
  986. retVal = create_notify_queue(message_buff, NQ_SIZE);
  987. if (retVal < 0) {
  988. printk("[%s][%d]:create_notify_queue failed with errno %d.\n", __func__, __LINE__, retVal);
  989. return -EINVAL;
  990. }
  991. NQ_init(t_nt_buffer);
  992. NQ_init(nt_t_buffer);
  993. init_nq_head(t_nt_buffer);
  994. init_nq_head(nt_t_buffer);
  995. return 0;
  996. }
  997. void add_bdrv_queue(int bdrv_id)
  998. {
  999. work_ent.call_no = bdrv_id;
  1000. INIT_WORK(&(work_ent.work), work_func);
  1001. queue_work(secure_wq, &(work_ent.work));
  1002. return;
  1003. }
  1004. void set_fp_command(unsigned long memory_size)
  1005. {
  1006. struct fdrv_message_head fdrv_msg_head;
  1007. printk("[%s][%d]", __func__, __LINE__);
  1008. memset(&fdrv_msg_head, 0, sizeof(struct fdrv_message_head));
  1009. fdrv_msg_head.driver_type = FP_SYS_NO;
  1010. fdrv_msg_head.fdrv_param_length = sizeof(unsigned int);
  1011. memcpy(fdrv_message_buff, &fdrv_msg_head, sizeof(struct fdrv_message_head));
  1012. Flush_Dcache_By_Area((unsigned long)fdrv_message_buff, (unsigned long)fdrv_message_buff + MESSAGE_SIZE);
  1013. return;
  1014. }
  1015. void set_sch_nq_cmd(void)
  1016. {
  1017. struct message_head msg_head;
  1018. memset(&msg_head, 0, sizeof(struct message_head));
  1019. msg_head.invalid_flag = VALID_TYPE;
  1020. msg_head.message_type = STANDARD_CALL_TYPE;
  1021. msg_head.child_type = N_INVOKE_T_NQ;
  1022. memcpy(message_buff, &msg_head, sizeof(struct message_head));
  1023. Flush_Dcache_By_Area((unsigned long)message_buff, (unsigned long)message_buff + MESSAGE_SIZE);
  1024. return;
  1025. }
  1026. void set_sch_load_img_cmd(void)
  1027. {
  1028. struct message_head msg_head;
  1029. memset(&msg_head, 0, sizeof(struct message_head));
  1030. msg_head.invalid_flag = VALID_TYPE;
  1031. msg_head.message_type = STANDARD_CALL_TYPE;
  1032. msg_head.child_type = N_INVOKE_T_LOAD_TEE;
  1033. memcpy(message_buff, &msg_head, sizeof(struct message_head));
  1034. Flush_Dcache_By_Area((unsigned long)message_buff, (unsigned long)message_buff + MESSAGE_SIZE);
  1035. return;
  1036. }
  1037. struct teei_smc_cmd *get_response_smc_cmd(void)
  1038. {
  1039. struct NQ_entry *nq_ent = NULL;
  1040. /* mutex_lock(&t_nt_NQ_lock); */
  1041. nq_ent = get_nq_entry(t_nt_buffer);
  1042. /* mutex_unlock(&t_nt_NQ_lock); */
  1043. if (nq_ent == NULL)
  1044. return NULL;
  1045. return (struct teei_smc_cmd *)phys_to_virt((unsigned long)(nq_ent->buffer_addr));
  1046. }
  1047. static irqreturn_t nt_switch_irq_handler(int irq, void *dev_id)
  1048. {
  1049. unsigned long irq_flag = 0;
  1050. struct teei_smc_cmd *command = NULL;
  1051. struct semaphore *cmd_sema = NULL;
  1052. struct message_head *msg_head = NULL;
  1053. struct ack_fast_call_struct *msg_ack = NULL;
  1054. if (boot_soter_flag == START_STATUS) {
  1055. /* printk("[%s][%d] ==== boot_soter_flag == START_STATUS ========\n", __func__, __LINE__); */
  1056. INIT_WORK(&(load_ent.work), load_func);
  1057. queue_work(secure_wq, &(load_ent.work));
  1058. up(&smc_lock);
  1059. return IRQ_HANDLED;
  1060. } else {
  1061. msg_head = (struct message_head *)message_buff;
  1062. if (FAST_CALL_TYPE == msg_head->message_type) {
  1063. /* printk("[%s][%d] ==== FAST_CALL_TYPE ACK ========\n", __func__, __LINE__); */
  1064. return IRQ_HANDLED;
  1065. } else if (STANDARD_CALL_TYPE == msg_head->message_type) {
  1066. /* Get the smc_cmd struct */
  1067. if (msg_head->child_type == VDRV_CALL_TYPE) {
  1068. /* printk("[%s][%d] ==== VDRV_CALL_TYPE ========\n", __func__, __LINE__); */
  1069. work_ent.call_no = msg_head->param_length;
  1070. INIT_WORK(&(work_ent.work), work_func);
  1071. queue_work(secure_wq, &(work_ent.work));
  1072. up(&smc_lock);
  1073. #if 0
  1074. } else if (msg_head->child_type == FDRV_ACK_TYPE) {
  1075. /* printk("[%s][%d] ==== FDRV_ACK_TYPE ========\n", __func__, __LINE__); */
  1076. /*
  1077. if(forward_call_flag == GLSCH_NONE)
  1078. forward_call_flag = GLSCH_NEG;
  1079. else
  1080. forward_call_flag = GLSCH_NONE;
  1081. */
  1082. up(&boot_sema);
  1083. up(&smc_lock);
  1084. #endif
  1085. } else {
  1086. /* printk("[%s][%d] ==== STANDARD_CALL_TYPE ACK ========\n", __func__, __LINE__); */
  1087. forward_call_flag = GLSCH_NONE;
  1088. command = get_response_smc_cmd();
  1089. if (NULL == command)
  1090. return IRQ_NONE;
  1091. /* Get the semaphore */
  1092. cmd_sema = (struct semaphore *)(command->teei_sema);
  1093. /* Up the semaphore */
  1094. up(cmd_sema);
  1095. up(&smc_lock);
  1096. }
  1097. return IRQ_HANDLED;
  1098. } else {
  1099. printk("[%s][%d] ==== Unknown IRQ ========\n", __func__, __LINE__);
  1100. return IRQ_NONE;
  1101. }
  1102. }
  1103. }
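/*
 * Switch-IRQ dispatch summary: while the secure OS is still booting
 * (START_STATUS) the handler queues load_func() to feed it the TEE
 * image via the VFS path; afterwards it inspects message_buff and
 * either ignores fast-call acks, queues a work item for a VDRV service
 * request, or, for a standard-call ack, looks up the originating
 * teei_smc_cmd and wakes its waiter by up()-ing the embedded semaphore.
 */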
  1104. int register_switch_irq_handler(void)
  1105. {
  1106. int retVal = 0;
  1107. #ifdef CONFIG_OF
  1108. int irq_num = 0;
  1109. struct device_node *node;
  1110. node = of_find_compatible_node(NULL, NULL, "microtrust,utos");
  1111. irq_num = irq_of_parse_and_map(node, 3);
  1112. retVal = request_irq(irq_num, nt_switch_irq_handler, 0, "tz_drivers_service", NULL);
  1113. if (retVal)
  1114. printk("[CONFIG_OF] [%s] ERROR for request_irq %d error code : %d.\n", __func__, irq_num, retVal);
  1115. else
  1116. printk("[CONFIG_OF] [%s] request irq [ %d ] OK.\n", __func__, irq_num);
  1117. #else
  1118. /* register 282 IRQ */
  1119. retVal = request_irq(SWITCH_IRQ, nt_switch_irq_handler, 0, "tz_drivers_service", NULL);
  1120. if (retVal)
  1121. printk("ERROR for request_irq %d error code : %d.\n", SWITCH_IRQ, retVal);
  1122. else
  1123. printk("request irq [ %d ] OK.\n", SWITCH_IRQ);
  1124. #endif
  1125. return 0;
  1126. }
  1127. #if 0
  1128. static int create_vDrv_vfs_buffer(void)
  1129. {
  1130. int retVal = 0;
  1131. retVal = create_vdrv_buffer(message_buff, TEEI_VFS_TYPE, VFS_BUFF_SIZE, &vDrv_vfs_buff);
  1132. if (retVal < 0)
  1133. printk("[%s][%d]:create_vdrv_buffer failed with errno %d.\n", __func__, __LINE__, retVal);
  1134. return retVal;
  1135. }
  1136. #endif
  1137. static int start_teei_service(void)
  1138. {
  1139. /* kernel_thread(do_service, NULL, 0); */
  1140. return 0;
  1141. }
  1142. struct init_cmdbuf_struct {
  1143. unsigned long phy_addr;
  1144. unsigned long fdrv_phy_addr;
  1145. unsigned long bdrv_phy_addr;
  1146. unsigned long tlog_phy_addr;
  1147. };
  1148. struct init_cmdbuf_struct init_cmdbuf_entry;
  1149. static void secondary_init_cmdbuf(void *info)
  1150. {
  1151. struct init_cmdbuf_struct *cd = (struct init_cmdbuf_struct *)info;
  1152. /* with a rmb() */
  1153. rmb();
  1154. printk("[%s][%d] message = %lx, fdrv message = %lx, bdrv_message = %lx, tlog_message = %lx.\n", __func__, __LINE__,
  1155. (unsigned long)cd->phy_addr, (unsigned long)cd->fdrv_phy_addr,
  1156. (unsigned long)cd->bdrv_phy_addr, (unsigned long)cd->tlog_phy_addr);
  1157. n_init_t_fc_buf(cd->phy_addr, cd->fdrv_phy_addr, 0);
  1158. n_init_t_fc_buf(cd->bdrv_phy_addr, cd->tlog_phy_addr, 0);
  1159. /* with a wmb() */
  1160. wmb();
  1161. }
  1162. static void init_cmdbuf(unsigned long phy_address, unsigned long fdrv_phy_address,
  1163. unsigned long bdrv_phy_address, unsigned long tlog_phy_address)
  1164. {
  1165. int cpu_id = 0;
  1166. init_cmdbuf_entry.phy_addr = phy_address;
  1167. init_cmdbuf_entry.fdrv_phy_addr = fdrv_phy_address;
  1168. init_cmdbuf_entry.bdrv_phy_addr = bdrv_phy_address;
  1169. init_cmdbuf_entry.tlog_phy_addr = tlog_phy_address;
  1170. /* with a wmb() */
  1171. wmb();
  1172. get_online_cpus();
  1173. cpu_id = get_current_cpuid();
  1174. smp_call_function_single(cpu_id, secondary_init_cmdbuf, (void *)(&init_cmdbuf_entry), 1);
  1175. put_online_cpus();
  1176. /* with a rmb() */
  1177. rmb();
  1178. }
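/*
 * init_cmdbuf() publishes the four shared pages allocated in
 * create_cmd_buff() below to T_OS: their physical addresses are passed
 * in two n_init_t_fc_buf() SMCs, issued through
 * smp_call_function_single(), presumably so the SMC is made on the
 * core that runs the secure-world context.
 */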
  1179. long create_cmd_buff(void)
  1180. {
  1181. unsigned long irq_status = 0;
  1182. message_buff = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1183. if (message_buff == NULL) {
  1184. printk("[%s][%d] Create message buffer failed!\n", __FILE__, __LINE__);
  1185. return -ENOMEM;
  1186. }
  1187. fdrv_message_buff = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1188. if (fdrv_message_buff == NULL) {
  1189. printk("[%s][%d] Create fdrv message buffer failed!\n", __FILE__, __LINE__);
  1190. free_pages(message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1191. return -ENOMEM;
  1192. }
  1193. bdrv_message_buff = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1194. if (bdrv_message_buff == NULL) {
  1195. printk("[%s][%d] Create bdrv message buffer failed!\n", __FILE__, __LINE__);
  1196. free_pages(message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1197. free_pages(fdrv_message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1198. return -ENOMEM;
  1199. }
  1200. tlog_message_buff = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA , get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1201. if (tlog_message_buff == NULL) {
  1202. printk("[%s][%d] Create tlog message buffer failed!\n", __FILE__, __LINE__);
  1203. free_pages(message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1204. free_pages(fdrv_message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1205. free_pages(bdrv_message_buff, get_order(ROUND_UP(MESSAGE_LENGTH, SZ_4K)));
  1206. return -ENOMEM;
  1207. }
  1208. /* smc_call to notify SOTER the share memory(message_buff) */
  1209. /* n_init_t_fc_buf((unsigned long)virt_to_phys(message_buff), 0, 0); */
  1210. printk("[%s][%d] message = %lx, fdrv message = %lx, bdrv_message = %lx, tlog_message = %lx\n", __func__, __LINE__,
  1211. (unsigned long)virt_to_phys(message_buff),
  1212. (unsigned long)virt_to_phys(fdrv_message_buff),
  1213. (unsigned long)virt_to_phys(bdrv_message_buff),
  1214. (unsigned long)virt_to_phys(tlog_message_buff));
  1215. init_cmdbuf((unsigned long)virt_to_phys(message_buff), (unsigned long)virt_to_phys(fdrv_message_buff),
  1216. (unsigned long)virt_to_phys(bdrv_message_buff), (unsigned long)virt_to_phys(tlog_message_buff));
  1217. return 0;
  1218. }
  1219. unsigned long create_fp_fdrv(int buff_size)
  1220. {
  1221. long retVal = 0;
  1222. unsigned long irq_flag = 0;
  1223. unsigned long temp_addr = 0;
  1224. struct message_head msg_head;
  1225. struct create_fdrv_struct msg_body;
  1226. struct ack_fast_call_struct msg_ack;
  1227. if (message_buff == NULL) {
  1228. printk("[%s][%d]: There is NO command buffer!.\n", __func__, __LINE__);
  1229. return 0;
  1230. }
  1231. if (buff_size > VDRV_MAX_SIZE) {
  1232. printk("[%s][%d]: FP Drv buffer is too large, Can NOT create it.\n", __FILE__, __LINE__);
  1233. return 0;
  1234. }
  1235. temp_addr = (unsigned long) __get_free_pages(GFP_KERNEL | GFP_DMA, get_order(ROUND_UP(buff_size, SZ_4K)));
  1236. if (temp_addr == NULL) {
  1237. printk("[%s][%d]: kmalloc fp drv buffer failed.\n", __FILE__, __LINE__);
  1238. return 0;
  1239. }
  1240. memset(&msg_head, 0, sizeof(struct message_head));
  1241. memset(&msg_body, 0, sizeof(struct create_fdrv_struct));
  1242. memset(&msg_ack, 0, sizeof(struct ack_fast_call_struct));
  1243. msg_head.invalid_flag = VALID_TYPE;
  1244. msg_head.message_type = FAST_CALL_TYPE;
  1245. msg_head.child_type = FAST_CREAT_FDRV;
  1246. msg_head.param_length = sizeof(struct create_fdrv_struct);
  1247. msg_body.fdrv_type = FP_SYS_NO;
  1248. msg_body.fdrv_phy_addr = virt_to_phys(temp_addr);
  1249. msg_body.fdrv_size = buff_size;
  1250. local_irq_save(irq_flag);
  1251. /* Notify the T_OS that there is ctl_buffer to be created. */
  1252. memcpy(message_buff, &msg_head, sizeof(struct message_head));
  1253. memcpy(message_buff + sizeof(struct message_head), &msg_body, sizeof(struct create_fdrv_struct));
  1254. Flush_Dcache_By_Area((unsigned long)message_buff, (unsigned long)message_buff + MESSAGE_SIZE);
  1255. /* Call the smc_fast_call */
  1256. /* n_invoke_t_fast_call(0, 0, 0); */
  1257. down(&(smc_lock));
  1258. /*down(&cpu_down_lock);*/
  1259. down(&(boot_sema));
  1260. invoke_fastcall();
  1261. down(&(boot_sema));
  1262. up(&(boot_sema));
  1263. memcpy(&msg_head, message_buff, sizeof(struct message_head));
  1264. memcpy(&msg_ack, message_buff + sizeof(struct message_head), sizeof(struct ack_fast_call_struct));
  1265. local_irq_restore(irq_flag);
  1266. /*up(&cpu_down_lock);*/
  1267. /* Check the response from T_OS. */
  1268. if ((msg_head.message_type == FAST_CALL_TYPE) && (msg_head.child_type == FAST_ACK_CREAT_FDRV)) {
  1269. retVal = msg_ack.retVal;
  1270. if (retVal == 0) {
  1271. /* printk("[%s][%d]: %s end.\n", __func__, __LINE__, __func__); */
  1272. return temp_addr;
  1273. }
  1274. } else
  1275. retVal = 0;
  1276. /* Release the resource and return. */
  1277. free_pages(temp_addr, get_order(ROUND_UP(buff_size, SZ_4K)));
  1278. printk("[%s][%d]: %s failed!\n", __func__, __LINE__, __func__);
  1279. return retVal;
  1280. }
  1281. int teei_service_init(void)
  1282. {
  1283. /**
  1284. * register interrupt handler
  1285. */
  1286. /* register_switch_irq_handler(); */
  1287. printk("[%s][%d] begin to create nq buffer!\n", __func__, __LINE__);
  1288. create_nq_buffer();
  1289. printk("[%s][%d] end of creating nq buffer!\n", __func__, __LINE__);
  1290. if (soter_error_flag == 1)
  1291. return -1;
  1292. printk("[%s][%d] begin to create fp buffer!\n", __func__, __LINE__);
  1293. fp_buff_addr = create_fp_fdrv(FP_BUFF_SIZE);
  1294. if (soter_error_flag == 1)
  1295. return -1;
  1296. /**
  1297. * init service handler
  1298. */
  1299. init_all_service_handlers();
  1300. if (soter_error_flag == 1)
  1301. return -1;
  1302. /**
  1303. * start service thread
  1304. */
  1305. start_teei_service();
  1306. /**
  1307. * Create Work Queue
  1308. */
  1309. /* secure_wq = create_workqueue("Secure Call"); */
  1310. return 0;
  1311. }