
/*************************************************************
 * @file      nt_smc_call.h
 * @brief     Monitor call interface for the user.
 *            This implementation follows the SMC Calling Convention
 *            document from ARM, document number ARM DEN 0028A (2013).
 * @author    luocl
 * @author    Steven Meng
 * @copyright microtrust Corporation
 *************************************************************/
#ifndef SMC_CALL_H_
#define SMC_CALL_H_

#include <stdint.h>

/* These field values are fixed by the ARM SMC Calling Convention. */
#define ID_FIELD_F_FAST_SMC_CALL        1
#define ID_FIELD_F_STANDARD_SMC_CALL    0
#define ID_FIELD_W_64                   1
#define ID_FIELD_W_32                   0
#define ID_FIELD_T_ARM_SERVICE          0
#define ID_FIELD_T_CPU_SERVICE          1
#define ID_FIELD_T_SIP_SERVICE          2
#define ID_FIELD_T_OEM_SERVICE          3
#define ID_FIELD_T_STANDARD_SERVICE     4
/* TA Call 48-49 */
#define ID_FIELD_T_TA_SERVICE0          48
#define ID_FIELD_T_TA_SERVICE1          49
/* TOS Call 50-63 */
#define ID_FIELD_T_TRUSTED_OS_SERVICE0  50
#define ID_FIELD_T_TRUSTED_OS_SERVICE1  51
#define ID_FIELD_T_TRUSTED_OS_SERVICE2  52
#define ID_FIELD_T_TRUSTED_OS_SERVICE3  53

#define MAKE_SMC_CALL_ID(F, W, T, FN) (((F) << 31) | ((W) << 30) | ((T) << 24) | (FN))
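/*
 * Resulting ID layout (per ARM DEN 0028A): bit 31 selects fast vs. standard
 * call, bit 30 selects the SMC64 vs. SMC32 convention, bits 29:24 carry the
 * owning service, and the low bits carry the function number within that
 * service.  For example, T_BOOT_NT_OS below expands to
 * (1 << 31) | (0 << 30) | (50 << 24) | 0 == 0xB2000000.
 */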
#define SMC_CALL_RTC_OK                 0x0
#define SMC_CALL_RTC_UNKNOWN_FUN        0xFFFFFFFF
#define SMC_CALL_RTC_MONITOR_NOT_READY  0xFFFFFFFE

/* For t side Fast Call */
#define T_BOOT_NT_OS \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 0)
#define T_ACK_N_OS_READY \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 1)
#define T_GET_PARAM_IN \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 2)
#define T_ACK_T_OS_FOREGROUND \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 3)
#define T_ACK_T_OS_BACKSTAGE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 4)
#define T_ACK_N_FAST_CALL \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 5)
#define T_DUMP_STATE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 6)
#define T_ACK_N_INIT_FC_BUF \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 7)
#define T_GET_BOOT_PARMS \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE0, 8)

/* For t side Standard Call */
#define T_SCHED_NT \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 0)
#define T_ACK_N_SYS_CTL \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 1)
#define T_ACK_N_NQ \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 2)
#define T_ACK_N_INVOKE_DRV \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 3)
#define T_INVOKE_N_DRV \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 4)
#define T_RAISE_N_EVENT \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 5)
#define T_ACK_N_BOOT_OK \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 6)
#define T_INVOKE_N_LOAD_IMG \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 7)
#define T_ACK_N_KERNEL_OK \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 8)
#define T_SCHED_NT_IRQ \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_32, ID_FIELD_T_TRUSTED_OS_SERVICE1, 9)

/* For nt side Fast Call */
#define N_SWITCH_TO_T_OS_STAGE2 \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 0)
#define N_CPU_CBOOT \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_CPU_SERVICE, 0)
#define N_CPU_SUSPEND \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_CPU_SERVICE, 1)
#define N_CPU_ON \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_CPU_SERVICE, 2)
#define N_CPU_OFF \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_CPU_SERVICE, 3)
#define N_GET_PARAM_IN \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 1)
#define N_INIT_T_FC_BUF \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 2)
#define N_INVOKE_T_FAST_CALL \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 3)
#define NT_DUMP_STATE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 4)
#define N_ACK_T_FOREGROUND \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 5)
#define N_ACK_T_BACKSTAGE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 6)
#define N_INIT_T_BOOT_STAGE1 \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 7)
#define N_SWITCH_CORE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_FAST_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE2, 8)

/* For nt side Standard Call */
#define NT_SCHED_T \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 0)
#define N_INVOKE_T_SYS_CTL \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 1)
#define N_INVOKE_T_NQ \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 2)
#define N_INVOKE_T_DRV \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 3)
#define N_RAISE_T_EVENT \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 4)
#define N_ACK_T_INVOKE_DRV \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 5)
#define N_INVOKE_T_LOAD_TEE \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 6)
#define N_ACK_T_LOAD_IMG \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 7)
#define NT_SCHED_T_FIQ \
    MAKE_SMC_CALL_ID(ID_FIELD_F_STANDARD_SMC_CALL, ID_FIELD_W_64, ID_FIELD_T_TRUSTED_OS_SERVICE3, 8)
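/*
 * Register usage for the wrappers below (SMC64 convention, per ARM DEN
 * 0028A): the function ID is placed in x0, up to three parameters are
 * passed in x1-x3, and any results are read back from x0-x3 after the
 * SMC instruction returns.
 */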
/* ================== NT FAST CALL ================ */
static inline void n_init_t_boot_stage1(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INIT_T_BOOT_STAGE1), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void n_switch_to_t_os_stage2(void)
{
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "mov x1, #0\n\t"
        "mov x2, #0\n\t"
        "mov x3, #0\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_SWITCH_TO_T_OS_STAGE2)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void nt_dump_state(void)
{
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "mov x1, #0\n\t"
        "mov x2, #0\n\t"
        "mov x3, #0\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (NT_DUMP_STATE)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void n_get_param_in(
    uint64_t *rtc0,
    uint64_t *rtc1,
    uint64_t *rtc2,
    uint64_t *rtc3)
{
    uint64_t temp[4];

    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "mov x1, #0\n\t"
        "mov x2, #0\n\t"
        "mov x3, #0\n\t"
        "smc 0\n\t"
        "nop\n\t"
        "str x0, [%[temp]]\n\t"
        "str x1, [%[temp], #8]\n\t"
        "str x2, [%[temp], #16]\n\t"
        "str x3, [%[temp], #24]\n\t"
        : :
        [fun_id] "r" (N_GET_PARAM_IN), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
    *rtc0 = temp[0];
    *rtc1 = temp[1];
    *rtc2 = temp[2];
    *rtc3 = temp[3];
}
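/*
 * Usage sketch (hypothetical caller): the monitor returns four values in
 * x0-x3; their meaning is defined by the monitor-side implementation, and
 * the variable names below are illustrative only.
 *
 *     uint64_t param0, param1, param2, param3;
 *
 *     n_get_param_in(&param0, &param1, &param2, &param3);
 */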
static inline void n_init_t_fc_buf(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INIT_T_FC_BUF), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void n_invoke_t_fast_call(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INVOKE_T_FAST_CALL), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}
/* ================== NT STANDARD CALL ================ */
static inline void nt_sched_t(void)
{
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "mov x1, #0\n\t"
        "mov x2, #0\n\t"
        "mov x3, #0\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (NT_SCHED_T)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void n_invoke_t_sys_ctl(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INVOKE_T_SYS_CTL), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void n_invoke_t_nq(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INVOKE_T_NQ), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void n_invoke_t_drv(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INVOKE_T_DRV), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void n_raise_t_event(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_RAISE_T_EVENT), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void n_ack_t_invoke_drv(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_ACK_T_INVOKE_DRV), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void n_invoke_t_load_tee(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_INVOKE_T_LOAD_TEE), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void n_ack_t_load_img(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_ACK_T_LOAD_IMG), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}
static inline void nt_sched_t_fiq(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (NT_SCHED_T_FIQ), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

static inline void nt_sched_core(
    uint64_t p0,
    uint64_t p1,
    uint64_t p2)
{
    uint64_t temp[3];

    temp[0] = p0;
    temp[1] = p1;
    temp[2] = p2;
    __asm__ volatile(
        /* ".arch_extension sec\n" */
        "mov x0, %[fun_id]\n\t"
        "ldr x1, [%[temp], #0]\n\t"
        "ldr x2, [%[temp], #8]\n\t"
        "ldr x3, [%[temp], #16]\n\t"
        "smc 0\n\t"
        "nop"
        : :
        [fun_id] "r" (N_SWITCH_CORE), [temp] "r" (temp)
        : "x0", "x1", "x2", "x3", "memory");
}

#endif /* SMC_CALL_H_ */