/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
 *              Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative-asm.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
        .macro  ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_exit
        .if     \syscall == 1
        /*
         * Save/restore needed during syscalls. Restore syscall arguments from
         * the values already saved on stack during kernel_entry.
         */
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        .endif
#endif
        .endm

        .macro  ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_enter
#endif
        .endm
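
/*
 * Why ct_user_exit reloads x0-x7 on the syscall path: under the AArch64
 * procedure call standard, a C call such as context_tracking_user_exit()
 * may clobber the caller-saved registers x0-x18, which include the
 * syscall arguments, so they are restored from the pt_regs frame that
 * kernel_entry has already written to the stack.
 */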
/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3
        .macro  kernel_entry, el, regsize = 64
        sub     sp, sp, #S_FRAME_SIZE - S_LR    // room for LR, SP, SPSR, ELR
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
        .endif
        push    x28, x29
        push    x26, x27
        push    x24, x25
        push    x22, x23
        push    x20, x21
        push    x18, x19
        push    x16, x17
        push    x14, x15
        push    x12, x13
        push    x10, x11
        push    x8, x9
        push    x6, x7
        push    x4, x5
        push    x2, x3
        push    x0, x1
        .if     \el == 0
        mrs     x21, sp_el0
        get_thread_info tsk                     // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.
        .else
        add     x21, sp, #S_FRAME_SIZE
        .endif
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        stp     x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        .if     \el == 0
        mvn     x21, xzr
        str     x21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
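
/*
 * For reference, a sketch of the frame kernel_entry builds, based on this
 * kernel generation's struct pt_regs (the authoritative offsets are the
 * S_* constants generated by asm-offsets.c):
 *
 *      struct pt_regs {
 *              u64 regs[31];   // x0..x30; S_X0 == 0, S_LR == regs[30]
 *              u64 sp;         // S_SP: aborted SP
 *              u64 pc;         // S_PC: aborted PC (from ELR_EL1)
 *              u64 pstate;     // S_PSTATE: aborted PSTATE (from SPSR_EL1)
 *              u64 orig_x0;    // S_ORIG_X0: for syscall restart
 *              u64 syscallno;  // S_SYSCALLNO: -1 outside a syscall
 *      };
 *
 * The initial "sub sp, sp, #S_FRAME_SIZE - S_LR" plus the fifteen pushes
 * (15 * 16 bytes for x0..x29) leave sp pointing at regs[0], i.e. at a
 * complete struct pt_regs.
 */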
#ifdef CONFIG_MTK_COMPAT
        .macro  kernel_entry_compat
        sub     sp, sp, #S_FRAME_SIZE - S_X16   // room for LR, SP, SPSR, ELR
        mov     w0, w0                          // zero upper 32 bits of x0
        stp     x14, x15, [sp, #-16]!
        stp     x12, x13, [sp, #-16]!
        stp     x10, x11, [sp, #-16]!
        stp     x8, x9, [sp, #-16]!
        stp     x6, x7, [sp, #-16]!
        stp     x4, x5, [sp, #-16]!
        stp     x2, x3, [sp, #-16]!
        stp     x0, x1, [sp, #-16]!
        mrs     x21, sp_el0
        get_thread_info tsk                     // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        stp     x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        mvn     x21, xzr
        str     x21, [sp, #S_SYSCALLNO]

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
#endif
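
/*
 * Note on the vendor (CONFIG_MTK_COMPAT) fast path above: an AArch32 EL0
 * task apparently only has architectural state in the registers that map
 * onto x0-x14 (r0-r14), so this variant saves just x0-x15 (x15 keeps the
 * stp pairs even) and reserves the frame only down to S_X16. The x16-x29
 * slots of pt_regs are left unwritten; kernel_exit_compat must therefore
 * skip them on the way out.
 */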
        .macro  kernel_exit, el, ret = 0
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
#ifdef CONFIG_ARM64_ERRATUM_845719
        alternative_insn                        \
        "nop",                                  \
        "tbz x22, #4, 1f",                      \
        ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        alternative_insn                        \
        "nop; nop",                             \
        "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
        ARM64_WORKAROUND_845719
#else
        alternative_insn                        \
        "nop",                                  \
        "msr contextidr_el1, xzr; 1:",          \
        ARM64_WORKAROUND_845719
#endif
#endif
        .endif
        .if     \ret
        ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
        add     sp, sp, S_X2
        .else
        pop     x0, x1
        .endif
        pop     x2, x3                          // load the rest of the registers
        pop     x4, x5
        pop     x6, x7
        pop     x8, x9
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        .if     \el == 0
        msr     sp_el0, x23
        .endif
        pop     x10, x11
        pop     x12, x13
        pop     x14, x15
        pop     x16, x17
        pop     x18, x19
        pop     x20, x21
        pop     x22, x23
        pop     x24, x25
        pop     x26, x27
        pop     x28, x29
        ldr     lr, [sp], #S_FRAME_SIZE - S_LR  // load LR and restore SP
        eret                                    // return to kernel
        .endm
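
/*
 * The alternative_insn sequences above form the Cortex-A53 erratum 845719
 * workaround: when returning to a 32-bit EL0 task (SPSR_EL1 bit 4 set;
 * "tbz x22, #4, 1f" skips the fix-up for 64-bit tasks), CONTEXTIDR_EL1
 * must be written before the eret. With CONFIG_PID_IN_CONTEXTIDR the
 * register carries the PID and is rewritten with its own value; otherwise
 * it is simply written with zero. The instructions are only patched in on
 * CPUs flagged with ARM64_WORKAROUND_845719.
 */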
#ifdef CONFIG_MTK_COMPAT
        .macro  kernel_exit_compat, ret = 0
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        .if     \ret
        ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
        add     sp, sp, S_X2
        .else
        ldp     x0, x1, [sp], #16
        .endif
        ldp     x2, x3, [sp], #16               // load the rest of the registers
        ldp     x4, x5, [sp], #16
        ldp     x6, x7, [sp], #16
        ldp     x8, x9, [sp], #16
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        msr     sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
        alternative_insn                        \
        "nop",                                  \
        "tbz x22, #4, 1f",                      \
        ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        alternative_insn                        \
        "nop; nop",                             \
        "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
        ARM64_WORKAROUND_845719
#else
        alternative_insn                        \
        "nop",                                  \
        "msr contextidr_el1, xzr; 1:",          \
        ARM64_WORKAROUND_845719
#endif
#endif
        ldp     x10, x11, [sp], #16
        ldp     x12, x13, [sp], #16
        ldp     x14, x15, [sp], #16
        tbnz    x22, #4, 1f                     // 32-bit task: x16-x29 were not saved
        ldp     x16, x17, [sp], #16
        ldp     x18, x19, [sp], #16
        ldp     x20, x21, [sp], #16
        ldp     x22, x23, [sp], #16
        ldp     x24, x25, [sp], #16
        ldp     x26, x27, [sp], #16
        ldp     x28, x29, [sp], #16
        ldr     lr, [sp], #S_FRAME_SIZE - S_LR  // load LR and restore SP
        eret                                    // return to kernel
        // not reached: eret does not fall through
1:
#ifdef CONFIG_ARM64_ERRATUM_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        mrs     x29, contextidr_el1
        msr     contextidr_el1, x29
#else
        msr     contextidr_el1, xzr
#endif
#endif
        add     sp, sp, #S_X29-S_X15            // skip the unsaved x16-x29 slots
        ldr     lr, [sp], #S_FRAME_SIZE - S_LR  // load LR and restore SP
        eret                                    // return to kernel
        .endm
#endif
        .macro  get_thread_info, rd
        mov     \rd, sp
        and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
        .endm
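
/*
 * A sketch of the C equivalent (matching this kernel generation's
 * current_thread_info()): struct thread_info lives at the base of the
 * kernel stack, so it can be recovered by masking the stack pointer:
 *
 *      ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 */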
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr   .req    x25             // number of system calls
scno    .req    x26             // syscall number
stbl    .req    x27             // syscall table pointer
tsk     .req    x28             // current thread_info
/*
 * Interrupt handling.
 */
        .macro  irq_handler
        ldr     x1, handle_arch_irq
        mov     x0, sp
        blr     x1
        .endm
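
/*
 * handle_arch_irq is the .quad emitted at the bottom of this file and is
 * filled in by the interrupt controller driver (e.g. the GIC) at boot.
 * Viewed from C (assuming this kernel generation's declaration):
 *
 *      void (*handle_arch_irq)(struct pt_regs *regs);
 *
 * so the macro above amounts to handle_arch_irq(regs), with regs pointing
 * at the frame built by kernel_entry.
 */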
        .text

/*
 * Exception vectors.
 */
        .align  11
ENTRY(vectors)
        ventry  el1_sync_invalid                // Synchronous EL1t
        ventry  el1_irq_invalid                 // IRQ EL1t
        ventry  el1_fiq_invalid                 // FIQ EL1t
        ventry  el1_error_invalid               // Error EL1t

        ventry  el1_sync                        // Synchronous EL1h
        ventry  el1_irq                         // IRQ EL1h
        ventry  el1_fiq_invalid                 // FIQ EL1h
        ventry  el1_error_invalid               // Error EL1h

        ventry  el0_sync                        // Synchronous 64-bit EL0
        ventry  el0_irq                         // IRQ 64-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 64-bit EL0
        ventry  el0_error_invalid               // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        ventry  el0_sync_compat                 // Synchronous 32-bit EL0
        ventry  el0_irq_compat                  // IRQ 32-bit EL0
        ventry  el0_fiq_invalid_compat          // FIQ 32-bit EL0
        ventry  el0_error_invalid_compat        // Error 32-bit EL0
#else
        ventry  el0_sync_invalid                // Synchronous 32-bit EL0
        ventry  el0_irq_invalid                 // IRQ 32-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 32-bit EL0
        ventry  el0_error_invalid               // Error 32-bit EL0
#endif
END(vectors)
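
/*
 * Layout note: VBAR_EL1 requires 2KB alignment, hence ".align 11", and
 * the architecture allots each vector 0x80 bytes (ventry provides the
 * per-entry alignment). The four groups above are, in order: current EL
 * with SP_EL0 (EL1t), current EL with SP_EL1 (EL1h), lower EL using
 * AArch64, and lower EL using AArch32.
 */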
/*
 * Invalid mode handlers
 */
        .macro  inv_entry, el, reason, regsize = 64
        kernel_entry el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
        b       bad_mode
        .endm

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
        inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
        .align  6
el1_sync:
        kernel_entry 1
        mrs     x1, esr_el1                     // read the syndrome register
        lsr     x24, x1, #ESR_EL1_EC_SHIFT      // exception class
        cmp     x24, #ESR_EL1_EC_DABT_EL1       // data abort in EL1
        b.eq    el1_da
        cmp     x24, #ESR_EL1_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_EL1_EC_SP_ALIGN       // stack alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_EL1_EC_PC_ALIGN       // pc alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL1
        b.eq    el1_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL1    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv

el1_da:
        /*
         * Data abort handling
         */
        mrs     x0, far_el1
        enable_dbg
        // re-enable interrupts if they were enabled in the aborted context
        tbnz    x23, #7, 1f                     // PSR_I_BIT
        enable_irq
1:
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort

        // disable interrupts before pulling preserved data off the stack
        disable_irq
        kernel_exit 1

el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        enable_dbg
        mov     x2, sp
        b       do_sp_pc_abort

el1_undef:
        /*
         * Undefined instruction
         */
        enable_dbg
        mov     x0, sp
        b       do_undefinstr

el1_dbg:
        /*
         * Debug exception handling
         */
        cmp     x24, #ESR_EL1_EC_BRK64          // if BRK64
        cinc    x24, x24, eq                    // set bit '0'
        tbz     x24, #0, el1_inv                // EL1 only
        mrs     x0, far_el1
        mov     x2, sp                          // struct pt_regs
        bl      do_debug_exception
        kernel_exit 1

el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        enable_dbg
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        b       bad_mode
ENDPROC(el1_sync)
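
/*
 * Decoding note: the exception class (EC) lives in ESR_EL1[31:26], so the
 * "lsr x24, x1, #ESR_EL1_EC_SHIFT" above leaves the EC alone in x24. The
 * debug exception classes have the highest EC encodings, which is why a
 * single "b.ge el1_dbg" after comparing against ESR_EL1_EC_BREAKPT_EL1
 * catches all of them.
 */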
        .align  6
el1_irq:
        kernel_entry 1
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
#ifdef CONFIG_MTPROF
        bl      MT_trace_hardirqs_off
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
        cbnz    w24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TI_FLAGS]            // get flags
        tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
        bl      el1_preempt
1:
#endif
#ifdef CONFIG_MTPROF
        bl      MT_trace_hardirqs_on
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
        mov     x24, lr
1:      bl      preempt_schedule_irq            // irq en/disable is done inside
        ldr     x0, [tsk, #TI_FLAGS]            // get new task's TI_FLAGS
        tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
        ret     x24
#endif
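
/*
 * Roughly, in C (illustrative only):
 *
 *      do {
 *              preempt_schedule_irq();         // re-enables IRQs internally
 *      } while (current_thread_info()->flags & _TIF_NEED_RESCHED);
 */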
/*
 * EL0 mode handlers.
 */
        .align  6
el0_sync:
        kernel_entry 0
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_EL1_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_EL1_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_SYS64          // configurable trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_EL1_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
#ifdef CONFIG_COMPAT
        .align  6
el0_sync_compat:
#ifdef CONFIG_MTK_COMPAT
        kernel_entry_compat
#else
        kernel_entry 0, 32
#endif
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_EL1_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_EL1_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP15_32        // CP15 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP15_64        // CP15 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_MR        // CP14 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_LS        // CP14 LDC/STC trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_64        // CP14 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv

el0_svc_compat:
        /*
         * AArch32 syscall handling
         */
        adr     stbl, compat_sys_call_table     // load compat syscall table pointer
        uxtw    scno, w7                        // syscall number in w7 (r7)
        mov     sc_nr, #__NR_compat_syscalls
        b       el0_svc_naked

        .align  6
el0_irq_compat:
#ifdef CONFIG_MTK_COMPAT
        kernel_entry_compat
#else
        kernel_entry 0, 32
#endif
        b       el0_irq_naked
#endif
el0_da:
        /*
         * Data abort handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        bic     x0, x26, #(0xff << 56)          // clear the address tag
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
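
/*
 * The "bic x0, x26, #(0xff << 56)" above strips bits [63:56] from the
 * fault address: with Top Byte Ignore enabled for userspace those bits
 * may hold a pointer tag rather than address bits, and do_mem_abort()
 * expects an untagged address.
 */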
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, x26
        orr     x1, x25, #1 << 24               // use reserved ISS bit for instruction aborts
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user

el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        enable_dbg
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_acc
        b       ret_to_user

el0_fpsimd_exc:
        /*
         * Floating Point or Advanced SIMD exception
         */
        enable_dbg
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_exc
        b       ret_to_user

el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_sp_pc_abort
        b       ret_to_user

el0_undef:
        /*
         * Undefined instruction
         */
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user

el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
        mrs     x0, far_el1
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
        enable_dbg
        ct_user_exit
        b       ret_to_user

el0_inv:
        enable_dbg
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        bl      bad_mode
        b       ret_to_user
ENDPROC(el0_sync)
        .align  6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        ct_user_exit
        irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        b       ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
        add     x8, x0, #THREAD_CPU_CONTEXT
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
        add     x8, x1, #THREAD_CPU_CONTEXT
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        ret
ENDPROC(cpu_switch_to)
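
/*
 * For reference, a sketch of the context being switched, matching this
 * kernel generation's struct cpu_context in <asm/processor.h>
 * (THREAD_CPU_CONTEXT is its offset within task_struct):
 *
 *      struct cpu_context {
 *              unsigned long x19, x20, x21, x22, x23, x24,
 *                            x25, x26, x27, x28;
 *              unsigned long fp, sp, pc;
 *      };
 *
 * lr is stored into .pc, so the final "ret" resumes the next task at the
 * point where it last called cpu_switch_to().
 */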
/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, fast_work_pending
        enable_step_tsk x1, x2
#ifdef CONFIG_MTK_COMPAT
        kernel_exit_compat ret = 1
#else
        kernel_exit 0, ret = 1
#endif

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     x0, [sp, #S_X0]                 // returned x0
work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
        /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
        ldr     x2, [sp, #S_PSTATE]
        mov     x0, sp                          // 'regs'
        tst     x2, #PSR_MODE_MASK              // user mode regs?
        b.ne    no_work_pending                 // returning to kernel
        enable_irq                              // enable interrupts for do_notify_resume()
        bl      do_notify_resume
        b       ret_to_user
work_resched:
        bl      schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
        enable_step_tsk x1, x2
no_work_pending:
#ifdef CONFIG_MTK_COMPAT
        kernel_exit_compat ret = 0
#else
        kernel_exit 0, ret = 0
#endif
ENDPROC(ret_to_user)
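
/*
 * A rough C view of the return-to-user loop above (illustrative only):
 *
 *      for (;;) {
 *              local_irq_disable();
 *              flags = current_thread_info()->flags;
 *              if (!(flags & _TIF_WORK_MASK))
 *                      break;                  // kernel_exit to EL0
 *              if (flags & _TIF_NEED_RESCHED) {
 *                      schedule();
 *              } else {
 *                      local_irq_enable();
 *                      do_notify_resume(regs, flags);  // signals etc.
 *              }
 *      }
 */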
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        cbz     x19, 1f                         // not a kernel thread
        mov     x0, x20
        blr     x19
1:      get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_fork)
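
/*
 * Convention note (matching this kernel generation's copy_thread()): for
 * kernel threads, cpu_context.x19 holds the thread function and x20 its
 * argument, while for user tasks x19 is zero. The cbz above therefore
 * only calls through x19 when a freshly forked kernel thread is first
 * scheduled.
 */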
/*
 * SVC handler.
 */
        .align  6
el0_svc:
        adrp    stbl, sys_call_table            // load syscall table pointer
        uxtw    scno, w8                        // syscall number in w8
        mov     sc_nr, #__NR_syscalls
el0_svc_naked:                                  // compat entry point
        stp     x0, scno, [sp, #S_ORIG_X0]      // save the original x0 and syscall number
        enable_dbg_and_irq
        ct_user_exit 1

        ldr     x16, [tsk, #TI_FLAGS]           // check for syscall hooks
        tst     x16, #_TIF_SYSCALL_WORK
        b.ne    __sys_trace
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    ni_sys
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        blr     x16                             // call sys_* routine
        b       ret_fast_syscall
ni_sys:
        mov     x0, sp
        bl      do_ni_syscall
        b       ret_fast_syscall
ENDPROC(el0_svc)
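
/*
 * A rough C equivalent of the fast path above (illustrative only):
 *
 *      if (!(current_thread_info()->flags & _TIF_SYSCALL_WORK)) {
 *              if (scno < __NR_syscalls)
 *                      regs->regs[0] = sys_call_table[scno](regs->regs[0],
 *                                      regs->regs[1], regs->regs[2],
 *                                      regs->regs[3], regs->regs[4],
 *                                      regs->regs[5]);
 *              else
 *                      regs->regs[0] = do_ni_syscall(regs);
 *      }
 *
 * On a flagged task (_TIF_SYSCALL_WORK) the __sys_trace path below is
 * taken instead, wrapping the call with syscall_trace_enter()/exit().
 */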
/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
        mov     w0, #-1                         // set default errno for
        cmp     scno, x0                        // user-issued syscall(-1)
        b.ne    1f
        mov     x0, #-ENOSYS
        str     x0, [sp, #S_X0]
1:      mov     x0, sp
        bl      syscall_trace_enter
        cmp     w0, #-1                         // skip the syscall?
        b.eq    __sys_trace_return_skipped
        uxtw    scno, w0                        // syscall number (possibly new)
        mov     x1, sp                          // pointer to regs
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    __ni_sys_trace
        ldp     x0, x1, [sp]                    // restore the syscall args
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        blr     x16                             // call sys_* routine

__sys_trace_return:
        str     x0, [sp, #S_X0]                 // save returned x0
__sys_trace_return_skipped:
        mov     x0, sp
        bl      syscall_trace_exit
        b       ret_to_user

__ni_sys_trace:
        mov     x0, sp
        bl      do_ni_syscall
        b       __sys_trace_return
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
        mov     x0, sp
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
        .quad   0