.macro clear_gp_regs
	// Wipe the general-purpose registers x0-x29 so that no stale
	// (potentially user-controlled) values survive into the kernel.
	// x30 (lr) is not in the list; it is handled by the entry code.
	mov	x0, xzr
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	mov	x4, xzr
	mov	x5, xzr
	mov	x6, xzr
	mov	x7, xzr
	mov	x8, xzr
	mov	x9, xzr
	mov	x10, xzr
	mov	x11, xzr
	mov	x12, xzr
	mov	x13, xzr
	mov	x14, xzr
	mov	x15, xzr
	mov	x16, xzr
	mov	x17, xzr
	mov	x18, xzr
	mov	x19, xzr
	mov	x20, xzr
	mov	x21, xzr
	mov	x22, xzr
	mov	x23, xzr
	mov	x24, xzr
	mov	x25, xzr
	mov	x26, xzr
	mov	x27, xzr
	mov	x28, xzr
	mov	x29, xzr
.endm
// Bad Abort numbers: reason codes passed as the second argument to
// bad_mode()/bad_el0_sync() by the *_invalid stubs and el1_inv/el0_inv.
BAD_SYNC = 0
BAD_IRQ = 1
BAD_FIQ = 2
BAD_ERROR = 3
// Vector table entry stub.  Each vector is 2^7 = 128 bytes apart
// (.align 7); the stub reserves the pt_regs frame on the stack and
// branches to the full handler named el<el>_<label> (e.g. el1_irq).
// NOTE(review): \regsize is accepted for symmetry with kernel_entry
// but is not used by this stub in the visible code - confirm.
.macro kernel_ventry, el, label, regsize = 64
.align 7
sub sp, sp, #S_FRAME_SIZE // room for a struct pt_regs frame
b el\()\el\()_\label
.endm
// Build a struct pt_regs exception frame on the stack (space reserved
// by kernel_ventry) and capture where we came from.
// On exit from this macro:
//   tsk = current task pointer (register alias for x28)
//   x21 = aborted SP, x22 = aborted PC, x23 = aborted PSTATE
.macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
.if \el == 0
clear_gp_regs // don't let user register values leak into the kernel
mrs x21, sp_el0 // user stack pointer at the time of the exception
// Load 'tsk' with this CPU's current task pointer (x20 is scratch).
// NOTE(review): upstream pairs this load with clearing MDSCR_EL1.SS
// (single-step); that code is not visible in this chunk - confirm.
ldr_this_cpu tsk, PERCPU_ENTRY_TASK_RAW, x20
.else
add x21, sp, #S_FRAME_SIZE // SP value before this frame was pushed
get_thread_info tsk
// Save the task's original addr_limit and set USER_DS
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
.endif /* \el == 0 */
mrs x22, elr_el1 // aborted PC
mrs x23, spsr_el1 // aborted PSTATE
stp lr, x21, [sp, #S_LR]
// Frame-pointer record: terminate the chain for exceptions from EL0,
// or link to the interrupted EL1 context (fp, pc) so the unwinder can
// walk through the exception.
.if \el == 0
stp xzr, xzr, [sp, #S_STACKFRAME]
.else
stp x29, x22, [sp, #S_STACKFRAME]
.endif
add x29, sp, #S_STACKFRAME
stp x22, x23, [sp, #S_PC]
// Mark the frame as "not a syscall" until proven otherwise.
.if \el == 0
mov w21, #NO_SYSCALL
str w21, [sp, #S_SYSCALLNO]
.endif
// While in the kernel, sp_el0 carries the current task pointer.
.if \el == 0
msr sp_el0, tsk
.endif
// x21 - aborted SP
// x22 - aborted PC
// x23 - aborted PSTATE
.endm
// Unwind a kernel_entry frame and return from the exception.
// \el == 0: returning to EL0 - restore the user SP via sp_el0.
// \el != 0: returning to EL1 - mask all DAIF exceptions while the
//           frame is dismantled and restore the saved addr_limit.
// (A dead, empty ".if \el == 0 / .endif" pair was removed here; it
// emitted nothing and only obscured the real EL0-only block below.)
.macro kernel_exit, el
.if \el != 0
disable_daif // no interrupts while unwinding EL1 state
// Restore the task's original addr_limit.
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
.endif
// load ELR, SPSR
ldp x21, x22, [sp, #S_PC]
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
ldp x0, x1, [sp, #16 * 0]
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
ldp x8, x9, [sp, #16 * 4]
ldp x10, x11, [sp, #16 * 5]
ldp x12, x13, [sp, #16 * 6]
ldp x14, x15, [sp, #16 * 7]
ldp x16, x17, [sp, #16 * 8]
ldp x18, x19, [sp, #16 * 9]
ldp x20, x21, [sp, #16 * 10]
ldp x22, x23, [sp, #16 * 11]
ldp x24, x25, [sp, #16 * 12]
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
eret
sb // speculation barrier: nothing executes speculatively past eret
.endm
// Switch to this CPU's IRQ stack if we are currently on the task
// stack; if not (e.g. already on the IRQ stack), leave sp alone.
// Clobbers x25/x26.  x19 keeps the original sp for irq_stack_exit,
// so it must not be touched until then.
.macro irq_stack_entry
mov x19, sp // preserve the original sp
// Compare sp with the base of the task stack.
// If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
// and should switch to the irq stack.
ldr x25, [tsk, TSK_STACK]
eor x25, x25, x19
and x25, x25, #~(THREAD_SIZE - 1)
cbnz x25, 9998f // not on the task stack: keep current sp
// NOTE(review): this assumes IRQ_STACK_PTR is the stack *base* and the
// IRQ stack is THREAD_SIZE bytes, so base + THREAD_SIZE is the top -
// confirm against the per-CPU definitions.
ldr_this_cpu x25, IRQ_STACK_PTR, x26
mov x26, #THREAD_SIZE
add x26, x25, x26
/* switch to the irq stack */
mov sp, x26
9998:
.endm
// Undo irq_stack_entry: restore the sp it stashed in x19.
.macro irq_stack_exit
mov sp, x19
.endm
// 'tsk' aliases x28; kernel_entry loads it with the current task
// pointer (x28 is callee-saved, so it survives calls into C).
tsk .req x28 // current thread_info
// Interrupt handling.
// Run the C interrupt handler on the IRQ stack, passing the pt_regs
// frame (current sp) in x0.
.macro irq_handler
mov x0, sp // x0 = struct pt_regs (base of the entry frame)
irq_stack_entry
bl handle_arch_irq
irq_stack_exit
.endm
.pushsection ".entry.text", "ax"
// Exception vector table: 2KB-aligned (.align 11), 16 entries of 128
// bytes each, in the architectural order {sync, irq, fiq, serror} for
// each source: EL1t, EL1h, 64-bit EL0, 32-bit EL0.  Combinations with
// no real handler are routed to the *_invalid stubs.
.align 11
.global vectors; .align 2; vectors:
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
kernel_ventry 1, error_invalid // Error EL1t
kernel_ventry 1, sync // Synchronous EL1h
kernel_ventry 1, irq // IRQ EL1h
kernel_ventry 1, fiq_invalid // FIQ EL1h
kernel_ventry 1, error // Error EL1h
kernel_ventry 0, sync // Synchronous 64-bit EL0
kernel_ventry 0, irq // IRQ 64-bit EL0
kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
kernel_ventry 0, error // Error 64-bit EL0
kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
.type vectors, @function; .size vectors, .-vectors
// Invalid mode handlers
// Common body for exceptions we cannot handle: save the context, then
// call bad_mode(regs, reason, esr).  The brk traps if bad_mode() ever
// returns.
.macro inv_entry, el, reason, regsize = 64
kernel_entry \el, \regsize
mov x0, sp // struct pt_regs
mov x1, #\reason // one of the BAD_* codes above
mrs x2, esr_el1 // exception syndrome
bl bad_mode
brk BUG_BRK_IMM // not expected to return
.endm
// One stub per unsupported vector; each funnels into inv_entry with
// the matching exception level and BAD_* reason code.
el0_sync_invalid:
inv_entry 0, BAD_SYNC
.type el0_sync_invalid, @function; .size el0_sync_invalid, .-el0_sync_invalid
el0_irq_invalid:
inv_entry 0, BAD_IRQ
.type el0_irq_invalid, @function; .size el0_irq_invalid, .-el0_irq_invalid
el0_fiq_invalid:
inv_entry 0, BAD_FIQ
.type el0_fiq_invalid, @function; .size el0_fiq_invalid, .-el0_fiq_invalid
el0_error_invalid:
inv_entry 0, BAD_ERROR
.type el0_error_invalid, @function; .size el0_error_invalid, .-el0_error_invalid
el1_sync_invalid:
inv_entry 1, BAD_SYNC
.type el1_sync_invalid, @function; .size el1_sync_invalid, .-el1_sync_invalid
el1_irq_invalid:
inv_entry 1, BAD_IRQ
.type el1_irq_invalid, @function; .size el1_irq_invalid, .-el1_irq_invalid
el1_fiq_invalid:
inv_entry 1, BAD_FIQ
.type el1_fiq_invalid, @function; .size el1_fiq_invalid, .-el1_fiq_invalid
el1_error_invalid:
inv_entry 1, BAD_ERROR
.type el1_error_invalid, @function; .size el1_error_invalid, .-el1_error_invalid
// EL1 mode handlers.
// Synchronous exception taken from EL1: decode the exception class
// from ESR_EL1 and dispatch.  x1 holds the full ESR and x24 the EC;
// the sub-handlers below rely on both still being live.
.align 6
el1_sync:
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_ia:
// NOTE(review): instruction aborts fall through into the data-abort
// path here; upstream handles them separately - confirm intended.
el1_da:
// Data abort handling
mrs x3, far_el1 // faulting address
inherit_daif pstate=x23, tmp=x2 // re-inherit the interrupted context's DAIF masks (saved PSTATE in x23)
clear_address_tag x0, x3 // x0 = untagged fault address
mov x2, sp // struct pt_regs
bl do_mem_abort
kernel_exit 1
el1_sp_pc:
// Stack or PC alignment exception handling
mrs x0, far_el1 // faulting address
inherit_daif pstate=x23, tmp=x2
mov x2, sp // struct pt_regs (x1 = esr)
bl do_sp_pc_abort
brk BUG_BRK_IMM // fatal at EL1: trap if the handler returns
el1_undef:
// Undefined instruction
inherit_daif pstate=x23, tmp=x2
mov x0, sp // struct pt_regs
bl do_undefinstr
kernel_exit 1
el1_dbg:
// Debug exception handling.
// Debug ECs taken from the current EL are odd-numbered, except BRK64;
// fold BRK64 into the current-EL case by forcing bit 0 before testing.
cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1 // faulting address (x1 = esr)
mov x2, sp // struct pt_regs
bl do_debug_exception
kernel_exit 1
el1_inv:
// TODO: add support for undefined instructions in kernel mode
inherit_daif pstate=x23, tmp=x2
mov x0, sp // struct pt_regs
mov x2, x1 // esr (x1 still holds ESR_EL1)
mov x1, #BAD_SYNC // reason
bl bad_mode
brk BUG_BRK_IMM // not expected to return
.type el1_sync, @function; .size el1_sync, .-el1_sync
// IRQ taken from EL1: save state, run the handler on the IRQ stack,
// then attempt kernel preemption if the interrupted context allows it.
.align 6
el1_irq:
kernel_entry 1
enable_da_f // unmask D/A/F; IRQs stay masked (see enable_da_f)
irq_handler
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0: preemption disabled
// Only take the preemption path when a reschedule is actually pending.
// (Previously el1_preempt - and thus preempt_schedule_irq, which
// schedules unconditionally - was called on every EL1 IRQ whenever the
// preempt count was zero.)  x0 is dead here; kernel_exit reloads it.
ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // no rescheduling needed
bl el1_preempt
1:
kernel_exit 1
.type el1_irq, @function; .size el1_irq, .-el1_irq
// Preemption helper reached from el1_irq.  Keeps calling the scheduler
// until TIF_NEED_RESCHED is clear (new wakeups may arrive meanwhile).
el1_preempt:
mov x24, lr // stash lr; x24 is callee-saved across the C call
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24 // return through the stashed lr
// EL0 mode handlers.
// Synchronous exception from 64-bit EL0: decode the exception class
// from ESR_EL1 and dispatch.  x25 (full ESR) and x24 (EC) are
// callee-saved, so they remain valid inside the handlers below.
.align 6
el0_sync:
kernel_entry 0
mrs x25, esr_el1 // read the syndrome register
lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
cmp x24, #ESR_ELx_EC_SVE // SVE access
b.eq el0_sve_acc
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
ccmp x24, #ESR_ELx_EC_WFx, #4, ne // #4 sets Z, so 'eq' also holds if the cmp above matched
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_da:
// Data abort handling
mrs x26, far_el1 // faulting address
enable_daif
clear_address_tag x0, x26 // x0 = untagged fault address
mov x1, x25 // esr
mov x2, sp // struct pt_regs
bl do_mem_abort
b ret_to_user
el0_ia:
// Instruction abort handling
mrs x26, far_el1
// NOTE(review): IRQs stay masked here (enable_da_f, not enable_daif),
// presumably until branch-predictor hardening has run - confirm.
enable_da_f
mov x0, x26 // faulting address
mov x1, x25 // esr
mov x2, sp // struct pt_regs
bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
// Floating Point or Advanced SIMD access
enable_daif
mov x0, x25 // esr
mov x1, sp // struct pt_regs
bl do_fpsimd_acc
b ret_to_user
el0_sve_acc:
// Scalable Vector Extension access
enable_daif
mov x0, x25 // esr
mov x1, sp // struct pt_regs
bl do_sve_acc
b ret_to_user
el0_fpsimd_exc:
// Floating Point, Advanced SIMD or SVE exception
enable_daif
mov x0, x25 // esr
mov x1, sp // struct pt_regs
bl do_fpsimd_exc
b ret_to_user
el0_sp_pc:
// Stack or PC alignment exception handling
mrs x26, far_el1 // faulting address
enable_da_f
mov x0, x26 // faulting address
mov x1, x25 // esr
mov x2, sp // struct pt_regs
bl do_sp_pc_abort
b ret_to_user
el0_undef:
// Undefined instruction
enable_daif
mov x0, sp // struct pt_regs
bl do_undefinstr
b ret_to_user
el0_sys:
// System instructions, for trapped cache maintenance instructions
enable_daif
mov x0, x25 // esr
mov x1, sp // struct pt_regs
bl do_sysinstr
b ret_to_user
el0_dbg:
// Debug exception handling.
// Odd-numbered debug ECs are taken from the current EL and thus can't
// legitimately come from EL0.
tbnz x24, #0, el0_inv // EL0 only
mrs x0, far_el1 // faulting address
mov x1, x25 // esr
mov x2, sp // struct pt_regs
bl do_debug_exception
enable_daif
b ret_to_user
el0_inv:
// Unrecognised exception class from EL0.
enable_daif
mov x0, sp // struct pt_regs
mov x1, #BAD_SYNC // reason
mov x2, x25 // esr
bl bad_el0_sync
b ret_to_user
.type el0_sync, @function; .size el0_sync, .-el0_sync
// IRQ from 64-bit EL0.
.align 6
el0_irq:
kernel_entry 0
// NOTE(review): el0_irq_naked looks like a secondary entry point for
// callers that have already saved the frame; no such caller is visible
// in this chunk - confirm.
el0_irq_naked:
enable_da_f
irq_handler
b ret_to_user
.type el0_irq, @function; .size el0_irq, .-el0_irq
// SError taken from EL1: save context and hand off to do_serror.
el1_error:
kernel_entry 1
mrs x1, esr_el1 // esr argument for do_serror
enable_dbg
mov x0, sp // struct pt_regs
bl do_serror
kernel_exit 1
.type el1_error, @function; .size el1_error, .-el1_error
// SError taken from 64-bit EL0: as el1_error, but re-enable all DAIF
// exceptions before taking the normal return-to-user path.
el0_error:
kernel_entry 0
mrs x1, esr_el1 // esr argument for do_serror
enable_dbg
mov x0, sp // struct pt_regs
bl do_serror
enable_daif
b ret_to_user
.type el0_error, @function; .size el0_error, .-el0_error
// Pending-work slow path, entered from ret_to_user with x1 = TI_FLAGS
// when any _TIF_WORK_MASK bit is set.
work_pending:
mov x0, sp // 'regs'
bl do_notify_resume
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for single-step
// NOTE(review): x1 appears unused past this point in the visible code;
// upstream consumes it to re-enable single-step - confirm.
b finish_ret_to_user
// "slow" syscall return path.
ret_to_user:
disable_daif // no interrupts while deciding how to return
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK // any pending-work bits set?
cbnz x2, work_pending
finish_ret_to_user:
kernel_exit 0
.type ret_to_user, @function; .size ret_to_user, .-ret_to_user
// SVC handler.
// Reached from the el0_sync dispatcher (kernel_entry 0 already done);
// syscall decode and dispatch happen in C via el0_svc_handler.
.align 6
el0_svc:
mov x0, sp // struct pt_regs
bl el0_svc_handler
b ret_to_user
.type el0_svc, @function; .size el0_svc, .-el0_svc
.popsection // .entry.text