// semx_bsp_data 0.1.5
//
// Board-level data definitions (板级数据定义).
// NOTE(review): despite the title, this chunk contains the AArch64
// exception vector table and kernel entry/exit code, not data tables.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
    // Zero all general-purpose registers x0-x29 so that no stale kernel
    // values are observable. lr (x30) and sp are left untouched.
    // Used on entry from EL0, after the registers have been saved.
    .macro	clear_gp_regs
    .irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
    mov	x\n, xzr
    .endr
    .endm

// Bad Abort numbers — 'reason' codes passed in x1 to bad_mode() /
// bad_el0_sync() by the invalid-mode stubs below.
BAD_SYNC  = 0
BAD_IRQ   = 1
BAD_FIQ   = 2
BAD_ERROR = 3

    // One vector-table entry: allocate the pt_regs frame and branch to the
    // handler named el<el>_<label>.  .align 7 pads every entry to the
    // architectural 128-byte vector stride.  The frame is filled in by
    // kernel_entry inside the target handler.
    // NOTE(review): \regsize is accepted but not referenced in this macro.
    .macro kernel_ventry, el, label, regsize = 64
    .align 7

    sub	sp, sp, #S_FRAME_SIZE

    b	el\()\el\()_\label
    .endm

    // kernel_entry el [, regsize]
    //
    // Save the interrupted context into the pt_regs frame that
    // kernel_ventry allocated at sp:
    //   - x0..x29 at [sp, #16*0 .. #16*14]
    //   - lr and the aborted SP at S_LR
    //   - ELR_EL1/SPSR_EL1 (aborted PC/PSTATE) at S_PC
    // For entry from EL0 the GP registers are also cleared (after being
    // saved) and sp_el0 is repointed at the current task.
    .macro	kernel_entry, el, regsize = 64
    .if	\regsize == 32
    mov	w0, w0				// zero upper 32 bits of x0
    .endif
    stp	x0, x1, [sp, #16 * 0]
    stp	x2, x3, [sp, #16 * 1]
    stp	x4, x5, [sp, #16 * 2]
    stp	x6, x7, [sp, #16 * 3]
    stp	x8, x9, [sp, #16 * 4]
    stp	x10, x11, [sp, #16 * 5]
    stp	x12, x13, [sp, #16 * 6]
    stp	x14, x15, [sp, #16 * 7]
    stp	x16, x17, [sp, #16 * 8]
    stp	x18, x19, [sp, #16 * 9]
    stp	x20, x21, [sp, #16 * 10]
    stp	x22, x23, [sp, #16 * 11]
    stp	x24, x25, [sp, #16 * 12]
    stp	x26, x27, [sp, #16 * 13]
    stp	x28, x29, [sp, #16 * 14]

    .if	\el == 0
    clear_gp_regs
    mrs	x21, sp_el0			// aborted (user) SP
    ldr_this_cpu	tsk, PERCPU_ENTRY_TASK_RAW, x20	// tsk := this CPU's current task
							// (x20 is scratch for the macro)

    .else
    // Came from EL1: the aborted SP is just above the frame we allocated.
    add	x21, sp, #S_FRAME_SIZE
    get_thread_info tsk
    // Save the task's original addr_limit and set USER_DS
    ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
    str	x20, [sp, #S_ORIG_ADDR_LIMIT]
    mov	x20, #USER_DS
    str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
    .endif /* \el == 0 */
    mrs	x22, elr_el1
    mrs	x23, spsr_el1
    stp	lr, x21, [sp, #S_LR]

    // Fake a stack frame record so unwinders stop cleanly: terminate with
    // zeros for EL0 entries, or link to the interrupted x29/PC for EL1.
    .if \el == 0
    stp	xzr, xzr, [sp, #S_STACKFRAME]
    .else
    stp	x29, x22, [sp, #S_STACKFRAME]
    .endif
    add	x29, sp, #S_STACKFRAME

    stp	x22, x23, [sp, #S_PC]

    // Not a syscall entry: mark the frame accordingly for EL0.
    .if	\el == 0
    mov	w21, #NO_SYSCALL
    str	w21, [sp, #S_SYSCALLNO]
    .endif

    // While in the kernel, sp_el0 carries the current task pointer.
    .if	\el == 0
    msr	sp_el0, tsk
    .endif
    // x21 - aborted SP
    // x22 - aborted PC
    // x23 - aborted PSTATE
    .endm

    // kernel_exit el
    //
    // Undo kernel_entry: restore the interrupted context from the pt_regs
    // frame at sp and return from the exception with ERET.
    //   \el == 0: returning to EL0 — also restore the user stack pointer
    //             (sp_el0) from the frame.
    //   \el != 0: returning to EL1 — mask DAIF first so the frame cannot
    //             be re-entered mid-restore, and put back the task's
    //             saved addr_limit.
    // (The empty ".if \el == 0/.endif" leftover has been removed; it
    // assembled to nothing.)
    .macro	kernel_exit, el
    .if	\el != 0
    disable_daif

    // Restore the task's original addr_limit.
    ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
    str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
    .endif

    // load ELR, SPSR
    ldp	x21, x22, [sp, #S_PC]

    .if	\el == 0
    ldr	x23, [sp, #S_SP]		// load return stack pointer
    msr	sp_el0, x23
    .endif

    msr	elr_el1, x21			// set up the return data
    msr	spsr_el1, x22
    // x21/x22 are consumed above before being reloaded below.
    ldp	x0, x1, [sp, #16 * 0]
    ldp	x2, x3, [sp, #16 * 1]
    ldp	x4, x5, [sp, #16 * 2]
    ldp	x6, x7, [sp, #16 * 3]
    ldp	x8, x9, [sp, #16 * 4]
    ldp	x10, x11, [sp, #16 * 5]
    ldp	x12, x13, [sp, #16 * 6]
    ldp	x14, x15, [sp, #16 * 7]
    ldp	x16, x17, [sp, #16 * 8]
    ldp	x18, x19, [sp, #16 * 9]
    ldp	x20, x21, [sp, #16 * 10]
    ldp	x22, x23, [sp, #16 * 11]
    ldp	x24, x25, [sp, #16 * 12]
    ldp	x26, x27, [sp, #16 * 13]
    ldp	x28, x29, [sp, #16 * 14]
    ldr	lr, [sp, #S_LR]
    add	sp, sp, #S_FRAME_SIZE		// restore sp

    eret
    sb					// barrier against straight-line
					// speculation past the ERET
    .endm

    // Switch to this CPU's dedicated IRQ stack, unless we are already on
    // it (i.e. handling a nested interrupt).  Preserves the original sp in
    // x19 for irq_stack_exit; clobbers x25/x26.
    .macro	irq_stack_entry
    mov	x19, sp			// preserve the original sp

    // Compare sp with the base of the task stack.
    // If the top ~(THREAD_STACK_SIZE - 1) bits match, we are on a task stack,
    // and should switch to the irq stack.
    ldr	x25, [tsk, TSK_STACK]
    eor	x25, x25, x19
    and	x25, x25, #~(THREAD_STACK_SIZE - 1)
    cbnz	x25, 9998f		// not on the task stack: keep current sp

    // Point sp at the top (highest address) of the per-CPU IRQ stack.
    ldr_this_cpu x25, IRQ_STACK_PTR, x26
    mov	x26, #THREAD_STACK_SIZE
    add	x26, x25, x26

    /* switch to the irq stack */
    mov	sp, x26
9998:
    .endm

    // Undo irq_stack_entry: restore the sp saved in x19.
    .macro	irq_stack_exit
    mov	sp, x19
    .endm

// Register alias: x28 holds the current task / thread_info pointer while
// in the kernel.  x28 is callee-saved under AAPCS64, so 'tsk' survives
// calls into C handlers.
tsk	.req	x28		// current thread_info

    // Interrupt handling.
    // Pass the pt_regs frame (still-current sp) in x0, switch to the IRQ
    // stack, and call the C dispatcher.  x0 is captured *before* the stack
    // switch so the handler sees the saved exception frame.
    .macro	irq_handler
    mov	x0, sp
    irq_stack_entry
    bl	handle_arch_irq
    irq_stack_exit
    .endm

    .pushsection ".entry.text", "ax"
    // Exception vector table: four groups (EL1t, EL1h, 64-bit EL0,
    // 32-bit EL0) of four entries each (Sync, IRQ, FIQ, SError).
    // .align 11 places the table on the 2KB boundary required for
    // VBAR_EL1; each kernel_ventry is 128 bytes (.align 7 inside).
    .align	11
.global vectors; .align 2; vectors:
    kernel_ventry	1, sync_invalid			// Synchronous EL1t
    kernel_ventry	1, irq_invalid			// IRQ EL1t
    kernel_ventry	1, fiq_invalid			// FIQ EL1t
    kernel_ventry	1, error_invalid		// Error EL1t

    kernel_ventry	1, sync				// Synchronous EL1h
    kernel_ventry	1, irq				// IRQ EL1h
    kernel_ventry	1, fiq_invalid			// FIQ EL1h
    kernel_ventry	1, error			// Error EL1h

    kernel_ventry	0, sync				// Synchronous 64-bit EL0
    kernel_ventry	0, irq				// IRQ 64-bit EL0
    kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
    kernel_ventry	0, error			// Error 64-bit EL0

    kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
    kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
    kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
    kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
.type vectors, @function; .size vectors, .-vectors

    // Invalid mode handlers
    // Save state, then report a fatal unexpected exception:
    //   x0 = pt_regs, x1 = BAD_* reason, x2 = ESR_EL1.
    // bad_mode() is not expected to return; brk traps if it does.
    .macro	inv_entry, el, reason, regsize = 64
    kernel_entry \el, \regsize
    mov	x0, sp
    mov	x1, #\reason
    mrs	x2, esr_el1
    bl	bad_mode
    brk BUG_BRK_IMM
    .endm

// Unexpected exceptions from EL0: report via inv_entry with the matching
// BAD_* reason code (never returns).
el0_sync_invalid:
    inv_entry 0, BAD_SYNC
.type el0_sync_invalid, @function; .size el0_sync_invalid, .-el0_sync_invalid

el0_irq_invalid:
    inv_entry 0, BAD_IRQ
.type el0_irq_invalid, @function; .size el0_irq_invalid, .-el0_irq_invalid

el0_fiq_invalid:
    inv_entry 0, BAD_FIQ
.type el0_fiq_invalid, @function; .size el0_fiq_invalid, .-el0_fiq_invalid

el0_error_invalid:
    inv_entry 0, BAD_ERROR
.type el0_error_invalid, @function; .size el0_error_invalid, .-el0_error_invalid

// Unexpected exceptions from EL1: report via inv_entry with the matching
// BAD_* reason code (never returns).
el1_sync_invalid:
    inv_entry 1, BAD_SYNC
.type el1_sync_invalid, @function; .size el1_sync_invalid, .-el1_sync_invalid

el1_irq_invalid:
    inv_entry 1, BAD_IRQ
.type el1_irq_invalid, @function; .size el1_irq_invalid, .-el1_irq_invalid

el1_fiq_invalid:
    inv_entry 1, BAD_FIQ
.type el1_fiq_invalid, @function; .size el1_fiq_invalid, .-el1_fiq_invalid

el1_error_invalid:
    inv_entry 1, BAD_ERROR
.type el1_error_invalid, @function; .size el1_error_invalid, .-el1_error_invalid

    // EL1 mode handlers.
    // Synchronous exception from EL1h: decode the exception class (EC)
    // from ESR_EL1 and dispatch.  x1 keeps the full ESR and x24 the EC;
    // both remain live for the sub-handlers below.
    .align	6
el1_sync:
    kernel_entry 1
    mrs	x1, esr_el1			// read the syndrome register
    lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
    cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
    b.eq	el1_da
    cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
    b.eq	el1_ia
    cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
    b.eq	el1_undef
    cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
    b.eq	el1_sp_pc
    cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
    b.eq	el1_sp_pc
    cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
    b.eq	el1_undef
    cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
    b.ge	el1_dbg				// all ECs >= BREAKPT_CUR are debug
    b	el1_inv

// el1_ia deliberately falls through into el1_da: instruction and data
// aborts share the do_mem_abort() path.
el1_ia:
el1_da:
    // Data abort handling
    // Args: x0 = untagged fault address, x1 = ESR (still live), x2 = pt_regs.
    mrs	x3, far_el1
    inherit_daif	pstate=x23, tmp=x2	// restore interrupted DAIF state
    clear_address_tag x0, x3
    mov	x2, sp				// struct pt_regs
    bl	do_mem_abort

    kernel_exit 1
el1_sp_pc:
    // Stack or PC alignment exception handling
    // Fatal at EL1: do_sp_pc_abort() should not return; brk traps if it does.
    mrs	x0, far_el1
    inherit_daif	pstate=x23, tmp=x2
    mov	x2, sp
    bl	do_sp_pc_abort
    brk BUG_BRK_IMM
el1_undef:
    // Undefined instruction
    inherit_daif	pstate=x23, tmp=x2
    mov	x0, sp
    bl	do_undefinstr
    kernel_exit 1
el1_dbg:
    // Debug exception handling
    // Args: x0 = FAR, x1 = ESR (unclobbered since el1_sync), x2 = pt_regs.
    cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
    cinc	x24, x24, eq			// set bit '0'
    tbz	x24, #0, el1_inv		// EL1 only
    mrs	x0, far_el1
    mov	x2, sp				// struct pt_regs
    bl	do_debug_exception
    kernel_exit 1
el1_inv:
    // TODO: add support for undefined instructions in kernel mode
    inherit_daif	pstate=x23, tmp=x2
    mov	x0, sp
    mov	x2, x1				// x2 = ESR, saved before x1 is reused
    mov	x1, #BAD_SYNC
    bl	bad_mode
    brk BUG_BRK_IMM
.type el1_sync, @function; .size el1_sync, .-el1_sync

    // IRQ taken from EL1h: dispatch the interrupt, then preempt the
    // current task if its preempt count is zero.
    .align	6
el1_irq:
    kernel_entry 1
    enable_da_f

    irq_handler

    ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
    cbnz	w24, 1f				// preempt count != 0: no preemption
    bl	el1_preempt
1:
    kernel_exit 1
.type el1_irq, @function; .size el1_irq, .-el1_irq

// Reschedule loop for kernel preemption: keep calling
// preempt_schedule_irq() until TIF_NEED_RESCHED is clear, then return to
// the caller via the lr value stashed in x24 (callee-saved across the
// C call).
el1_preempt:
    mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
    ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
    tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
    ret	x24

// EL0 mode handlers.
// Synchronous exception from 64-bit EL0: decode the exception class from
// ESR_EL1 and dispatch.  x25 = full ESR, x24 = exception class; both are
// callee-saved (AAPCS64 x19-x28) so they stay live across the C calls in
// the sub-handlers.  Every path ends at ret_to_user.
    .align	6
el0_sync:
    kernel_entry 0
    mrs	x25, esr_el1			// read the syndrome register
    lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
    cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
    b.eq	el0_svc
    cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
    b.eq	el0_da
    cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
    b.eq	el0_ia
    cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
    b.eq	el0_fpsimd_acc
    cmp	x24, #ESR_ELx_EC_SVE		// SVE access
    b.eq	el0_sve_acc
    cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
    b.eq	el0_fpsimd_exc
    cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
    // Z := (EC == SYS64) || (EC == WFx): trapped WFI/WFE share el0_sys.
    ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
    b.eq	el0_sys
    cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
    b.eq	el0_sp_pc
    cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
    b.eq	el0_sp_pc
    cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
    b.eq	el0_undef
    cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
    b.ge	el0_dbg				// all ECs >= BREAKPT_LOW are debug
    b	el0_inv

el0_da:
    // Data abort handling
    // do_mem_abort(untagged FAR, ESR, pt_regs)
    mrs	x26, far_el1
    enable_daif
    clear_address_tag x0, x26
    mov	x1, x25
    mov	x2, sp
    bl	do_mem_abort
    b	ret_to_user
el0_ia:
    // Instruction abort handling
    // do_el0_ia_bp_hardening(FAR, ESR, pt_regs)
    mrs	x26, far_el1
    enable_da_f
    mov	x0, x26
    mov	x1, x25
    mov	x2, sp
    bl	do_el0_ia_bp_hardening
    b	ret_to_user
el0_fpsimd_acc:
    // Floating Point or Advanced SIMD access
    enable_daif
    mov	x0, x25				// ESR
    mov	x1, sp				// pt_regs
    bl	do_fpsimd_acc
    b	ret_to_user
el0_sve_acc:
    // Scalable Vector Extension access
    enable_daif
    mov	x0, x25				// ESR
    mov	x1, sp				// pt_regs
    bl	do_sve_acc
    b	ret_to_user
el0_fpsimd_exc:
    // Floating Point, Advanced SIMD or SVE exception
    enable_daif
    mov	x0, x25				// ESR
    mov	x1, sp				// pt_regs
    bl	do_fpsimd_exc
    b	ret_to_user
el0_sp_pc:
    // Stack or PC alignment exception handling
    // do_sp_pc_abort(FAR, ESR, pt_regs)
    mrs	x26, far_el1
    enable_da_f
    mov	x0, x26
    mov	x1, x25
    mov	x2, sp
    bl	do_sp_pc_abort
    b	ret_to_user
el0_undef:
    // Undefined instruction
    enable_daif
    mov	x0, sp				// pt_regs
    bl	do_undefinstr
    b	ret_to_user
el0_sys:
    // System instructions, for trapped cache maintenance instructions
    enable_daif
    mov	x0, x25				// ESR
    mov	x1, sp				// pt_regs
    bl	do_sysinstr
    b	ret_to_user
el0_dbg:
    // Debug exception handling
    // Handled with interrupts still masked; enable_daif only afterwards.
    tbnz	x24, #0, el0_inv		// EL0 only
    mrs	x0, far_el1
    mov	x1, x25				// ESR
    mov	x2, sp				// pt_regs
    bl	do_debug_exception
    enable_daif
    b	ret_to_user
el0_inv:
    // Unrecognised exception class: report as a bad EL0 sync exception.
    enable_daif
    mov	x0, sp
    mov	x1, #BAD_SYNC
    mov	x2, x25				// ESR
    bl	bad_el0_sync
    b	ret_to_user
.type el0_sync, @function; .size el0_sync, .-el0_sync

    // IRQ taken from 64-bit EL0: save state, dispatch the interrupt, and
    // return through the slow user-return path.  el0_irq_naked is the
    // entry point for callers that have already built the frame.
    .align	6
el0_irq:
    kernel_entry 0
el0_irq_naked:
    enable_da_f

    irq_handler

    b	ret_to_user
.type el0_irq, @function; .size el0_irq, .-el0_irq

// SError taken from EL1h: do_serror(pt_regs, ESR), then return to EL1.
el1_error:
    kernel_entry 1
    mrs	x1, esr_el1
    enable_dbg
    mov	x0, sp
    bl	do_serror
    kernel_exit 1
.type el1_error, @function; .size el1_error, .-el1_error

// SError taken from 64-bit EL0: do_serror(pt_regs, ESR), then return to
// userspace via the slow path.
el0_error:
    kernel_entry 0
    mrs	x1, esr_el1
    enable_dbg
    mov	x0, sp
    bl	do_serror
    enable_daif
    b	ret_to_user
.type el0_error, @function; .size el0_error, .-el0_error

// Pending-work path out of ret_to_user: run do_notify_resume(regs), then
// jump straight to the exit (flags are reloaded into x1 first).
work_pending:
    mov	x0, sp				// 'regs'
    bl	do_notify_resume
    ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
    b	finish_ret_to_user

// "slow" syscall return path.
// With all DAIF exceptions masked, check the thread flags for pending
// work (signals, reschedule, ...); divert to work_pending if any bit in
// _TIF_WORK_MASK is set, otherwise restore EL0 state and ERET.
ret_to_user:
    disable_daif
    ldr	x1, [tsk, #TSK_TI_FLAGS]
    and	x2, x1, #_TIF_WORK_MASK
    cbnz	x2, work_pending
finish_ret_to_user:
    kernel_exit 0
.type ret_to_user, @function; .size ret_to_user, .-ret_to_user

// SVC handler.
// Syscall entry from 64-bit EL0: hand the pt_regs frame to the C
// dispatcher, then take the common user-return path.
    .align	6
el0_svc:
    mov	x0, sp				// pt_regs
    bl	el0_svc_handler
    b	ret_to_user
.type el0_svc, @function; .size el0_svc, .-el0_svc

    .popsection				// .entry.text