#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/panic.h>
#include <sys/ontrap.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/reboot.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>
#include "assym.h"
#include <sys/dditypes.h>
/*
 * int on_fault(label_t *ljb)
 *
 * Arrange to catch kernel page faults on user addresses: record the
 * caller's jump buffer in curthread->t_onfault, point t_lofault at the
 * local catch_fault handler, then tail-call setjmp.  Returns 0 on the
 * initial call (setjmp path below returns 0).  If a fault occurs, the
 * trap code transfers to catch_fault, which clears the handler state
 * and longjmp()s back, so the original on_fault() call appears to
 * return non-zero (longjmp below returns 1).
 *
 * smap_disable/smap_enable bracket the protected region; presumably
 * they toggle SMAP (stac/clac) to permit the user access — confirm
 * against their definitions.
 */
ENTRY(on_fault)
movq %gs:CPU_THREAD, %rsi	/* rsi = curthread */
leaq catch_fault(%rip), %rdx
movq %rdi, T_ONFAULT(%rsi)	/* curthread->t_onfault = ljb */
movq %rdx, T_LOFAULT(%rsi)	/* curthread->t_lofault = catch_fault */
call smap_disable
jmp setjmp			/* tail call; caller sees setjmp's 0 */

/* Reached from trap handling when a lofault fault fires. */
catch_fault:
movq %gs:CPU_THREAD, %rsi	/* rsi = curthread */
movq T_ONFAULT(%rsi), %rdi	/* rdi = saved jump buffer */
xorl %eax, %eax
movq %rax, T_ONFAULT(%rsi)	/* clear fault handler state */
movq %rax, T_LOFAULT(%rsi)
call smap_enable
jmp longjmp			/* tail call; caller sees non-zero */
SET_SIZE(on_fault)
/*
 * void no_fault(void)
 *
 * Tear down the handler installed by on_fault(): clear both
 * curthread->t_onfault and t_lofault, and re-enable SMAP protection.
 */
ENTRY(no_fault)
movq %gs:CPU_THREAD, %rsi	/* rsi = curthread */
xorl %eax, %eax
movq %rax, T_ONFAULT(%rsi)	/* curthread->t_onfault = NULL */
movq %rax, T_LOFAULT(%rsi)	/* curthread->t_lofault = NULL */
call smap_enable
ret
SET_SIZE(no_fault)
/*
 * Default trampoline to use when on_trap() fires.  Fetches the current
 * on_trap_data from curthread->t_ontrap and longjmp()s to the jump
 * buffer embedded at offset OT_JMPBUF within it, so the original
 * on_trap() call appears to return non-zero.
 */
ENTRY(on_trap_trampoline)
movq %gs:CPU_THREAD, %rsi	/* rsi = curthread */
movq T_ONTRAP(%rsi), %rdi	/* rdi = curthread->t_ontrap */
addq $OT_JMPBUF, %rdi		/* rdi = &otp->ot_jmpbuf */
jmp longjmp			/* tail call; returns 1 to on_trap caller */
SET_SIZE(on_trap_trampoline)
/*
 * int on_trap(on_trap_data_t *otp, uint_t prot)
 *
 * Push the caller's on_trap_data onto the thread's t_ontrap chain and
 * record the protection mask.  Like on_fault(), returns 0 initially
 * (via the setjmp tail call) and non-zero after a trap has bounced
 * through on_trap_trampoline above.  If the same otp is already at the
 * head of the chain it is not re-linked (re-arming an active handler).
 */
ENTRY(on_trap)
movw %si, OT_PROT(%rdi)		/* otp->ot_prot = prot */
movw $0, OT_TRAP(%rdi)		/* otp->ot_trap = 0 */
leaq on_trap_trampoline(%rip), %rdx
movq %rdx, OT_TRAMPOLINE(%rdi)	/* default trampoline */
xorl %ecx, %ecx
movq %rcx, OT_HANDLE(%rdi)	/* otp->ot_handle = NULL */
movq %rcx, OT_PAD1(%rdi)	/* otp->ot_pad1 = NULL */
movq %gs:CPU_THREAD, %rdx	/* rdx = curthread */
movq T_ONTRAP(%rdx), %rcx	/* rcx = current chain head */
cmpq %rdi, %rcx
je 0f				/* already installed? skip re-link */
movq %rcx, OT_PREV(%rdi)	/* otp->ot_prev = old head */
movq %rdi, T_ONTRAP(%rdx)	/* curthread->t_ontrap = otp */
0: addq $OT_JMPBUF, %rdi	/* rdi = &otp->ot_jmpbuf */
jmp setjmp			/* tail call; caller sees setjmp's 0 */
SET_SIZE(on_trap)
/*
 * The code below stores the saved PC at offset 0 of the label_t
 * ("movq %rdx, (%rdi)"), so LABEL_PC must be 0.
 */
#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif
/*
 * int setjmp(label_t *lp)
 *
 * Save the callee-saved register set (rsp, rbp, rbx, r12-r15) plus the
 * return address into *lp and return 0.  A later longjmp(lp) resumes
 * at the saved return address with these registers restored, making
 * setjmp appear to return a second time with value 1.
 */
ENTRY(setjmp)
movq %rsp, LABEL_SP(%rdi)	/* save stack pointer */
movq %rbp, LABEL_RBP(%rdi)	/* save callee-saved registers */
movq %rbx, LABEL_RBX(%rdi)
movq %r12, LABEL_R12(%rdi)
movq %r13, LABEL_R13(%rdi)
movq %r14, LABEL_R14(%rdi)
movq %r15, LABEL_R15(%rdi)
movq (%rsp), %rdx		/* rdx = return address */
movq %rdx, (%rdi)		/* lp->val[LABEL_PC] = return address */
xorl %eax, %eax			/* first return: 0 */
ret
SET_SIZE(setjmp)
/*
 * void longjmp(label_t *lp)
 *
 * Restore the register context saved by setjmp(lp) and return through
 * the saved PC (stored over the return slot on the restored stack).
 * The matching setjmp() call appears to return 1.
 */
ENTRY(longjmp)
movq LABEL_SP(%rdi), %rsp	/* restore stack pointer */
movq LABEL_RBP(%rdi), %rbp	/* restore callee-saved registers */
movq LABEL_RBX(%rdi), %rbx
movq LABEL_R12(%rdi), %r12
movq LABEL_R13(%rdi), %r13
movq LABEL_R14(%rdi), %r14
movq LABEL_R15(%rdi), %r15
movq (%rdi), %rdx		/* rdx = saved PC (LABEL_PC == 0) */
movq %rdx, (%rsp)		/* overwrite return slot with saved PC */
xorl %eax, %eax
incl %eax			/* second return: 1 */
ret
SET_SIZE(longjmp)
/*
 * caller() — return the return address stored in the current %rbp
 * frame, i.e. the caller of the function that invoked caller()
 * (assumes the invoking function established a standard frame).
 */
ENTRY(caller)
movq 8(%rbp), %rax		/* return address above saved rbp */
ret
SET_SIZE(caller)
/*
 * callee() — return this call's own return address, i.e. an address
 * inside the function that invoked callee().
 */
ENTRY(callee)
movq (%rsp), %rax		/* our own return address */
ret
SET_SIZE(callee)
/*
 * getfp() — return the current frame pointer (%rbp).
 */
ENTRY(getfp)
movq %rbp, %rax
ret
SET_SIZE(getfp)
/*
 * void mmu_invlpg(caddr_t va)
 *
 * Invalidate the TLB entry mapping the page containing va.
 */
ENTRY(mmu_invlpg)
invlpg (%rdi)
ret
SET_SIZE(mmu_invlpg)
/*
 * getcr0() / setcr0(value) — read / write control register CR0.
 */
ENTRY(getcr0)
movq %cr0, %rax
ret
SET_SIZE(getcr0)

ENTRY(setcr0)
movq %rdi, %cr0
ret
SET_SIZE(setcr0)
/*
 * getcr2() — return the page-fault address register (CR2).
 * Under Xen (__xpv) CR2 is not directly readable by the guest; the
 * hypervisor supplies it via the per-vcpu shared info structure.
 */
ENTRY(getcr2)
#if defined(__xpv)
movq %gs:CPU_VCPU_INFO, %rax
movq VCPU_INFO_ARCH_CR2(%rax), %rax	/* vcpu_info->arch.cr2 */
#else
movq %cr2, %rax
#endif
ret
SET_SIZE(getcr2)
/*
 * getcr3() — return the page-table base register (CR3).
 */
ENTRY(getcr3)
movq %cr3, %rax
ret
SET_SIZE(getcr3)
/*
 * setcr3(value) — load CR3 (switches page tables; flushes non-global
 * TLB entries as a side effect).
 * reload_cr3() — rewrite CR3 with its current value to flush the TLB.
 * Neither is available under Xen, where CR3 is hypervisor-managed.
 */
#if !defined(__xpv)
ENTRY(setcr3)
movq %rdi, %cr3
ret
SET_SIZE(setcr3)

ENTRY(reload_cr3)
movq %cr3, %rdi
movq %rdi, %cr3		/* write-back flushes the TLB */
ret
SET_SIZE(reload_cr3)
#endif
/*
 * getcr4() / setcr4(value) — read / write control register CR4.
 */
ENTRY(getcr4)
movq %cr4, %rax
ret
SET_SIZE(getcr4)

ENTRY(setcr4)
movq %rdi, %cr4
ret
SET_SIZE(setcr4)
/*
 * getcr8() / setcr8(value) — read / write CR8 (task-priority register,
 * mirrors the local APIC TPR).
 */
ENTRY(getcr8)
movq %cr8, %rax
ret
SET_SIZE(getcr8)

ENTRY(setcr8)
movq %rdi, %cr8
ret
SET_SIZE(setcr8)
/*
 * __cpuid_insn(struct cpuid_regs *cp)
 *
 * Execute CPUID with eax/ebx/ecx/edx loaded from the four 32-bit
 * fields of *cp (at offsets 0/4/8/0xc) and store the results back
 * into the same fields.  rbx/rcx/rdx are preserved across the call
 * (cpuid clobbers all four GPRs).
 */
ENTRY(__cpuid_insn)
movq %rbx, %r8			/* preserve caller's rbx/rcx/rdx */
movq %rcx, %r9
movq %rdx, %r11
movl (%rdi), %eax		/* eax = cp->cp_eax */
movl 0x4(%rdi), %ebx		/* ebx = cp->cp_ebx */
movl 0x8(%rdi), %ecx		/* ecx = cp->cp_ecx */
movl 0xc(%rdi), %edx		/* edx = cp->cp_edx */
cpuid
movl %eax, (%rdi)		/* store results back into *cp */
movl %ebx, 0x4(%rdi)
movl %ecx, 0x8(%rdi)
movl %edx, 0xc(%rdi)
movq %r8, %rbx			/* restore preserved registers */
movq %r9, %rcx
movq %r11, %rdx
ret
SET_SIZE(__cpuid_insn)
/*
 * i86_monitor(pointer, extensions-in-rsi)
 *
 * Arm address monitoring on the cache line containing the pointer
 * argument.  The line is clflush'd first so a subsequent write is
 * observed.  The MONITOR instruction is emitted as raw bytes
 * (0f 01 c8) for assemblers that do not know the mnemonic; it takes
 * address in rax, extensions in rcx, hints in rdx.
 * NOTE(review): rdx (hints) is passed through from the caller's third
 * argument untouched — confirm callers set it as intended.
 */
ENTRY_NP(i86_monitor)
pushq %rbp
movq %rsp, %rbp
movq %rdi, %rax			/* monitored address */
movq %rsi, %rcx			/* extensions */
clflush (%rax)			/* ensure line leaves the cache */
.byte 0x0f, 0x01, 0xc8		/* monitor */
leave
ret
SET_SIZE(i86_monitor)
/*
 * i86_mwait(data, extensions)
 *
 * Enter a monitored wait (paired with i86_monitor above).  MWAIT is
 * emitted as raw bytes (0f 01 c9); hints in rax, extensions in rcx.
 * x86_md_clear is called first — presumably the microarchitectural
 * data-clearing mitigation before idling; confirm against its
 * definition.
 */
ENTRY_NP(i86_mwait)
pushq %rbp
call x86_md_clear		/* flush uarch buffers before idle */
movq %rsp, %rbp
movq %rdi, %rax			/* hints */
movq %rsi, %rcx			/* extensions */
.byte 0x0f, 0x01, 0xc9		/* mwait */
leave
ret
SET_SIZE(i86_mwait)
/*
 * hrtime_t tsc_read(void)
 *
 * Return the 64-bit time-stamp counter.  The default body serializes
 * with CPUID (leaf 0) before RDTSC.  The labelled fragments that
 * follow (_tsc_mfence_start.._tsc_lfence_end) are alternative
 * implementations — MFENCE+RDTSC, RDTSCP, no-TSC stub, LFENCE+RDTSC —
 * whose start/end labels let boot code measure and copy the variant
 * appropriate to the CPU over tsc_read.  Because these are
 * length-sensitive patch templates, the exact instruction sequences
 * must not be altered.
 */
#if defined(__xpv)
#else
ENTRY_NP(tsc_read)
movq %rbx, %r11			/* preserve rbx (cpuid clobbers it) */
movl $0, %eax			/* cpuid leaf 0 as a serializing op */
cpuid
rdtsc
movq %r11, %rbx			/* restore rbx */
shlq $32, %rdx
orq %rdx, %rax			/* rax = (edx << 32) | eax */
ret
/* MFENCE-serialized variant (patched in on CPUs where this suffices) */
.globl _tsc_mfence_start
_tsc_mfence_start:
mfence
rdtsc
shlq $32, %rdx
orq %rdx, %rax
ret
.globl _tsc_mfence_end
_tsc_mfence_end:
/* RDTSCP variant (0f 01 f9), self-ordering */
.globl _tscp_start
_tscp_start:
.byte 0x0f, 0x01, 0xf9		/* rdtscp */
shlq $32, %rdx
orq %rdx, %rax
ret
.globl _tscp_end
_tscp_end:
/* No-TSC stub: always returns 0 */
.globl _no_rdtsc_start
_no_rdtsc_start:
xorl %edx, %edx
xorl %eax, %eax
ret
.globl _no_rdtsc_end
_no_rdtsc_end:
/* LFENCE-serialized variant */
.globl _tsc_lfence_start
_tsc_lfence_start:
lfence
rdtsc
shlq $32, %rdx
orq %rdx, %rax
ret
.globl _tsc_lfence_end
_tsc_lfence_end:
SET_SIZE(tsc_read)
#endif
/*
 * randtick() — return the raw (unserialized) TSC value; used where a
 * cheap, weakly-ordered tick source is acceptable.
 */
ENTRY_NP(randtick)
rdtsc
shlq $32, %rdx
orq %rdx, %rax			/* rax = (edx << 32) | eax */
ret
SET_SIZE(randtick)
/*
 * _insque(entryp, predp)
 *
 * Insert entryp into a doubly-linked queue immediately after predp.
 * Layout assumption visible here: forward link at offset 0, back link
 * at offset CPTRSIZE.
 */
ENTRY(_insque)
movq (%rsi), %rax		/* rax = predp->forw */
movq %rsi, CPTRSIZE(%rdi)	/* entryp->back = predp */
movq %rax, (%rdi)		/* entryp->forw = predp->forw */
movq %rdi, (%rsi)		/* predp->forw = entryp */
movq %rdi, CPTRSIZE(%rax)	/* old successor->back = entryp */
ret
SET_SIZE(_insque)
/*
 * _remque(entryp)
 *
 * Unlink entryp from its doubly-linked queue (counterpart of _insque).
 */
ENTRY(_remque)
movq (%rdi), %rax		/* rax = entryp->forw */
movq CPTRSIZE(%rdi), %rdx	/* rdx = entryp->back */
movq %rax, (%rdx)		/* back->forw = forw */
movq %rdx, CPTRSIZE(%rax)	/* forw->back = back */
ret
SET_SIZE(_remque)
/*
 * size_t strlen(const char *s)
 *
 * Byte-at-a-time string length.  Under DEBUG, panics if the argument
 * lies below postbootkernelbase (i.e. is a user address).
 */
ENTRY(strlen)
#ifdef DEBUG
movq postbootkernelbase(%rip), %rax
cmpq %rax, %rdi
jae str_valid			/* kernel address: proceed */
pushq %rbp			/* build a frame for the panic path */
movq %rsp, %rbp
leaq .str_panic_msg(%rip), %rdi
xorl %eax, %eax			/* no FP args for the variadic panic */
call panic
#endif
str_valid:
cmpb $0, (%rdi)			/* empty string? */
movq %rdi, %rax			/* rax = start, for final subtraction */
je .null_found
.align 4
.strlen_loop:
incq %rdi
cmpb $0, (%rdi)			/* scan for the terminating NUL */
jne .strlen_loop
.null_found:
subq %rax, %rdi			/* length = end - start */
movq %rdi, %rax
ret
SET_SIZE(strlen)

#ifdef DEBUG
.text
.str_panic_msg:
.string "strlen: argument below kernelbase"
#endif
/*
 * Interrupt priority level (spl) entry points.
 *
 * SETPRI(level) sets the priority to exactly `level` via do_splx;
 * RAISE(level) only raises (never lowers) via splr.  Both tail-call,
 * so the spl* routines return whatever do_splx/splr return.
 */
#define SETPRI(level) \
movl $##level, %edi; \
jmp do_splx
#define RAISE(level) \
movl $##level, %edi; \
jmp splr

ENTRY(spl8)
SETPRI(15)			/* block everything */
SET_SIZE(spl8)

ENTRY(spl7)
RAISE(13)			/* raise to scheduling-clock level */
SET_SIZE(spl7)

ENTRY(splzs)
SETPRI(12)			/* block zs (serial) interrupts */
SET_SIZE(splzs)

/* splhi and friends: raise to dispatcher level */
ENTRY(splhi)
ALTENTRY(splhigh)
ALTENTRY(spl6)
ALTENTRY(i_ddi_splhigh)
RAISE(DISP_LEVEL)
SET_SIZE(i_ddi_splhigh)
SET_SIZE(spl6)
SET_SIZE(splhigh)
SET_SIZE(splhi)

ENTRY(spl0)
SETPRI(0)			/* allow all interrupts */
SET_SIZE(spl0)

/* splx(level): restore a previously-saved priority */
ENTRY(splx)
jmp do_splx			/* redirect with level still in %edi */
SET_SIZE(splx)
/*
 * wait_500ms() — busy-wait roughly 500 ms by calling tenmicrosec()
 * 50000 times.  Used by the reset code below.
 */
ENTRY(wait_500ms)
pushq %rbx			/* rbx is callee-saved */
movl $50000, %ebx		/* 50000 * 10us = 500ms */
1:
call tenmicrosec
decl %ebx
jnz 1b
popq %rbx
ret
SET_SIZE(wait_500ms)
/*
 * pc_reset() — reboot the machine, trying each enabled method in turn:
 * keyboard-controller reset, port 0x92 fast reset, PCI 0xcf9 soft
 * reset, and finally a deliberate triple fault (efi_reset).  The
 * pc_reset_methods bitmask (patchable global) selects which methods
 * are attempted.  Each attempt is followed by a 500 ms wait to give
 * slow hardware time to pull the reset line.
 */
#define RESET_METHOD_KBC 1
#define RESET_METHOD_PORT92 2
#define RESET_METHOD_PCI 4

DGDEF3(pc_reset_methods, 4, 8)
.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

ENTRY(pc_reset)
testl $RESET_METHOD_KBC, pc_reset_methods(%rip)
jz 1f
/
/ Try the classic keyboard controller-triggered reset.
/
movw $0x64, %dx			/ KBC command port
movb $0xfe, %al			/ pulse reset line command
outb (%dx)
/ Wait up to 500 milliseconds here for the keyboard controller
/ to pull the reset line. On some systems where the keyboard
/ controller is slow to pull the reset line, the next reset method
/ may be executed (which may be bad if those systems hang when the
/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
/ and Ferrari 4000 (doesn't like the cf9 reset method))
call wait_500ms
1:
testl $RESET_METHOD_PORT92, pc_reset_methods(%rip)
jz 3f
/
/ Try port 0x92 fast reset
/
movw $0x92, %dx
inb (%dx)
cmpb $0xff, %al / If port's not there, we should get back 0xFF
je 1f
testb $1, %al / If bit 0
jz 2f / is clear, jump to perform the reset
andb $0xfe, %al / otherwise,
outb (%dx) / clear bit 0 first, then
2:
orb $1, %al / Set bit 0
outb (%dx) / and reset the system
1:
call wait_500ms
3:
testl $RESET_METHOD_PCI, pc_reset_methods(%rip)
jz 4f
/ Try the PCI (soft) reset vector (should work on all modern systems,
/ but has been shown to cause problems on 450NX systems, and some newer
/ systems (e.g. ATI IXP400-equipped systems))
/ When resetting via this method, 2 writes are required. The first
/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
/ power cycle).
/ The reset occurs on the second write, during bit 2's transition from
/ 0->1.
movw $0xcf9, %dx
movb $0x2, %al / Reset mode = hard, no power cycle
outb (%dx)
movb $0x6, %al
outb (%dx)
call wait_500ms
4:
/
/ port 0xcf9 failed also. Last-ditch effort is to
/ triple-fault the CPU.
/ Also, use triple fault for EFI firmware
/
ENTRY(efi_reset)
pushq $0x0
pushq $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
lidt (%rsp)			/ load a null IDT
int $0x0 / Trigger interrupt, generate triple-fault
cli
hlt / Wait forever
/*NOTREACHED*/
SET_SIZE(efi_reset)
SET_SIZE(pc_reset)
/*
 * Port I/O primitives: out{l,w,b}(port, value) and in{l,w,b}(port).
 * Port number arrives in %di, value (for out*) in %esi/%si/%sil.
 * The 16-bit forms use the D16 operand-size override on the 32-bit
 * instruction.  The in* routines zero %eax first so the unwritten
 * high bits of the return value are clean.
 */
ENTRY(outl)
movw %di, %dx			/* dx = port */
movl %esi, %eax			/* eax = value */
outl (%dx)
ret
SET_SIZE(outl)

ENTRY(outw)
movw %di, %dx			/* dx = port */
movw %si, %ax			/* ax = value */
D16 outl (%dx)			/* 16-bit out via operand-size prefix */
ret
SET_SIZE(outw)

ENTRY(outb)
movw %di, %dx			/* dx = port */
movb %sil, %al			/* al = value */
outb (%dx)
ret
SET_SIZE(outb)

ENTRY(inl)
xorl %eax, %eax
movw %di, %dx			/* dx = port */
inl (%dx)
ret
SET_SIZE(inl)

ENTRY(inw)
xorl %eax, %eax			/* clear high bits of result */
movw %di, %dx			/* dx = port */
D16 inl (%dx)			/* 16-bit in via operand-size prefix */
ret
SET_SIZE(inw)

ENTRY(inb)
xorl %eax, %eax			/* clear high bits of result */
movw %di, %dx			/* dx = port */
inb (%dx)
ret
SET_SIZE(inb)
/*
 * Software-interrupt helpers:
 *   int3()     — breakpoint trap.
 *   int18()    — machine-check trap.
 *   int20()    — enter the kernel debugger, but only if boothowto has
 *                RB_DEBUG set (i.e. a debugger is attached).
 *   int_cmci() — raise the trap vector used for corrected MC events.
 */
ENTRY(int3)
int $T_BPTFLT
ret
SET_SIZE(int3)

ENTRY(int18)
int $T_MCE
ret
SET_SIZE(int18)

ENTRY(int20)
movl boothowto, %eax
andl $RB_DEBUG, %eax		/* debugger present? */
jz 1f				/* no: do nothing */
int $T_DBGENTR
1:
rep; ret			/* rep ret: branch-target-friendly return */
SET_SIZE(int20)

ENTRY(int_cmci)
int $T_ENOEXTFLT
ret
SET_SIZE(int_cmci)
/*
 * int scanc(size_t length, uchar_t *string, uchar_t *table, uchar_t mask)
 *
 * Scan `string` (length bytes, %rsi after setup) until a byte whose
 * table[byte] entry has any `mask` bit set, and return the number of
 * bytes remaining (0 if none matched).  Args on entry:
 * rdi=length, rsi=string, rdx=table, cl=mask.
 */
ENTRY(scanc)
addq %rsi, %rdi			/* rdi = end of string */
.scanloop:
cmpq %rdi, %rsi			/* reached the end? */
jnb .scandone
movzbq (%rsi), %r8		/* r8 = next byte, zero-extended */
incq %rsi
testb %cl, (%r8, %rdx)		/* table[byte] & mask ? */
jz .scanloop
decq %rsi			/* back up to the matching byte */
.scandone:
movl %edi, %eax
subl %esi, %eax			/* return bytes remaining */
ret
SET_SIZE(scanc)
/*
 * ulong_t intr_clear(void) / clear_int_flag(void)
 *
 * Disable interrupts and return the previous flags value so the
 * caller can later intr_restore() it.  On Xen the interrupt state
 * lives in the vcpu event-channel mask rather than PS_IE, so the
 * returned flags have PS_IE synthesized from the old mask — unless
 * the hypervisor is panicking (xpv_panicking), in which case we fall
 * through to the plain CLI path.
 */
ENTRY(intr_clear)
ENTRY(clear_int_flag)
pushfq
popq %rax			/* rax = current rflags */
#if defined(__xpv)
leaq xpv_panicking, %rdi
movl (%rdi), %edi
cmpl $0, %edi
jne 2f				/* panicking: use real cli below */
CLIRET(%rdi, %dl)		/* mask events; dl = old mask */
andq $_BITNOT(PS_IE), %rax	/* assume interrupts were masked */
testb $1, %dl
jnz 1f				/* already masked: PS_IE stays clear */
orq $PS_IE, %rax		/* was unmasked: report PS_IE set */
1:
ret
2:
#endif
CLI(%rdi)			/* disable interrupts */
ret
SET_SIZE(clear_int_flag)
SET_SIZE(intr_clear)
/*
 * curcpup() — return the current CPU's cpu_t pointer from per-CPU
 * storage (%gs:CPU_SELF).
 */
ENTRY(curcpup)
movq %gs:CPU_SELF, %rax
ret
SET_SIZE(curcpup)
/*
 * Byte-order conversion.  x86 is little-endian, so hton* and ntoh*
 * are the same operation (a byte swap) and share entry points.
 *
 * htonll/ntohll — swap 64 bits.
 * htonl/ntohl   — swap 32 bits.
 * htons/ntohs   — swap 16 bits (32-bit bswap, then shift the result
 *                 down; bswap has no 16-bit form).
 */
ENTRY(htonll)
ALTENTRY(ntohll)
movq %rdi, %rax
bswapq %rax
ret
SET_SIZE(ntohll)
SET_SIZE(htonll)

ENTRY(htonl)
ALTENTRY(ntohl)
movl %edi, %eax
bswap %eax
ret
SET_SIZE(ntohl)
SET_SIZE(htonl)

ENTRY(htons)
ALTENTRY(ntohs)
movl %edi, %eax
bswap %eax
shrl $16, %eax			/* keep the swapped low 16 bits */
ret
SET_SIZE(ntohs)
SET_SIZE(htons)
/*
 * void intr_restore(ulong_t flags) / restore_int_flag(flags)
 *
 * Re-enable interrupts iff PS_IE is set in the flags value previously
 * returned by intr_clear().  On Xen, translate PS_IE back into the
 * vcpu event mask instead of using sti — unless the hypervisor is
 * panicking, in which case do nothing extra.
 *
 * sti() / cli() are the bare enable/disable primitives.
 */
ENTRY(intr_restore)
ENTRY(restore_int_flag)
testq $PS_IE, %rdi		/* were interrupts enabled? */
jz 1f				/* no: leave them disabled */
#if defined(__xpv)
leaq xpv_panicking, %rsi
movl (%rsi), %esi
cmpl $0, %esi
jne 1f				/* panicking: skip event unmask */
IE_TO_EVENT_MASK(%rsi, %rdi)	/* translate PS_IE to event mask */
#else
sti
#endif
1:
ret
SET_SIZE(restore_int_flag)
SET_SIZE(intr_restore)

ENTRY(sti)
STI
ret
SET_SIZE(sti)

ENTRY(cli)
CLI(%rax)
ret
SET_SIZE(cli)
/*
 * dtrace_icookie_t dtrace_interrupt_disable(void)
 *
 * DTrace's interrupt-disable: returns the prior rflags (with PS_IE
 * synthesized from the Xen event mask under __xpv) and leaves
 * interrupts disabled.  Mirrors intr_clear() above.
 */
ENTRY(dtrace_interrupt_disable)
pushfq
popq %rax			/* rax = current rflags */
#if defined(__xpv)
leaq xpv_panicking, %rdi
movl (%rdi), %edi
cmpl $0, %edi
jne .dtrace_interrupt_disable_done	/* panicking: return as-is */
CLIRET(%rdi, %dl)		/* mask events; dl = old mask */
andq $_BITNOT(PS_IE), %rax	/* assume interrupts were masked */
testb $1, %dl
jnz .dtrace_interrupt_disable_done
orq $PS_IE, %rax		/* was unmasked: report PS_IE set */
#else
CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
ret
SET_SIZE(dtrace_interrupt_disable)

/*
 * void dtrace_interrupt_enable(dtrace_icookie_t cookie)
 *
 * Restore the interrupt state captured by dtrace_interrupt_disable by
 * reloading the saved rflags wholesale (popfq); under Xen, also
 * translate PS_IE back into the vcpu event mask.
 */
ENTRY(dtrace_interrupt_enable)
pushq %rdi
popfq				/* restore saved rflags */
#if defined(__xpv)
leaq xpv_panicking, %rdx
movl (%rdx), %edx
cmpl $0, %edx
jne .dtrace_interrupt_enable_done	/* panicking: skip unmask */
IE_TO_EVENT_MASK(%rdx, %rdi)	/* translate PS_IE to event mask */
#endif
.dtrace_interrupt_enable_done:
ret
SET_SIZE(dtrace_interrupt_enable)
/*
 * dtrace_membar_producer / dtrace_membar_consumer — memory barriers
 * that are no-ops on x86's strongly-ordered memory model; the
 * `rep; ret` form is the branch-predictor-friendly bare return.
 *
 * threadp() — return curthread from per-CPU storage.
 */
ENTRY(dtrace_membar_producer)
rep; ret			/* no barrier needed on x86 TSO */
SET_SIZE(dtrace_membar_producer)

ENTRY(dtrace_membar_consumer)
rep; ret			/* no barrier needed on x86 TSO */
SET_SIZE(dtrace_membar_consumer)

ENTRY(threadp)
movq %gs:CPU_THREAD, %rax	/* rax = curthread */
ret
SET_SIZE(threadp)
/*
 * uint_t ip_ocsum(ushort_t *address, int halfword_count, uint_t sum)
 *
 * One's-complement 16-bit checksum (Internet checksum) over
 * halfword_count 16-bit words starting at address, folded into the
 * incoming partial sum.  The main loop adds 64 bytes (32 halfwords)
 * per iteration using adc chains split across %eax and %edx; the tail
 * is handled by computing an entry point into the unrolled chain via
 * the jump table at the bottom.  Under DEBUG, panics on a user-space
 * address.
 */
ENTRY(ip_ocsum)
pushq %rbp
movq %rsp, %rbp
#ifdef DEBUG
movq postbootkernelbase(%rip), %rax
cmpq %rax, %rdi
jnb 1f				/* kernel address: proceed */
xorl %eax, %eax
movq %rdi, %rsi
leaq .ip_ocsum_panic_msg(%rip), %rdi
call panic
.ip_ocsum_panic_msg:
.string "ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
movl %esi, %ecx			/* ecx = halfword count */
movq %rdi, %rsi			/* rsi = data pointer */
xorl %eax, %eax			/* eax = partial sum (hi lane) */
testl %ecx, %ecx
jz .ip_ocsum_done		/* zero halfwords: fold sum and exit */
testq $3, %rsi
jnz .ip_csum_notaligned		/* 4-byte-align the pointer first */
.ip_csum_aligned:
.next_iter:
subl $32, %ecx			/* 32 halfwords = 64 bytes per pass */
jl .less_than_32
/* unrolled adc chain; the .onlyN labels are tail entry points */
addl 0(%rsi), %edx
.only60:
adcl 4(%rsi), %eax
.only56:
adcl 8(%rsi), %edx
.only52:
adcl 12(%rsi), %eax
.only48:
adcl 16(%rsi), %edx
.only44:
adcl 20(%rsi), %eax
.only40:
adcl 24(%rsi), %edx
.only36:
adcl 28(%rsi), %eax
.only32:
adcl 32(%rsi), %edx
.only28:
adcl 36(%rsi), %eax
.only24:
adcl 40(%rsi), %edx
.only20:
adcl 44(%rsi), %eax
.only16:
adcl 48(%rsi), %edx
.only12:
adcl 52(%rsi), %eax
.only8:
adcl 56(%rsi), %edx
.only4:
adcl 60(%rsi), %eax
.only0:
adcl $0, %eax			/* fold in the final carry */
adcl $0, %eax
addq $64, %rsi
testl %ecx, %ecx
jnz .next_iter
.ip_ocsum_done:
addl %eax, %edx			/* combine the two sum lanes */
adcl $0, %edx
movl %edx, %eax			/* fold 32-bit sum to 16 bits */
shrl $16, %eax
addw %dx, %ax
adcw $0, %ax
andl $0xffff, %eax
leave
ret
.ip_csum_notaligned:
xorl %edi, %edi
movw (%rsi), %di		/* consume one halfword to align */
addl %edi, %edx
adcl $0, %edx
addq $2, %rsi
decl %ecx
jmp .ip_csum_aligned
.less_than_32:
addl $32, %ecx			/* ecx = remaining halfwords (< 32) */
testl $1, %ecx
jz .size_aligned
andl $0xfe, %ecx		/* odd count: add the last halfword */
movzwl (%rsi, %rcx, 2), %edi
addl %edi, %edx
adcl $0, %edx
.size_aligned:
movl %ecx, %edi
shrl $1, %ecx			/* ecx = remaining / 2 (table index) */
shl $1, %edi
subq $64, %rdi			/* bias pointer so .onlyN offsets line up */
addq %rdi, %rsi
leaq .ip_ocsum_jmptbl(%rip), %rdi
leaq (%rdi, %rcx, 8), %rdi	/* rdi = &jmptbl[remaining/2] */
xorl %ecx, %ecx
clc				/* entry via adc: start with carry clear */
movq (%rdi), %rdi
INDIRECT_JMP_REG(rdi)		/* retpoline-safe indirect jump */
.align 8
.ip_ocsum_jmptbl:
.quad .only0, .only4, .only8, .only12, .only16, .only20
.quad .only24, .only28, .only32, .only36, .only40, .only44
.quad .only48, .only52, .only56, .only60
SET_SIZE(ip_ocsum)
/*
 * uint64_t mul32(uint_t a, uint_t b)
 *
 * 32x32 -> 64-bit unsigned multiply; mull leaves the product split
 * across edx:eax, which is recombined into rax.
 */
ENTRY(mul32)
xorl %edx, %edx			/* clear high half before mull */
movl %edi, %eax
mull %esi			/* edx:eax = a * b */
shlq $32, %rdx
orq %rdx, %rax			/* rax = full 64-bit product */
ret
SET_SIZE(mul32)
/*
 * scan_memory(caddr_t addr, size_t size)
 *
 * Touch (read) `size` bytes starting at addr, one quadword at a time,
 * using rep lodsq.  Sizes smaller than 8 bytes do nothing.  The loads
 * are discarded — only the memory accesses matter.
 */
ENTRY(scan_memory)
shrq $3, %rsi			/* rsi = count of 8-byte words */
jz .scanm_done			/* < 8 bytes: nothing to do */
movq %rsi, %rcx			/* rcx = word count for rep */
movq %rdi, %rsi			/* rsi = source for lodsq */
rep lodsq			/* read every word; values discarded */
.scanm_done:
rep; ret
SET_SIZE(scan_memory)
/*
 * int lowbit(ulong_t i)   — position of the lowest set bit, 1-based;
 *                           0 if i == 0.
 * int highbit(ulong_t i) / highbit64 — position of the highest set
 *                           bit, 1-based; 0 if i == 0.
 *
 * Both preload -1 and rely on bsf/bsr leaving ZF set (and the cmov
 * not firing) for a zero input, so the final incl yields 0.
 */
ENTRY(lowbit)
movl $-1, %eax			/* default: input was zero */
bsfq %rdi, %rdi			/* rdi = index of lowest set bit */
cmovnz %edi, %eax		/* take it only if input non-zero */
incl %eax			/* convert 0-based index to 1-based */
ret
SET_SIZE(lowbit)

ENTRY(highbit)
ALTENTRY(highbit64)
movl $-1, %eax			/* default: input was zero */
bsrq %rdi, %rdi			/* rdi = index of highest set bit */
cmovnz %edi, %eax		/* take it only if input non-zero */
incl %eax			/* convert 0-based index to 1-based */
ret
SET_SIZE(highbit64)
SET_SIZE(highbit)
/*
 * MSR access routines.
 *
 * rdmsr(msr)        — return the 64-bit MSR value (rdmsr splits it
 *                     across edx:eax).
 * wrmsr(msr, value) — write a 64-bit value to an MSR.
 * xrdmsr / xwrmsr   — same, but with the extended-access password in
 *                     %edi (XMSR_ACCESS_VAL already includes the '$',
 *                     so the movl gets an immediate).
 */
#define XMSR_ACCESS_VAL $0x9c5a203a

ENTRY(rdmsr)
movl %edi, %ecx			/* ecx = MSR number */
rdmsr				/* edx:eax = MSR value */
shlq $32, %rdx
orq %rdx, %rax			/* rax = full 64-bit value */
ret
SET_SIZE(rdmsr)

ENTRY(wrmsr)
movq %rsi, %rdx
shrq $32, %rdx			/* edx = high 32 bits of value */
movl %esi, %eax			/* eax = low 32 bits of value */
movl %edi, %ecx			/* ecx = MSR number */
wrmsr
ret
SET_SIZE(wrmsr)

ENTRY(xrdmsr)
pushq %rbp
movq %rsp, %rbp
movl %edi, %ecx			/* ecx = MSR number */
movl XMSR_ACCESS_VAL, %edi	/* extended-access password */
rdmsr				/* edx:eax = MSR value */
shlq $32, %rdx
orq %rdx, %rax
leave
ret
SET_SIZE(xrdmsr)

ENTRY(xwrmsr)
pushq %rbp
movq %rsp, %rbp
movl %edi, %ecx			/* ecx = MSR number */
movl XMSR_ACCESS_VAL, %edi	/* extended-access password */
movq %rsi, %rdx
shrq $32, %rdx			/* edx = high 32 bits of value */
movl %esi, %eax			/* eax = low 32 bits of value */
wrmsr
leave
ret
SET_SIZE(xwrmsr)
/*
 * get_xcr(xcr) / set_xcr(xcr, value)
 *
 * Read/write an extended control register (e.g. XCR0).  xgetbv
 * (0f 01 d0) and xsetbv (0f 01 d1) are emitted as raw bytes for
 * assemblers that lack the mnemonics; both use ecx = XCR number and
 * edx:eax = value.
 */
ENTRY(get_xcr)
movl %edi, %ecx			/* ecx = XCR number */
.byte 0x0f,0x01,0xd0		/* xgetbv: edx:eax = XCR value */
shlq $32, %rdx
orq %rdx, %rax			/* rax = full 64-bit value */
ret
SET_SIZE(get_xcr)

ENTRY(set_xcr)
movq %rsi, %rdx
shrq $32, %rdx			/* edx = high 32 bits of value */
movl %esi, %eax			/* eax = low 32 bits of value */
movl %edi, %ecx			/* ecx = XCR number */
.byte 0x0f,0x01,0xd1		/* xsetbv */
ret
SET_SIZE(set_xcr)
/*
 * invalidate_cache() — write back and invalidate all processor caches.
 */
ENTRY(invalidate_cache)
wbinvd
ret
SET_SIZE(invalidate_cache)
/*
 * void getcregs(struct cregs *crp)
 *
 * Snapshot the control-register state into *crp for crash dumps and
 * debugging.  Under Xen only the CRs are captured (after bzero'ing
 * the structure); on bare metal the descriptor-table registers
 * (GDT/IDT/LDT/TR) and the KGSBASE/EFER MSRs are captured as well.
 * The explicit stores of %rax (zero) before sgdt/sidt/sldt/str clear
 * the bytes those narrow stores leave untouched.
 */
ENTRY_NP(getcregs)
#if defined(__xpv)
pushq %rdi
movq $CREGSZ, %rsi
call bzero			/* zero the whole structure first */
popq %rdi
movq %cr0, %rax
movq %rax, CREG_CR0(%rdi)
movq %cr2, %rax
movq %rax, CREG_CR2(%rdi)
movq %cr3, %rax
movq %rax, CREG_CR3(%rdi)
movq %cr4, %rax
movq %rax, CREG_CR4(%rdi)
#else
/* GETMSR(msr, offset, base): read an MSR into two 32-bit slots */
#define GETMSR(r, off, d) \
movl $r, %ecx; \
rdmsr; \
movl %eax, off(d); \
movl %edx, off+4(d)
xorl %eax, %eax
movq %rax, CREG_GDT+8(%rdi)	/* pre-zero; sgdt stores only 10 bytes */
sgdt CREG_GDT(%rdi)
movq %rax, CREG_IDT+8(%rdi)	/* pre-zero; sidt stores only 10 bytes */
sidt CREG_IDT(%rdi)
movq %rax, CREG_LDT(%rdi)	/* pre-zero; sldt stores only 2 bytes */
sldt CREG_LDT(%rdi)
movq %rax, CREG_TASKR(%rdi)	/* pre-zero; str stores only 2 bytes */
str CREG_TASKR(%rdi)
movq %cr0, %rax
movq %rax, CREG_CR0(%rdi)
movq %cr2, %rax
movq %rax, CREG_CR2(%rdi)
movq %cr3, %rax
movq %rax, CREG_CR3(%rdi)
movq %cr4, %rax
movq %rax, CREG_CR4(%rdi)
movq %cr8, %rax
movq %rax, CREG_CR8(%rdi)
GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif
ret
SET_SIZE(getcregs)
#undef GETMSR
/*
 * int panic_trigger(int *tp) / dtrace_panic_trigger(int *tp)
 *
 * Atomically claim the panic trigger word: swap the marker value
 * 0xdefacedd into *tp.  If the previous value was 0 this caller won
 * the race and 1 is returned (it should proceed to panic); otherwise
 * 0 is returned (someone else is already panicking).  The two
 * variants are identical; DTrace keeps its own so its probes can be
 * excluded from panic paths.
 */
ENTRY_NP(panic_trigger)
xorl %eax, %eax
movl $0xdefacedd, %edx		/* panic marker value */
lock
xchgl %edx, (%rdi)		/* atomically claim the trigger */
cmpl $0, %edx
je 0f				/* was 0: we won the race */
movl $0, %eax			/* already triggered: return 0 */
ret
0: movl $1, %eax		/* first trigger: return 1 */
ret
SET_SIZE(panic_trigger)

ENTRY_NP(dtrace_panic_trigger)
xorl %eax, %eax
movl $0xdefacedd, %edx		/* panic marker value */
lock
xchgl %edx, (%rdi)		/* atomically claim the trigger */
cmpl $0, %edx
je 0f				/* was 0: we won the race */
movl $0, %eax			/* already triggered: return 0 */
ret
0: movl $1, %eax		/* first trigger: return 1 */
ret
SET_SIZE(dtrace_panic_trigger)
/*
 * void vpanic(const char *format, va_list alist)
 *
 * The common panic entry point.  Saves the volatile register state on
 * the current stack, fires panic_trigger(&panic_quiesce), and falls
 * into vpanic_common, which: (1) if this CPU won the trigger, switches
 * to the dedicated panic_stack; (2) builds a struct regs on the stack
 * from the values pushed above plus the live callee-saved registers,
 * segment registers, and a synthesized RIP/CS/RFLAGS/RSP; and
 * (3) calls panicsys(format, alist, &regs, on_panic_stack).  If
 * panicsys returns, the saved registers are restored and vpanic
 * returns normally.  dtrace_vpanic (below) shares vpanic_common.
 */
ENTRY_NP(vpanic)
pushq %rbp			/* | %rip |	0x60	*/
movq %rsp, %rbp
pushfq				/* save volatile state in a known */
pushq %r11			/* layout; offsets 0x0-0x58 below */
pushq %r10			/* are read back by vpanic_common */
pushq %rbx
pushq %rax
pushq %r9
pushq %r8
pushq %rcx
pushq %rdx
pushq %rsi
pushq %rdi
movq %rsp, %rbx			/* rbx = base of the saved block */
leaq panic_quiesce(%rip), %rdi
call panic_trigger		/* eax = 1 if we are the first CPU */
vpanic_common:
movl %eax, %r11d
cmpl $0, %r11d
je 0f				/* lost the race: stay on this stack */
leaq panic_stack(%rip), %rsp	/* winner: switch to the panic stack */
addq $PANICSTKSIZE, %rsp
0: subq $REGSIZE, %rsp		/* carve out a struct regs */
/* copy the pushed values (via rbx) into the struct regs */
movq 0x0(%rbx), %rcx
movq %rcx, REGOFF_RDI(%rsp)
movq 0x8(%rbx), %rcx
movq %rcx, REGOFF_RSI(%rsp)
movq 0x10(%rbx), %rcx
movq %rcx, REGOFF_RDX(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, REGOFF_RCX(%rsp)
movq 0x20(%rbx), %rcx
movq %rcx, REGOFF_R8(%rsp)
movq 0x28(%rbx), %rcx
movq %rcx, REGOFF_R9(%rsp)
movq 0x30(%rbx), %rcx
movq %rcx, REGOFF_RAX(%rsp)
movq 0x38(%rbx), %rcx
movq %rcx, REGOFF_RBX(%rsp)
movq 0x58(%rbx), %rcx		/* saved rbp (pushed first, highest) */
movq %rcx, REGOFF_RBP(%rsp)
movq 0x40(%rbx), %rcx
movq %rcx, REGOFF_R10(%rsp)
movq 0x48(%rbx), %rcx
movq %rcx, REGOFF_R11(%rsp)
movq %r12, REGOFF_R12(%rsp)	/* callee-saved regs are still live */
movq %r13, REGOFF_R13(%rsp)
movq %r14, REGOFF_R14(%rsp)
movq %r15, REGOFF_R15(%rsp)
xorl %ecx, %ecx
movw %ds, %cx			/* capture segment registers */
movq %rcx, REGOFF_DS(%rsp)
movw %es, %cx
movq %rcx, REGOFF_ES(%rsp)
movw %fs, %cx
movq %rcx, REGOFF_FS(%rsp)
movw %gs, %cx
movq %rcx, REGOFF_GS(%rsp)
movq $0, REGOFF_TRAPNO(%rsp)	/* not a hardware trap */
movq $0, REGOFF_ERR(%rsp)
leaq vpanic(%rip), %rcx
movq %rcx, REGOFF_RIP(%rsp)	/* synthesize RIP = vpanic */
movw %cs, %cx
movzwq %cx, %rcx
movq %rcx, REGOFF_CS(%rsp)
movq 0x50(%rbx), %rcx		/* saved rflags from pushfq */
movq %rcx, REGOFF_RFL(%rsp)
movq %rbx, %rcx
addq $0x60, %rcx		/* rsp as it was at vpanic entry */
movq %rcx, REGOFF_RSP(%rsp)
movw %ss, %cx
movzwq %cx, %rcx
movq %rcx, REGOFF_SS(%rsp)
movq REGOFF_RDI(%rsp), %rdi	/* panicsys(format, alist, rp, on_panic_stack) */
movq REGOFF_RSI(%rsp), %rsi
movq %rsp, %rdx
movl %r11d, %ecx
call panicsys
addq $REGSIZE, %rsp
/* panicsys returned: restore the saved state and return */
popq %rdi
popq %rsi
popq %rdx
popq %rcx
popq %r8
popq %r9
popq %rax
popq %rbx
popq %r10
popq %r11
popfq
leave
ret
SET_SIZE(vpanic)
/*
 * void dtrace_vpanic(const char *format, va_list alist)
 *
 * DTrace-safe variant of vpanic: identical register save sequence,
 * but fires dtrace_panic_trigger instead of panic_trigger, then joins
 * vpanic_common in vpanic above.  The push order must match vpanic's
 * exactly, since vpanic_common reads the block back via %rbx.
 */
ENTRY_NP(dtrace_vpanic)
pushq %rbp			/* same frame layout as vpanic */
movq %rsp, %rbp
pushfq
pushq %r11
pushq %r10
pushq %rbx
pushq %rax
pushq %r9
pushq %r8
pushq %rcx
pushq %rdx
pushq %rsi
pushq %rdi
movq %rsp, %rbx			/* rbx = base of the saved block */
leaq panic_quiesce(%rip), %rdi
call dtrace_panic_trigger	/* eax = 1 if we are the first CPU */
jmp vpanic_common		/* shared tail in vpanic */
SET_SIZE(dtrace_vpanic)
/*
 * High-resolution time state and the clock-tick updater.
 *
 * timedelta    — pending adjustment (adjtime/NTP), consumed elsewhere.
 * hrtime_base  — monotonic base, advanced each tick; initialized to
 *                6 clock ticks' worth of nanoseconds.
 * adj_shift    — shift used by the hrestime adjustment logic.
 */
DGDEF3(timedelta, 8, 8)
.long 0, 0
DGDEF3(hrtime_base, 8, 8)
.long _MUL(NSEC_PER_CLOCK_TICK, 6), 0
DGDEF3(adj_shift, 4, 4)
.long ADJ_SHIFT

/*
 * void hres_tick(void)
 *
 * Per-tick update of the hires time state.  Reads the current hrtime
 * via the gethrtimef function pointer, takes hres_lock with an xchgb
 * spin loop (pause in the wait path), advances hrtime_base and
 * hrestime's nanosecond field by the delta since hres_last_tick,
 * calls __adj_hrestime to apply any pending adjustment, and releases
 * the lock by incrementing it (making the low byte even again).
 */
ENTRY_NP(hres_tick)
pushq %rbp
movq %rsp, %rbp
movq gethrtimef(%rip), %rsi	/* current hrtime source */
INDIRECT_CALL_REG(rsi)		/* rax = gethrtimef() */
movq %rax, %r8			/* r8 = now */
leaq hres_lock(%rip), %rax
movb $-1, %dl
.CL1:
xchgb %dl, (%rax)		/* try to take hres_lock */
testb %dl, %dl
jz .CL3				/* got it (old value was 0) */
.CL2:
cmpb $0, (%rax)			/* spin without bus locking */
pause
jne .CL2
jmp .CL1			/* looks free: retry the xchg */
.CL3:
leaq hres_last_tick(%rip), %rax
movq %r8, %r11			/* keep `now` for hres_last_tick */
subq (%rax), %r8		/* r8 = now - hres_last_tick */
addq %r8, hrtime_base(%rip)	/* advance monotonic base */
addq %r8, hrestime+8(%rip)	/* advance hrestime.tv_nsec */
movq %r11, (%rax)		/* hres_last_tick = now */
call __adj_hrestime		/* normalize/apply adjustments */
incl hres_lock(%rip)		/* release: low byte becomes even */
leave
ret
SET_SIZE(hres_tick)
/*
 * prefetch_smap_w / prefetch_page_r — prefetch hints that are no-ops
 * on this platform; `rep; ret` is the predictor-friendly bare return.
 */
ENTRY(prefetch_smap_w)
rep; ret
SET_SIZE(prefetch_smap_w)

ENTRY(prefetch_page_r)
rep; ret
SET_SIZE(prefetch_page_r)
/*
 * int bcmp(const void *s1, const void *s2, size_t length)
 *
 * Returns 0 if the two buffers are equal, non-zero (1) otherwise.
 * Implemented on top of memcmp, normalizing its signed result to 0/1.
 * Under DEBUG, panics if either pointer is below postbootkernelbase
 * (unless length is 0).
 */
ENTRY(bcmp)
pushq %rbp
movq %rsp, %rbp
#ifdef DEBUG
testq %rdx,%rdx			/* length 0: skip the address check */
je 1f
movq postbootkernelbase(%rip), %r11
cmpq %r11, %rdi
jb 0f				/* s1 below kernelbase: panic */
cmpq %r11, %rsi
jnb 1f				/* both kernel addresses: proceed */
0: leaq .bcmp_panic_msg(%rip), %rdi
xorl %eax, %eax			/* no FP args for the variadic panic */
call panic
1:
#endif
call memcmp			/* args already in rdi/rsi/rdx */
testl %eax, %eax
setne %dl			/* dl = (memcmp != 0) */
leave
movzbl %dl, %eax		/* return 0 or 1 */
ret
SET_SIZE(bcmp)

#ifdef DEBUG
.text
.bcmp_panic_msg:
.string "bcmp: arguments below kernelbase"
#endif
/*
 * uint16_t bsrw_insn(uint16_t mask)
 *
 * Bit-scan-reverse on a 16-bit value: index of the highest set bit.
 * NOTE(review): for mask == 0, bsrw leaves the destination
 * undefined/unmodified; %eax was pre-zeroed, so callers are expected
 * to pass a non-zero mask.
 */
ENTRY_NP(bsrw_insn)
xorl %eax, %eax			/* clear high bits of the result */
bsrw %di, %ax			/* ax = index of highest set bit */
ret
SET_SIZE(bsrw_insn)
/*
 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
 *     uint_t arg1, uint_t arg2)
 *
 * Switch to the supplied stack pointer and invoke func(arg1, arg2)
 * there (e.g. dispatching onto an interrupt stack).  The old frame is
 * recoverable via the saved rbp; `leave` restores the original stack.
 */
ENTRY_NP(switch_sp_and_call)
pushq %rbp
movq %rsp, %rbp			/* frame anchors the old stack */
movq %rdi, %rsp			/* switch to the new stack */
movq %rdx, %rdi			/* arg1 */
movq %rsi, %r11			/* r11 = func */
movq %rcx, %rsi			/* arg2 */
INDIRECT_CALL_REG(r11)		/* retpoline-safe call func(arg1, arg2) */
leave				/* back to the original stack */
ret
SET_SIZE(switch_sp_and_call)
/*
 * void kmdb_enter(void)
 *
 * Drop into the kernel debugger: disable interrupts (saving the prior
 * state), raise the debugger-entry trap, then restore the saved
 * interrupt state on resume.
 */
ENTRY_NP(kmdb_enter)
pushq %rbp
movq %rsp, %rbp
call intr_clear			/* rax = saved interrupt state */
int $T_DBGENTR			/* trap into the debugger */
movq %rax, %rdi			/* restore saved state on return */
call intr_restore
leave
ret
SET_SIZE(kmdb_enter)
/*
 * return_instr() — a bare return, used where a no-op function pointer
 * target is needed; rep; ret is the predictor-friendly form.
 */
ENTRY_NP(return_instr)
rep; ret
SET_SIZE(return_instr)
/*
 * ulong_t getflags(void)
 *
 * Return the current rflags.  Under Xen the hardware PS_IE bit is
 * meaningless to the guest, so it is rewritten from the vcpu
 * event-channel mask (with preemption disabled around the per-vcpu
 * access so we stay on the same vcpu).
 */
ENTRY(getflags)
pushfq
popq %rax			/* rax = rflags */
#if defined(__xpv)
CURTHREAD(%rdi)
KPREEMPT_DISABLE(%rdi)		/* pin to this vcpu */
CURVCPU(%r11)
andq $_BITNOT(PS_IE), %rax	/* clear the hardware PS_IE */
XEN_TEST_UPCALL_MASK(%r11)	/* events masked? */
jnz 1f
orq $PS_IE, %rax		/* unmasked: report PS_IE set */
1:
KPREEMPT_ENABLE_NOKP(%rdi)
#endif
ret
SET_SIZE(getflags)
/*
 * ftrace_interrupt_disable() — return the current rflags and disable
 * interrupts.
 * ftrace_interrupt_enable(flags) — restore a previously returned
 * rflags wholesale via popfq (re-enabling interrupts iff PS_IE was
 * set in it).
 */
ENTRY(ftrace_interrupt_disable)
pushfq
popq %rax			/* rax = saved rflags */
CLI(%rdx)			/* disable interrupts */
ret
SET_SIZE(ftrace_interrupt_disable)

ENTRY(ftrace_interrupt_enable)
pushq %rdi
popfq				/* reload the saved rflags */
ret
SET_SIZE(ftrace_interrupt_enable)
/*
 * clflush_insn(addr) — flush the cache line containing addr.
 * mfence_insn()      — full memory fence.
 * Exposed as callable functions so C code can use them without
 * inline-assembly support.
 */
ENTRY(clflush_insn)
clflush (%rdi)
ret
SET_SIZE(clflush_insn)

ENTRY(mfence_insn)
mfence
ret
SET_SIZE(mfence_insn)
/*
 * void vmware_port(int cmd, uint32_t *regs)
 *
 * Talk to the VMware hypervisor backdoor: load the magic value and
 * command, issue `in` on the hypervisor port, and store the four
 * returned registers (eax/ebx/ecx/edx) into regs[0..3].
 */
ENTRY(vmware_port)
pushq %rbx			/* rbx is callee-saved */
movl $VMWARE_HVMAGIC, %eax	/* backdoor magic */
movl $0xffffffff, %ebx
movl %edi, %ecx			/* backdoor command */
movl $VMWARE_HVPORT, %edx	/* backdoor port */
inl (%dx)			/* hypervisor traps the port access */
movl %eax, (%rsi)		/* regs[0] = eax */
movl %ebx, 4(%rsi)		/* regs[1] = ebx */
movl %ecx, 8(%rsi)		/* regs[2] = ecx */
movl %edx, 12(%rsi)		/* regs[3] = edx */
popq %rbx
ret
SET_SIZE(vmware_port)