/*
 * Privileged-register access macros for a Xen paravirtualized kernel.
 * The macro bodies are AT&T-syntax x86 assembler fragments; this header
 * is intended for cpp-preprocessed assembly sources.
 */
#ifndef _SYS_MACHPRIVREGS_H
#define _SYS_MACHPRIVREGS_H
#include <sys/hypervisor.h>
#ifdef __cplusplus
extern "C" {
#endif
#if defined(__amd64)
/* Load the current CPU's vcpu_info pointer (via %gs-relative cpu_t) into r */
#define CURVCPU(r) \
movq %gs:CPU_VCPU_INFO, r
/* Load the current thread pointer into r */
#define CURTHREAD(r) \
movq %gs:CPU_THREAD, r
#elif defined(__i386)
/* Load the current CPU's vcpu_info pointer into r (32-bit) */
#define CURVCPU(r) \
movl %gs:CPU_VCPU_INFO, r
/* Load the current thread pointer into r (32-bit) */
#define CURTHREAD(r) \
movl %gs:CPU_THREAD, r
#endif
/*
 * Event-channel upcall primitives.  Each operates on the vcpu_info
 * structure whose address is already in register r.  The upcall mask is
 * this file's stand-in for the hardware interrupt-enable state: mask set
 * means event delivery ("interrupts") disabled (see CLI/STI below).
 */
/* Set flags from the pending byte: ZF clear iff an upcall is pending */
#define XEN_TEST_EVENT_PENDING(r) \
testb $0xff, VCPU_INFO_EVTCHN_UPCALL_PENDING(r)
/* Mask event delivery */
#define XEN_SET_UPCALL_MASK(r) \
movb $1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
/* Copy the current upcall mask byte into 'mask' */
#define XEN_GET_UPCALL_MASK(r, mask) \
movb VCPU_INFO_EVTCHN_UPCALL_MASK(r), mask
/* Test the mask: ZF clear iff events are currently masked */
#define XEN_TEST_UPCALL_MASK(r) \
testb $1, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
/* Unmask event delivery; caller must already hold the mask set (DEBUG-checked) */
#define XEN_CLEAR_UPCALL_MASK(r) \
ASSERT_UPCALL_MASK_IS_SET; \
movb $0, VCPU_INFO_EVTCHN_UPCALL_MASK(r)
/*
 * DEBUG support.
 *
 * ASSERT_UPCALL_MASK_IS_SET panics (at most once, gated by stistipanic)
 * if the vcpu upcall mask is clear when it must be set, then records the
 * asserting PC in the per-CPU laststi[] array.  SAVE_CLI_LOCATION records
 * the PC of the most recent CLI in the per-CPU lastcli[] array.  Both
 * expand to nothing on non-DEBUG builds.
 *
 * Fixes relative to the previous revision:
 *  - both SAVE_CLI_LOCATION variants ended with a stray trailing
 *    backslash, splicing the following preprocessor directive into the
 *    macro body (line splicing precedes directive recognition);
 *  - amd64 used absolute "leaq lastcli" where the laststi case above
 *    correctly uses RIP-relative addressing.
 */
#ifdef DEBUG
#if defined(__amd64)
#define ASSERT_UPCALL_MASK_IS_SET \
pushq %r11; \
CURVCPU(%r11); \
XEN_TEST_UPCALL_MASK(%r11); \
jne 6f; /* mask is set: assertion holds */ \
cmpl $0, stistipanic(%rip); \
jle 6f; /* panicking disabled or already done */ \
movl $-1, stistipanic(%rip); /* panic only once */ \
movq stistimsg(%rip), %rdi; \
xorl %eax, %eax; /* variadic call: no vector args */ \
call panic; \
6: pushq %rax; \
pushq %rbx; \
movl %gs:CPU_ID, %eax; \
leaq .+0(%rip), %r11; /* PC of this expansion */ \
leaq laststi(%rip), %rbx; \
movq %r11, (%rbx, %rax, 8); /* laststi[cpu_id] = PC */ \
popq %rbx; \
popq %rax; \
popq %r11
#define SAVE_CLI_LOCATION \
pushq %rax; \
pushq %rbx; \
pushq %rcx; \
movl %gs:CPU_ID, %eax; \
leaq .+0(%rip), %rcx; /* PC of this expansion */ \
leaq lastcli(%rip), %rbx; /* RIP-relative, matching laststi above */ \
movq %rcx, (%rbx, %rax, 8); /* lastcli[cpu_id] = PC */ \
popq %rcx; \
popq %rbx; \
popq %rax
#elif defined(__i386)
#define ASSERT_UPCALL_MASK_IS_SET \
pushl %ecx; \
CURVCPU(%ecx); \
XEN_TEST_UPCALL_MASK(%ecx); \
jne 6f; /* mask is set: assertion holds */ \
cmpl $0, stistipanic; \
jle 6f; /* panicking disabled or already done */ \
movl $-1, stistipanic; /* panic only once */ \
movl stistimsg, %ecx; \
pushl %ecx; \
call panic; \
6: pushl %eax; \
pushl %ebx; \
movl %gs:CPU_ID, %eax; \
leal .+0, %ecx; /* PC of this expansion */ \
leal laststi, %ebx; \
movl %ecx, (%ebx, %eax, 4); /* laststi[cpu_id] = PC */ \
popl %ebx; \
popl %eax; \
popl %ecx
#define SAVE_CLI_LOCATION \
pushl %eax; \
pushl %ebx; \
pushl %ecx; \
movl %gs:CPU_ID, %eax; \
leal .+0, %ecx; /* PC of this expansion */ \
leal lastcli, %ebx; \
movl %ecx, (%ebx, %eax, 4); /* lastcli[cpu_id] = PC */ \
popl %ecx; \
popl %ebx; \
popl %eax
#endif
#else
#define ASSERT_UPCALL_MASK_IS_SET
#define SAVE_CLI_LOCATION
#endif
#define KPREEMPT_DISABLE(t) \
addb $1, T_PREEMPT(t)
#define KPREEMPT_ENABLE_NOKP(t) \
subb $1, T_PREEMPT(t)
#define CLI(r) \
CURTHREAD(r); \
KPREEMPT_DISABLE(r); \
CURVCPU(r); \
XEN_SET_UPCALL_MASK(r); \
SAVE_CLI_LOCATION; \
CURTHREAD(r); \
KPREEMPT_ENABLE_NOKP(r)
#define CLIRET(r, ret) \
CURTHREAD(r); \
KPREEMPT_DISABLE(r); \
CURVCPU(r); \
XEN_GET_UPCALL_MASK(r, ret); \
XEN_SET_UPCALL_MASK(r); \
SAVE_CLI_LOCATION; \
CURTHREAD(r); \
KPREEMPT_ENABLE_NOKP(r)
#if defined(__amd64)
#define STI_CLOBBER \
CURVCPU(%r11); \
ASSERT_UPCALL_MASK_IS_SET; \
movw $0x100, %ax; \
movw $0, %di; \
lock; \
cmpxchgw %di, VCPU_INFO_EVTCHN_UPCALL_PENDING(%r11); \
jz 7f; \
movl $__HYPERVISOR_sched_op, %eax; \
movl $SCHEDOP_block, %edi; \
pushq %rsi; \
pushq %rcx; \
pushq %rdx; \
pushq %r8; \
pushq %r9; \
pushq %r10; \
TRAP_INSTR; \
popq %r10; \
popq %r9; \
popq %r8; \
popq %rdx; \
popq %rcx; \
popq %rsi; \
7:
#define STI \
pushq %r11; \
pushq %rdi; \
pushq %rax; \
STI_CLOBBER; \
popq %rax; \
popq %rdi; \
popq %r11
#elif defined(__i386)
#define STI_CLOBBER \
CURVCPU(%ecx); \
ASSERT_UPCALL_MASK_IS_SET; \
movw $0x100, %ax; \
movw $0, %bx; \
lock; \
cmpxchgw %bx, VCPU_INFO_EVTCHN_UPCALL_PENDING(%ecx); \
jz 7f; \
movl $__HYPERVISOR_sched_op, %eax; \
movl $SCHEDOP_block, %ebx; \
TRAP_INSTR; \
7:
#define STI \
pushl %eax; \
pushl %ebx; \
pushl %ecx; \
STI_CLOBBER; \
popl %ecx; \
popl %ebx; \
popl %eax
#endif
/*
 * Convert between the PS_IE bit in a saved flags value (rfl) and the
 * vcpu event mask.
 *
 * IE_TO_EVENT_MASK: if PS_IE is clear in rfl, mask events (CLI);
 * otherwise unmask them (STI).  rtmp is clobbered.
 *
 * EVENT_MASK_TO_IE: clear PS_IE in rfl, then set it again only if
 * events are currently unmasked, yielding a flags image that reflects
 * the virtual interrupt state.  rtmp is clobbered.
 */
#if defined(__amd64)
#define IE_TO_EVENT_MASK(rtmp, rfl) \
testq $PS_IE, rfl; \
jnz 4f; \
CLI(rtmp); \
jmp 5f; \
4: STI; \
5:
#define EVENT_MASK_TO_IE(rtmp, rfl) \
andq $_BITNOT(PS_IE), rfl; \
CURVCPU(rtmp); \
XEN_TEST_UPCALL_MASK(rtmp); \
jnz 1f; /* masked: leave PS_IE clear */ \
orq $PS_IE, rfl; \
1:
#elif defined(__i386)
#define IE_TO_EVENT_MASK(rtmp, rfl) \
testl $PS_IE, rfl; \
jnz 4f; \
CLI(rtmp); \
jmp 5f; \
4: STI; \
5:
#define EVENT_MASK_TO_IE(rtmp, rfl) \
andl $_BITNOT(PS_IE), rfl; \
CURVCPU(rtmp); \
XEN_TEST_UPCALL_MASK(rtmp); \
jnz 1f; /* masked: leave PS_IE clear */ \
orl $PS_IE, rfl; \
1:
#endif
/*
 * Load a known-good flags image (F_ON) into the real flags register,
 * then unmask events via STI so the virtual interrupt state agrees.
 */
#if defined(__amd64)
#define ENABLE_INTR_FLAGS \
pushq $F_ON; \
popfq; \
STI
#elif defined(__i386)
#define ENABLE_INTR_FLAGS \
pushl $F_ON; \
popfl; \
STI
#endif
/*
 * Return-from-trap support.  A PV guest returns from a trap or
 * interrupt by making a __HYPERVISOR_iret hypercall instead of
 * executing iret directly.
 */
#if defined(__amd64)
#if defined(DEBUG)
/*
 * DEBUG check: when returning to kernel code, the lwp must not have a
 * register update (PCB_RUPDATE) pending; trap via ud2 if it does.
 * NOTE(review): the cmpw assumes the saved %cs of the pending iret
 * frame sits at 0x10(%rsp) after the pushq -- confirm against callers.
 */
#define __ASSERT_NO_RUPDATE_PENDING \
pushq %r15; \
cmpw $KCS_SEL, 0x10(%rsp); \
je 1f; /* returning to kernel: nothing to check */ \
movq %gs:CPU_THREAD, %r15; \
movq T_LWP(%r15), %r15; \
testb $0x1, PCB_RUPDATE(%r15); \
je 1f; \
ud2; /* rupdate pending on user return: die here */ \
1: popq %r15
#else
#define __ASSERT_NO_RUPDATE_PENDING
#endif
/*
 * Push 'flag', %rcx, %r11 and %rax on top of the existing frame, then
 * invoke the iret hypercall via syscall.  It should never return; the
 * trailing ud2 traps if it somehow does.
 */
#define HYPERVISOR_IRET(flag) \
__ASSERT_NO_RUPDATE_PENDING; \
pushq $flag; \
pushq %rcx; \
pushq %r11; \
pushq %rax; \
movl $__HYPERVISOR_iret, %eax; \
syscall; \
ud2 /* hypervisor_iret does not return */
#define IRET HYPERVISOR_IRET(0)
/* sysret-style returns also go through the iret hypercall */
#define SYSRETQ HYPERVISOR_IRET(0)
#define SYSRETL ud2 /* 32-bit sysret path must not be reached */
#define SWAPGS /* no-op: %gs handling is not ours to do here */
#define SYSEXITL .byte 0x0f, 0x35 /* hand-encoded sysexit */
#elif defined(__i386)
#define HYPERVISOR_IRET \
pushl %eax; \
movl $__HYPERVISOR_iret, %eax; \
int $0x82; \
ud2 /* hypervisor_iret does not return */
#define IRET HYPERVISOR_IRET
#define SYSRET ud2 /* sysret path must not be reached */
#endif
/*
 * Zero one byte in the upper half of the saved %cs slot of the reg save
 * area (offset +4 of the 8-byte slot on amd64, +2 of the 4-byte slot on
 * i386).  NOTE(review): presumably this scrubs state stashed above the
 * 16-bit selector itself -- confirm against the REGOFF_CS layout.
 */
#if defined(__amd64)
#define CLEAN_CS movb $0, REGOFF_CS+4(%rsp)
#elif defined(__i386)
#define CLEAN_CS movb $0, REGOFF_CS+2(%esp)
#endif
/*
 * Remove from / restore to the stack the two extra words (%rcx, %r11)
 * that sit on top of the trap frame on amd64 hypervisor entry
 * (presumably saved there because syscall-style entry clobbers them --
 * verify against the trap entry code).
 */
#if defined(__amd64)
#define XPV_TRAP_POP \
popq %rcx; \
popq %r11
#define XPV_TRAP_PUSH \
pushq %r11; \
pushq %rcx
#endif
/*
 * Fast-path interrupt entry/exit.
 *
 * amd64: discard the hypervisor's extra %rcx/%r11 words, open a partial
 * regs frame in which only %rsi/%rdi are live, and scrub the saved %cs.
 * FAST_INTR_POP undoes exactly that; FAST_INTR_RETURN asserts events
 * are still masked and returns via the iret hypercall.
 *
 * i386: save and reload the kernel segment registers on entry, restore
 * them on exit.  (Fix: FAST_INTR_PUSH previously ended with a stray
 * line-continuation backslash that spliced the FAST_INTR_POP #define
 * into its body.)
 */
#if defined(__amd64)
#define FAST_INTR_PUSH \
INTGATE_INIT_KERNEL_FLAGS; \
popq %rcx; \
popq %r11; \
subq $REGOFF_RIP, %rsp; /* open a partial regs frame */ \
movq %rsi, REGOFF_RSI(%rsp); \
movq %rdi, REGOFF_RDI(%rsp); \
CLEAN_CS
#define FAST_INTR_POP \
movq REGOFF_RSI(%rsp), %rsi; \
movq REGOFF_RDI(%rsp), %rdi; \
addq $REGOFF_RIP, %rsp
#define FAST_INTR_RETURN \
ASSERT_UPCALL_MASK_IS_SET; \
HYPERVISOR_IRET(0)
#elif defined(__i386)
#define FAST_INTR_PUSH \
cld; \
__SEGREGS_PUSH \
__SEGREGS_LOAD_KERNEL
#define FAST_INTR_POP \
__SEGREGS_POP
#define FAST_INTR_RETURN \
IRET
#endif
/*
 * CR0.TS manipulation must go through the hypervisor in a PV guest:
 * STTS sets TS, CLTS clears it, both via HYPERVISOR_fpu_taskswitch(arg)
 * with arg 1/0 respectively.  %rdi (amd64) and the stack arg (i386) are
 * explicitly preserved/cleaned up; the call itself follows the C
 * calling convention, so other caller-saved registers may be clobbered.
 * On amd64 rtmp is accepted for interface compatibility but unused.
 */
#if defined(__amd64)
#define STTS(rtmp) \
pushq %rdi; \
movl $1, %edi; /* arg 1: set TS */ \
call HYPERVISOR_fpu_taskswitch; \
popq %rdi
#define CLTS \
pushq %rdi; \
xorl %edi, %edi; /* arg 0: clear TS */ \
call HYPERVISOR_fpu_taskswitch; \
popq %rdi
#elif defined(__i386)
#define STTS(r) \
pushl $1; \
call HYPERVISOR_fpu_taskswitch; \
addl $4, %esp
#define CLTS \
pushl $0; \
call HYPERVISOR_fpu_taskswitch; \
addl $4, %esp
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_MACHPRIVREGS_H */