MSR_KERNEL -- uses of this identifier across the powerpc kernel source:
return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
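This definition builds the user-mode MSR on top of MSR_KERNEL: MSR_PR (problem state, i.e. user mode) and MSR_EE (external interrupt enable) are added to the kernel's baseline bits.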
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
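The comparison in this if, in the kvmppc return above, and in the swap/cross_endian assignments further down is the same idiom: only the MSR_LE bit is examined, and it is tested against the LE bit compiled into MSR_KERNEL. A minimal sketch of that idiom, using a hypothetical helper name that does not exist in the kernel and assuming the MSR_* masks from asm/reg.h:

/* Hypothetical helper, not from the kernel source. */
static inline bool msr_is_cross_endian(unsigned long msr)
{
	/* True when the saved context runs opposite-endian to the kernel. */
	return (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
}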
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */
li r10, MSR_KERNEL /* can take exceptions */
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL); \
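These assembly hits look like 32-bit exception entry/exit paths: MSR_KERNEL is loaded into a scratch register (r10/r11) and then written to the MSR, sometimes with bits masked off first. A hedged reading of the masks, consistent with the inline comments already present: clearing MSR_RI marks a window in which a further exception would not be recoverable, and clearing MSR_ME/MSR_DE/MSR_CE keeps machine-check, debug and critical interrupts disabled while the handler sets up.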
regs.msr = MSR_KERNEL;
new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
get_paca()->kernel_msr = MSR_KERNEL;
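The paca (the per-CPU data area on 64-bit) carries its own kernel_msr copy, which the exception entry code loads rather than recomputing MSR_KERNEL. The first assignment, presumably made during early boot, masks off MSR_IR and MSR_DR so address translation stays disabled until the MMU has been brought up; the second restores the full MSR_KERNEL value once it has.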
regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
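This idiom clears MSR_LE in the saved MSR and ORs back in the kernel's own LE setting, so the context being returned to is forced to the kernel's endianness while every other MSR bit is preserved: on a little-endian kernel (MSR_KERNEL & MSR_LE) equals MSR_LE and the bit is forced on; on a big-endian kernel that term is zero and the bit is forced off.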
swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
exp.msr = MSR_KERNEL;
got.msr = MSR_KERNEL;
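The names exp and got read like the expected and observed register images of a self-test, both seeded with MSR_KERNEL before results are compared; which test this is cannot be told from the listing alone, so that is only an inference from the names.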
mtmsr(MSR_KERNEL);
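The last hit resets the MSR to the plain kernel value with mtmsr(), dropping whatever bits the previous context had set; the mtmsr (rather than mtmsrd) form suggests 32-bit code, though that is an inference from the mnemonic rather than from anything else in this listing.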