MSR_DR

MSR_DR is the Data Relocate bit of the PowerPC Machine State Register (in Linux, bit 4, mask 0x10; its companion MSR_IR, Instruction Relocate, is bit 5, mask 0x20). When MSR_DR is set, data accesses go through MMU address translation; when it is clear, data accesses use real (physical) addressing. The snippets below collect representative uses of the bit across the kernel and KVM, grouped by pattern.
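Before the listing, a minimal sketch of the idiom many of these tests serve: temporarily dropping to real mode, then restoring the caller's translation state. The helper and its callback are hypothetical, not kernel API; mfmsr(), mtmsr() and the MSR_* masks are the real primitives from <asm/reg.h>.

#include <asm/reg.h>	/* mfmsr(), mtmsr(), MSR_* bit definitions */

/*
 * Hypothetical helper, for illustration only: run one callback with
 * address translation off, then restore the caller's MSR.  Production
 * code typically also masks external interrupts (MSR_EE) and machine
 * checks (MSR_ME) around the switch, as some lines below do.
 */
static void run_in_real_mode(void (*step)(void))
{
	unsigned long msr = mfmsr();

	mtmsr(msr & ~(MSR_IR | MSR_DR));	/* translation off */
	step();					/* real addressing here */
	mtmsr(msr);				/* restore translation */
}

/* Runtime test: is data translation currently on? */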
if (mfmsr() & MSR_DR) {
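
/* Canonical kernel MSR values: machine check (ME) and recoverable
 * interrupt (RI) on, both translations on; __MSR adds hypervisor
 * state (HV). */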
#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
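
/* Negated test: true when data accesses are currently in real mode. */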
if (!(mfmsr() & MSR_DR))
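
/* Tail of a condition on saved state: fires when the interrupted
 * context had either relocate bit clear, i.e. ran at least partly in
 * real mode. */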
(!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
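
/* Per-CPU setup: a kernel MSR image with both translation bits
 * stripped, for real-mode entry paths. */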
new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
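
/* Bit-to-name table entry, used when pretty-printing MSR contents in
 * register dumps. */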
{MSR_DR, "DR"},
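
/* Tests against the combined IR|DR mask: both bits set means fully
 * translated (virtual) mode. */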
if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
const unsigned long mask = MSR_IR | MSR_DR;
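
/* KVM: pick the relocate bit relevant to the access at hand, MSR_DR
 * for data accesses, MSR_IR for instruction fetches. */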
int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
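
/* Dispatching on all four IR/DR combinations (see the sketch after
 * the HV variants below): */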
if (msr & (MSR_DR|MSR_IR)) {
switch (msr & (MSR_DR|MSR_IR)) {
case MSR_DR:
case MSR_DR|MSR_IR:
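
/* The HV (hypervisor) flavours of the same selection: */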
int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
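
The switch statements above dispatch on all four IR/DR combinations. As a sketch of what the cases mean (the enum and function are hypothetical, for illustration only):

enum xlate_mode { XLATE_NONE, XLATE_DATA, XLATE_INST, XLATE_FULL };

/* Hypothetical classifier for the four translation states. */
static enum xlate_mode classify_xlate(unsigned long msr)
{
	switch (msr & (MSR_DR | MSR_IR)) {
	case 0:
		return XLATE_NONE;	/* full real mode */
	case MSR_DR:
		return XLATE_DATA;	/* data translated, fetches real */
	case MSR_IR:
		return XLATE_INST;	/* fetches translated, data real */
	default:
		return XLATE_FULL;	/* both translated */
	}
}

/* Requiring, or switching into, fully translated mode: */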
(msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR) ) {
new_msr |= MSR_IR | MSR_DR;
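
/* Dropping to real mode; clearing MSR_RI as well marks the window as
 * non-recoverable. */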
__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
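
/* Data access with the guest's data translation on: */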
if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
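
/* Branching, in the non-global case, on whether the CPU currently
 * runs with any translation enabled: */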
if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
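
/* Building a shadow MSR image: machine check, recoverable interrupt,
 * both translations, problem state and external interrupts forced on. */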
smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
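
/* Detecting a mode change: privilege or translation bits differ
 * between the old and new MSR. */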
if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
(old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
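
/* Plain tests of the bit; the last returns true only in the
 * DR-on/IR-off state. */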
if (msr & MSR_DR)
bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
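
/* More KVM dispatches and checks keyed on the same DR-only state
 * (data translated, fetches real): */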
switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
case MSR_DR:
if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
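
/* Inline-asm operand list: the IR|DR mask passed as an immediate
 * ("i") constraint. */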
[p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
msr = msr0 & ~MSR_DR;
mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
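
/* Sanity check: this code must run in real mode. */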
WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
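
/* Collapsing the two bits into a single "MMU on" flag: */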
bool mmu = (msr & (MSR_IR|MSR_DR));
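
/* Tail of a wider mode check: of all the listed MSR bits, only MSR_DR
 * and MSR_SF (64-bit mode) may be set. */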
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {