_PAGE_DIRTY
/* Nonzero iff the PTE's dirty bit (_PAGE_DIRTY) is set; used as a boolean. */
extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
/* Union of the dirty bit and the kernel/user write-enable bits; cleared/set together. */
#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
/*
 * Exclusive-swap-entry marker overlays the dirty bit.  Only meaningful in a
 * non-present (swap) PTE, where the hardware dirty bit is unused —
 * NOTE(review): confirm against this arch's swap-entry layout.
 */
#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
/* Nonzero iff the PTE's dirty bit is set. */
#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
/* pte_mkclean(pte): return a copy of the PTE with the dirty bit cleared. */
PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
/* pte_mkdirty(pte): return a copy of the PTE with the dirty bit set. */
PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
pte_val(pte) |= _PAGE_DIRTY;
pte_val(pte) |= _PAGE_DIRTY;
_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
pte_val(pte) &= ~_PAGE_DIRTY;
pte_val(pte) |= _PAGE_DIRTY;
return pte_val(pte) & _PAGE_DIRTY;
/* Bits that together describe a writable mapping; typically cleared as a pair on write-protect. */
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)
/* Dirty if either bit is set — NOTE(review): presumably one is the HW dirty bit and the other the SW "modified" bit; confirm against this arch's PTE layout. */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
pte_val(pte) |= _PAGE_DIRTY;
pte_val(pte) |= _PAGE_DIRTY;
pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
if (pte_val(pte) & _PAGE_DIRTY)
pmd_val(pmd) |= _PAGE_DIRTY;
pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
pmd_val(pmd) |= _PAGE_DIRTY;
if (pmd_val(pmd) & _PAGE_DIRTY)
pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
/*
 * Dirty-bit accessors.  The PTE is passed and returned by value; the
 * mkclean/mkdirty helpers modify only their local copy.
 */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
/* Bits preserved when the protection of a PTE is changed (PFN, A/D, cache mode) — presumably the pte_modify() preservation mask; confirm. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
/* Kernel mapping: present, pre-accessed and pre-dirtied (no A/D faults) plus arch cache bits. */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)
_PAGE_ACCESSED | _PAGE_DIRTY);
_PAGE_DIRTY | _PAGE_READWRITE);
_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
/* Exclusive-swap marker reuses the dirty bit; valid only in non-present (swap) PTEs — confirm against this arch's swap layout. */
#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
/* Bits preserved across a protection change (PFN plus accessed/dirty). */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* All bits involved in granting write access, including the HW write-enable bit. */
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
/* Nonzero iff the PTE's dirty bit is set; used as a boolean. */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
pte_update(ptep, 0, _PAGE_DIRTY);
#define _PAGE_SILENT_WRITE _PAGE_DIRTY
pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
| _PAGE_DIRTY);
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
pte_val(pte) &= ~_PAGE_DIRTY;
pte_val(pte) |= _PAGE_DIRTY;
{ return pte_val(pte) & _PAGE_DIRTY; }
/* Bits preserved across a protection change (PFN plus accessed/dirty). */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
| _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
| _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
| _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
/* Nonzero iff the PTE's dirty bit is set; used as a boolean. */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
pte_val(pte) &= ~(_PAGE_DIRTY);
pte_val(pte) |= _PAGE_DIRTY;
#define _PAGE_IOREMAP (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * Dirty-bit accessors.  The PTE is passed and returned by value; the
 * mkclean/mkdirty helpers modify only their local copy.
 */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
/* Dirty-bit test; !! normalizes the masked value to 0/1. */
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
return __pte(pte_val(pte) | _PAGE_DIRTY);
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
/* Privileged (kernel-only) RW mapping; dirty pre-set to avoid a dirty fault. */
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
/* As above, plus execute permission. */
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
#define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
/* Kernel RW mapping (shared bit + dirty); dirty pre-set so no dirty fault is taken. */
#define _PAGE_KERNEL_RW (_PAGE_SH | _PAGE_DIRTY)
/* As above, plus execute permission. */
#define _PAGE_KERNEL_RWX (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
return __pte(pte_val(pte) | _PAGE_DIRTY);
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
/* Book3E-style kernel RW mapping: supervisor read+write base-access-perm bits, dirty pre-set. */
#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
/* As above, plus supervisor execute. */
#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX)
/* Alternate (classic) encodings of the same kernel RW/RWX permissions. */
#define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RWX (_PAGE_RWX | _PAGE_DIRTY)
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
pgflags |= _PAGE_DIRTY;
pte = __pte(pte_val(pte) | _PAGE_DIRTY);
pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
pgflags |= _PAGE_DIRTY;
perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
new_pte |= _PAGE_DIRTY;
new_pte |= _PAGE_DIRTY;
new_pte |= _PAGE_DIRTY;
new_pmd |= _PAGE_DIRTY;
if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
if (pteflags & _PAGE_DIRTY)
if ((access & _PAGE_WRITE) && !(old_pte & _PAGE_DIRTY))
new_pte |= _PAGE_DIRTY;
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
.mask = _PAGE_DIRTY,
.val = _PAGE_DIRTY,
.mask = _PAGE_DIRTY,
.val = _PAGE_DIRTY,
.mask = _PAGE_DIRTY,
.val = _PAGE_DIRTY,
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
| _PAGE_DIRTY \
return pte_val(pte) & _PAGE_DIRTY;
return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
.mask = _PAGE_DIRTY,
if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
if (pte_val(pte) & _PAGE_DIRTY)
pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
return (pte_val(pte) & _PAGE_DIRTY) != 0;
pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
_PAGE_DIRTY | _PAGE_SPECIAL)
_PAGE_DIRTY | _PAGE_ACCESSED | \
__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
_PAGE_DIRTY | _PAGE_ACCESSED | \
__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
/* Two-word PTE variant: the dirty bit lives in the low word (pte_low). */
#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
/* pte_mkclean/pte_mkdirty operate on the low word only. */
PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
return pte_get_bits(pte, _PAGE_DIRTY);
pte_clear_bits(pte, _PAGE_DIRTY);
pte_set_bits(pte, _PAGE_DIRTY);
/* Attributes for a user-visible page-table page: present, RW, user, pre-set A/D. */
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Same, but kernel-only (no _PAGE_USER). */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Bits preserved across a protection change (PFN plus accessed/dirty). */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/* Default kernel mapping: present, RW, dirty/accessed pre-set so no A/D faults are taken. */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
(pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
(pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
(_PAGE_DIRTY | _PAGE_PSE);
(pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
(_PAGE_DIRTY | _PAGE_PSE);
pte = pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
return pte_set_flags(pte, _PAGE_DIRTY);
pmd = pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
return pmd_set_flags(pmd, _PAGE_DIRTY);
pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
WARN_ON_ONCE((pgprot_val(pgprot) & (_PAGE_DIRTY | _PAGE_RW)) ==
_PAGE_DIRTY);
val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
#define _PAGE_DIRTY_BITS (_PAGE_DIRTY | _PAGE_SAVED_DIRTY)
#define ___D _PAGE_DIRTY
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW | _PAGE_DIRTY), 0);
pgprot_t clr = __pgprot(_PAGE_RW | _PAGE_DIRTY);
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
/* Bits preserved across a protection change (PFN plus accessed/dirty); two config-dependent but identical definitions. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Nonzero iff the PTE's dirty bit is set; used as a boolean. */
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
se->val64 &= ~_PAGE_DIRTY;
pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY;
/* Bits that together describe a writable mapping; typically cleared as a pair on write-protect. */
#define __WRITEABLE (_PAGE_DIRTY | _PAGE_WRITE)