#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H
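
/* Avoid a dependency loop by declaring here. */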
extern int rcuwait_wake_up(struct rcuwait *w);
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
#include <linux/cleanup.h>
#include <linux/sched/mm.h>
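
/* For static initialization of an mm_struct, such as init_mm. */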
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
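
/*
 * Tracepoints for mmap_lock acquisition and release. The wrappers below
 * check tracepoint_enabled() first so the probe calls are only made while
 * the tracepoints are actually active.
 */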
DECLARE_TRACEPOINT(mmap_lock_start_locking);
DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
DECLARE_TRACEPOINT(mmap_lock_released);
#ifdef CONFIG_TRACING
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
bool success);
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
bool write)
{
if (tracepoint_enabled(mmap_lock_start_locking))
__mmap_lock_do_trace_start_locking(mm, write);
}
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
bool write, bool success)
{
if (tracepoint_enabled(mmap_lock_acquire_returned))
__mmap_lock_do_trace_acquire_returned(mm, write, success);
}
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
if (tracepoint_enabled(mmap_lock_released))
__mmap_lock_do_trace_released(mm, write);
}
#else /* !CONFIG_TRACING */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
bool write)
{
}
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
bool write, bool success)
{
}
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}
#endif /* CONFIG_TRACING */
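
/*
 * Assert that mmap_lock is held. With lockdep these check the actual lock
 * owner; without it they fall back to inspecting the rwsem state.
 */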
static inline void mmap_assert_locked(const struct mm_struct *mm)
{
rwsem_assert_held(&mm->mmap_lock);
}
static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
rwsem_assert_held_write(&mm->mmap_lock);
}
#ifdef CONFIG_PER_VMA_LOCK
#ifdef CONFIG_LOCKDEP
#define __vma_lockdep_map(vma) (&(vma)->vmlock_dep_map)
#else
#define __vma_lockdep_map(vma) NULL
#endif
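
/*
 * Lockdep annotations for the per-VMA lock: read-locking a VMA is modelled
 * as a shared acquisition and write-locking as an exclusive one. All of
 * these compile to no-ops when lockdep is disabled.
 */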
#define __vma_lockdep_acquire_read(vma) \
lock_acquire_shared(__vma_lockdep_map(vma), 0, 1, NULL, _RET_IP_)
#define __vma_lockdep_release_read(vma) \
lock_release(__vma_lockdep_map(vma), _RET_IP_)
#define __vma_lockdep_acquire_exclusive(vma) \
lock_acquire_exclusive(__vma_lockdep_map(vma), 0, 0, NULL, _RET_IP_)
#define __vma_lockdep_release_exclusive(vma) \
lock_release(__vma_lockdep_map(vma), _RET_IP_)
#define __vma_lockdep_stat_mark_acquired(vma) \
lock_acquired(__vma_lockdep_map(vma), _RET_IP_)
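
/*
 * mm_lock_seq brackets the mmap_lock write side: the seqcount is started
 * when mmap_lock is taken for writing and ended just before it is released
 * (or downgraded), which is also what invalidates any per-VMA write locks
 * taken during that critical section.
 */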
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
seqcount_init(&mm->mm_lock_seq);
}
static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
{
do_raw_write_seqcount_begin(&mm->mm_lock_seq);
}
static inline void mm_lock_seqcount_end(struct mm_struct *mm)
{
ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
do_raw_write_seqcount_end(&mm->mm_lock_seq);
}
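
/*
 * Speculative read section paired with mmap_lock_speculate_retry(). A
 * (hypothetical) caller might do:
 *
 *	unsigned int seq;
 *
 *	if (mmap_lock_speculate_try_begin(mm, &seq)) {
 *		... lockless reads of mm state ...
 *		if (!mmap_lock_speculate_retry(mm, seq))
 *			return;		// reads were consistent
 *	}
 *	... fall back to taking mmap_lock ...
 */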
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
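	/*
	 * Since mmap_lock is a sleeping lock, and waiting for it to become
	 * unlocked is more or less equivalent with taking it ourselves, don't
	 * bother with the speculative path if mmap_lock is already write-locked
	 * and take the slow path, which takes the lock.
	 */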
return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
}
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
return read_seqcount_retry(&mm->mm_lock_seq, seq);
}
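
/*
 * Initialize per-VMA lock state. vm_lock_seq is set to UINT_MAX so a fresh
 * VMA never compares equal to mm->mm_lock_seq.sequence, i.e. it starts out
 * not write-locked. Passing reset_refcnt == false leaves vm_refcnt untouched.
 */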
static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_key;
lockdep_init_map(__vma_lockdep_map(vma), "vm_lock", &lockdep_key, 0);
#endif
if (reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
vma->vm_lock_seq = UINT_MAX;
}
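
/*
 * vm_refcnt encodes both attachment and readers: 0 means detached, 1 means
 * attached with no readers, and each reader holds one extra reference. A
 * writer that needs readers gone sets VM_REFCNT_EXCLUDE_READERS_FLAG; once
 * the count is down to the flag plus at most the attach reference, the
 * readers are excluded.
 */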
static inline bool __vma_are_readers_excluded(int refcnt)
{
return (refcnt & VM_REFCNT_EXCLUDE_READERS_FLAG) &&
refcnt <= VM_REFCNT_EXCLUDE_READERS_FLAG + 1;
}
static inline __must_check unsigned int
__vma_refcount_put_return(struct vm_area_struct *vma)
{
int oldcnt;
if (__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt))
return 0;
return oldcnt - 1;
}
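
/*
 * Drop a reader reference. If a writer is draining readers (the exclude
 * flag is set) and this was the last one, wake the writer.
 */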
static inline void vma_refcount_put(struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
int newcnt;
__vma_lockdep_release_read(vma);
newcnt = __vma_refcount_put_return(vma);
if (newcnt && __vma_are_readers_excluded(newcnt))
rcuwait_wake_up(&mm->vma_writer_wait);
}
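
/*
 * Take a VMA read lock while already holding mmap_lock, which guarantees
 * that the VMA cannot be concurrently write-locked. This can still fail if
 * the VMA is detached (vm_refcnt == 0) or the reader count is saturated.
 */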
static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
{
int oldcnt;
mmap_assert_locked(vma->vm_mm);
if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
VM_REFCNT_LIMIT)))
return false;
__vma_lockdep_acquire_read(vma);
return true;
}
static inline bool vma_start_read_locked(struct vm_area_struct *vma)
{
return vma_start_read_locked_nested(vma, 0);
}
static inline void vma_end_read(struct vm_area_struct *vma)
{
vma_refcount_put(vma);
}
static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
{
	const struct mm_struct *mm = vma->vm_mm;

	mmap_assert_write_locked(mm);
	return mm->mm_lock_seq.sequence;
}
static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
{
return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
}
int __vma_start_write(struct vm_area_struct *vma, int state);
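
/*
 * Begin writing to a VMA: exclude concurrent readers under the per-VMA lock
 * until the currently write-locked mmap_lock is dropped or downgraded.
 */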
static inline void vma_start_write(struct vm_area_struct *vma)
{
if (__is_vma_write_locked(vma))
return;
__vma_start_write(vma, TASK_UNINTERRUPTIBLE);
}
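
/*
 * As vma_start_write(), but waiting for readers is killable; returns 0 on
 * success or a negative error if interrupted by a fatal signal.
 */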
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
if (__is_vma_write_locked(vma))
return 0;
return __vma_start_write(vma, TASK_KILLABLE);
}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
}
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
unsigned int refcnt;
if (IS_ENABLED(CONFIG_LOCKDEP)) {
if (!lock_is_held(__vma_lockdep_map(vma)))
vma_assert_write_locked(vma);
return;
}
refcnt = refcount_read(&vma->vm_refcnt);
if (refcnt > 1)
return;
VM_WARN_ON_ONCE_VMA(!refcnt, vma);
vma_assert_write_locked(vma);
}
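
/*
 * Assert that the VMA cannot change from under us: either mmap_lock is held
 * in some mode, or the VMA itself is locked.
 */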
static inline void vma_assert_stabilised(struct vm_area_struct *vma)
{
if (IS_ENABLED(CONFIG_LOCKDEP)) {
if (lockdep_is_held(&vma->vm_mm->mmap_lock))
return;
} else {
if (rwsem_is_locked(&vma->vm_mm->mmap_lock))
return;
}
vma_assert_locked(vma);
}
static inline bool vma_is_attached(struct vm_area_struct *vma)
{
return refcount_read(&vma->vm_refcnt);
}
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
WARN_ON_ONCE(!vma_is_attached(vma));
}
static inline void vma_assert_detached(struct vm_area_struct *vma)
{
WARN_ON_ONCE(vma_is_attached(vma));
}
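
/* 0 -> 1 transition, with release ordering so readers see an initialized VMA. */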
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
vma_assert_write_locked(vma);
vma_assert_detached(vma);
refcount_set_release(&vma->vm_refcnt, 1);
}
void __vma_exclude_readers_for_detach(struct vm_area_struct *vma);
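
/*
 * Mark the VMA detached. In the common case there are no concurrent readers
 * and dropping the attach reference is enough; otherwise readers must first
 * be excluded via __vma_exclude_readers_for_detach().
 */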
static inline void vma_mark_detached(struct vm_area_struct *vma)
{
vma_assert_write_locked(vma);
vma_assert_attached(vma);
if (likely(!__vma_refcount_put_return(vma)))
return;
__vma_exclude_readers_for_detach(vma);
}
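
/*
 * Look up and read-lock the VMA covering @address under RCU, without taking
 * mmap_lock; returns NULL when that is not possible and the caller should
 * fall back to mmap_lock. lock_next_vma() is the iterator-based variant.
 */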
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
struct vma_iterator *iter,
unsigned long address);
#else /* !CONFIG_PER_VMA_LOCK */
static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
return false;
}
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
return true;
}
static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma) { return 0; }
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
static inline void vma_assert_attached(struct vm_area_struct *vma) {}
static inline void vma_assert_detached(struct vm_area_struct *vma) {}
static inline void vma_mark_attached(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma) {}
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address)
{
return NULL;
}
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
mmap_assert_locked(vma->vm_mm);
}
static inline void vma_assert_stabilised(struct vm_area_struct *vma)
{
mmap_assert_locked(vma->vm_mm);
}
#endif /* CONFIG_PER_VMA_LOCK */
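
/*
 * mmap_lock wrappers: all acquisitions and releases go through these so that
 * the tracepoints above and the mm_lock_seq bookkeeping stay in sync.
 */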
static inline void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
__mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
static inline int __must_check mmap_write_lock_killable(struct mm_struct *mm)
{
int ret;
__mmap_lock_trace_start_locking(mm, true);
ret = down_write_killable(&mm->mmap_lock);
if (!ret)
mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
return ret;
}
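
/*
 * Drop all currently-held per-VMA locks. This is called directly before
 * releasing a write-locked mmap_lock (or downgrading it to read-locked) and
 * should normally not be called from anywhere else.
 */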
static inline void vma_end_write_all(struct mm_struct *mm)
{
mmap_assert_write_locked(mm);
mm_lock_seqcount_end(mm);
}
static inline void mmap_write_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, true);
vma_end_write_all(mm);
up_write(&mm->mmap_lock);
}
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
__mmap_lock_trace_acquire_returned(mm, false, true);
vma_end_write_all(mm);
downgrade_write(&mm->mmap_lock);
}
static inline void mmap_read_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, false);
down_read(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, true);
}
static inline int __must_check mmap_read_lock_killable(struct mm_struct *mm)
{
int ret;
__mmap_lock_trace_start_locking(mm, false);
ret = down_read_killable(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, ret == 0);
return ret;
}
static inline bool __must_check mmap_read_trylock(struct mm_struct *mm)
{
bool ret;
__mmap_lock_trace_start_locking(mm, false);
ret = down_read_trylock(&mm->mmap_lock) != 0;
__mmap_lock_trace_acquire_returned(mm, false, ret);
return ret;
}
static inline void mmap_read_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
up_read(&mm->mmap_lock);
}
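
/*
 * Scope-based guard; a (hypothetical) caller can write
 *
 *	guard(mmap_read_lock)(mm);
 *
 * and the read lock is dropped automatically at the end of the scope.
 */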
DEFINE_GUARD(mmap_read_lock, struct mm_struct *,
mmap_read_lock(_T), mmap_read_unlock(_T))
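
/*
 * Unlock the read side from a task other than the one that acquired it
 * (skips the rwsem owner sanity checks).
 */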
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, false);
up_read_non_owner(&mm->mmap_lock);
}
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
return rwsem_is_contended(&mm->mmap_lock);
}
#endif /* _LINUX_MMAP_LOCK_H */