#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/ns_common.h>
#include <linux/fs_pin.h>
extern struct file_system_type nullfs_fs_type;
extern struct list_head notify_list;
/*
 * A mount namespace: one task-visible view of the mount tree.
 * Looked up/refcounted via the embedded ns_common; mounts belonging to the
 * namespace are kept in an rbtree (see move_from_ns() below).
 */
struct mnt_namespace {
	struct ns_common	ns;		/* common namespace header (refcount, inum, ops) */
	struct mount *		root;		/* root mount of this namespace */
	struct {
		struct rb_root	mounts;		/* all mounts in this namespace */
		/*
		 * Cached leftmost/rightmost nodes of @mounts; kept up to date
		 * on removal by move_from_ns().  Presumably used as iteration
		 * cursors by namespace walkers — confirm against users.
		 */
		struct rb_node	*mnt_last_node;
		struct rb_node	*mnt_first_node;
	};
	struct user_namespace	*user_ns;	/* owning user namespace */
	struct ucounts		*ucounts;	/* ucounts of the creator — TODO confirm */
	wait_queue_head_t	poll;		/* waiters polling for mount-table changes */
	u64			seq_origin;	/* NOTE(review): sequence base — users not visible here */
	u64			event;		/* event counter bumped on mount-table changes — confirm */
#ifdef CONFIG_FSNOTIFY
	__u32			n_fsnotify_mask;	/* union of marks' event masks */
	struct fsnotify_mark_connector __rcu	*n_fsnotify_marks;	/* marks on this ns */
#endif
	unsigned int		nr_mounts;	/* number of mounts in this namespace */
	unsigned int		pending_mounts;	/* mounts not yet committed — confirm semantics */
	refcount_t		passive;	/* passive references (structure lifetime) — confirm */
	bool			is_anon;	/* anonymous namespace (see is_anon_ns()) */
} __randomize_layout;
/* Per-cpu mount counters, used on SMP (see struct mount::mnt_pcp). */
struct mnt_pcp {
	int mnt_count;		/* per-cpu contribution to the mount refcount */
	int mnt_writers;	/* per-cpu count of active writers */
};
/* A dentry that has something mounted on it. */
struct mountpoint {
	struct hlist_node m_hash;	/* link in the mountpoint hash table */
	struct dentry *m_dentry;	/* the dentry being mounted upon */
	struct hlist_head m_list;	/* mounts on this mountpoint (via mnt_mp_list) */
};
/*
 * Kernel-internal representation of a mount; wraps the public vfsmount.
 * Convert back and forth with real_mount() / &mnt.
 */
struct mount {
	struct hlist_node mnt_hash;		/* link in the mount hash table */
	struct mount *mnt_parent;		/* parent mount; self if tree root (see mnt_has_parent()) */
	struct dentry *mnt_mountpoint;		/* dentry in the parent we are mounted on */
	struct vfsmount mnt;			/* embedded public part */
	union {
		struct rb_node mnt_node;	/* node in mnt_namespace::mounts while attached */
		struct rcu_head mnt_rcu;	/* for RCU-delayed freeing */
		struct llist_node mnt_llist;	/* for lockless deferred processing — confirm user */
	};
#ifdef CONFIG_SMP
	struct mnt_pcp __percpu *mnt_pcp;	/* per-cpu count/writers */
#else
	int mnt_count;				/* refcount (UP) */
	int mnt_writers;			/* active writers (UP) */
#endif
	struct list_head mnt_mounts;		/* list of children, anchored here */
	struct list_head mnt_child;		/* link in parent's mnt_mounts */
	/*
	 * Per-superblock list of mounts.  mnt_pprev_for_sb is a back-pointer
	 * whose low bit doubles as the WRITE_HOLD tag (the __aligned(1)
	 * annotation permits the tagged, misaligned value) — see
	 * test_write_hold()/set_write_hold()/clear_write_hold().
	 */
	struct mount *mnt_next_for_sb;
	struct mount * __aligned(1) *mnt_pprev_for_sb;
#define WRITE_HOLD 1				/* tag bit in mnt_pprev_for_sb */
	const char *mnt_devname;		/* device name, e.g. /dev/dsk/hda1 */
	struct list_head mnt_list;
	struct list_head mnt_expire;		/* link on an expiry list — confirm */
	struct list_head mnt_share;		/* circular list of shared-peer mounts */
	struct hlist_head mnt_slave_list;	/* list of our slave mounts */
	struct hlist_node mnt_slave;		/* link in master's mnt_slave_list */
	struct mount *mnt_master;		/* propagation master, if any */
	struct mnt_namespace *mnt_ns;		/* containing namespace; may be an ERR_PTR (see is_mounted()) */
	struct mountpoint *mnt_mp;		/* where we are mounted */
	union {
		struct hlist_node mnt_mp_list;	/* link in mnt_mp->m_list while mounted */
		struct hlist_node mnt_umount;	/* reused during umount — confirm */
	};
#ifdef CONFIG_FSNOTIFY
	struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
	__u32 mnt_fsnotify_mask;
	struct list_head to_notify;		/* link on notify_list (see mnt_notify_add()) */
	struct mnt_namespace *prev_ns;		/* namespace we left, for detach notification */
#endif
	int mnt_t_flags;			/* T_* propagation state bits (enum below) */
	int mnt_id;				/* reusable mount id */
	u64 mnt_id_unique;			/* never-reused mount id */
	int mnt_group_id;			/* peer-group identifier */
	int mnt_expiry_mark;			/* true if marked for expiry — confirm */
	struct hlist_head mnt_pins;		/* fs_pin list (linux/fs_pin.h) */
	struct hlist_head mnt_stuck_children;
	struct mount *overmount;		/* mount directly covering us, if any (see topmost_overmount()) */
} __randomize_layout;
/*
 * Bits for mount::mnt_t_flags.  Presumably consumed by the mount
 * propagation code — users are not visible in this header.
 */
enum {
	T_SHARED = 1,			/* mount is shared */
	T_UNBINDABLE = 2,		/* mount is unbindable */
	T_MARKED = 4,			/* internal mark for tree walks — confirm user */
	T_UMOUNT_CANDIDATE = 8,		/* candidate during umount propagation — confirm */
	/*
	 * Flags incompatible with the shared state; NOTE(review): currently
	 * just an alias for T_UNBINDABLE — verify intent against the
	 * propagation code before extending.
	 */
	T_SHARED_MASK = T_UNBINDABLE,
};
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL)
/* Map a public vfsmount back to its enclosing struct mount. */
static inline struct mount *real_mount(struct vfsmount *mnt)
{
	struct mount *m = container_of(mnt, struct mount, mnt);

	return m;
}
/*
 * Does this mount have a parent?  The root of a mount tree is its own
 * parent, so a self-referencing mnt_parent means "no".
 */
static inline int mnt_has_parent(const struct mount *mnt)
{
	return mnt->mnt_parent != mnt;
}
/*
 * Is this vfsmount attached to some namespace?  mnt_ns may legitimately be
 * NULL or an ERR_PTR cookie (e.g. MNT_NS_INTERNAL); neither counts as
 * mounted.
 */
static inline int is_mounted(struct vfsmount *mnt)
{
	struct mnt_namespace *ns = real_mount(mnt)->mnt_ns;

	return !IS_ERR_OR_NULL(ns);
}
extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
extern int __legitimize_mnt(struct vfsmount *, unsigned);
static inline bool __path_is_mountpoint(const struct path *path)
{
struct mount *m = __lookup_mnt(path->mnt, path->dentry);
return m && likely(!(m->mnt.mnt_flags & MNT_SYNC_UMOUNT));
}
extern void __detach_mounts(struct dentry *dentry);
/*
 * Detach everything mounted on @dentry.  Cheap d_mountpoint() check first
 * so the common "nothing mounted" case avoids __detach_mounts().
 */
static inline void detach_mounts(struct dentry *dentry)
{
	if (d_mountpoint(dentry))
		__detach_mounts(dentry);
}
/* Take a reference on a mount namespace (bumps the ns_common count). */
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
	ns_ref_inc(ns);
}
/* Global seqlock protecting the mount hash/tree. */
extern seqlock_t mount_lock;

/* Scoped guard: exclusive writer side of mount_lock. */
DEFINE_LOCK_GUARD_0(mount_writer, write_seqlock(&mount_lock),
		    write_sequnlock(&mount_lock))
/* Scoped guard: excl-reader side (no seqcount retry loop needed). */
DEFINE_LOCK_GUARD_0(mount_locked_reader, read_seqlock_excl(&mount_lock),
		    read_sequnlock_excl(&mount_lock))
/* State for a /proc/.../mounts-style seq_file iteration. */
struct proc_mounts {
	struct mnt_namespace *ns;	/* namespace being listed */
	struct path root;		/* root relative to which mounts are shown */
	int (*show)(struct seq_file *, struct vfsmount *);	/* per-entry formatter */
};
extern const struct seq_operations mounts_op;
extern bool __is_local_mountpoint(const struct dentry *dentry);
/*
 * Is @dentry a mountpoint in the caller's context?  The d_mountpoint()
 * fast path short-circuits the more expensive __is_local_mountpoint().
 */
static inline bool is_local_mountpoint(const struct dentry *dentry)
{
	return d_mountpoint(dentry) && __is_local_mountpoint(dentry);
}
/* Is this an anonymous (detached) mount namespace? */
static inline bool is_anon_ns(struct mnt_namespace *ns)
{
	return ns->is_anon;
}
static inline bool anon_ns_root(const struct mount *m)
{
struct mnt_namespace *ns = READ_ONCE(m->mnt_ns);
return !IS_ERR_OR_NULL(ns) && is_anon_ns(ns) && m == ns->root;
}
/*
 * Is @mnt linked into a namespace's mount rbtree?  Detachment clears the
 * node (RB_CLEAR_NODE in move_from_ns()), so an empty node means "no".
 */
static inline bool mnt_ns_attached(const struct mount *mnt)
{
	return !RB_EMPTY_NODE(&mnt->mnt_node);
}
/* Does this namespace contain no mounts at all? */
static inline bool mnt_ns_empty(const struct mnt_namespace *ns)
{
	return RB_EMPTY_ROOT(&ns->mounts);
}
/*
 * Unlink @mnt from its namespace's mount rbtree.
 *
 * The namespace caches first/last tree nodes as cursors; they must be
 * stepped off @mnt *before* rb_erase() invalidates its node.  The node is
 * then cleared so mnt_ns_attached() reports detached.
 */
static inline void move_from_ns(struct mount *mnt)
{
	struct mnt_namespace *ns = mnt->mnt_ns;
	WARN_ON(!mnt_ns_attached(mnt));
	/* Advance the cached cursors past the node being removed. */
	if (ns->mnt_last_node == &mnt->mnt_node)
		ns->mnt_last_node = rb_prev(&mnt->mnt_node);
	if (ns->mnt_first_node == &mnt->mnt_node)
		ns->mnt_first_node = rb_next(&mnt->mnt_node);
	rb_erase(&mnt->mnt_node, &ns->mounts);
	RB_CLEAR_NODE(&mnt->mnt_node);
}
bool has_locked_children(struct mount *mnt, struct dentry *dentry);
struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
bool previous);
/* Map a generic ns_common back to the enclosing mnt_namespace. */
static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	struct mnt_namespace *mnt_ns;

	mnt_ns = container_of(ns, struct mnt_namespace, ns);
	return mnt_ns;
}
#ifdef CONFIG_FSNOTIFY
/*
 * Queue @m for mount-attach/detach fsnotify delivery.
 *
 * If either the namespace the mount is entering (mnt_ns) or the one it is
 * leaving (prev_ns) carries fsnotify marks, link the mount onto the global
 * notify_list for later event generation; otherwise nobody is listening,
 * so just record the current namespace as prev_ns.
 *
 * Fix: the list_add_tail() argument had been mojibaked to "¬ify_list"
 * (U+00AC NOT SIGN) — restored to &notify_list, matching the extern
 * declaration at the top of this header.
 */
static inline void mnt_notify_add(struct mount *m)
{
	if ((m->mnt_ns && m->mnt_ns->n_fsnotify_marks) ||
	    (m->prev_ns && m->prev_ns->n_fsnotify_marks))
		list_add_tail(&m->to_notify, &notify_list);
	else
		m->prev_ns = m->mnt_ns;
}
#else
/* !CONFIG_FSNOTIFY: nothing to notify. */
static inline void mnt_notify_add(struct mount *m)
{
}
#endif
/* Walk up the overmount chain and return the topmost covering mount. */
static inline struct mount *topmost_overmount(struct mount *m)
{
	for (; m->overmount; m = m->overmount)
		;
	return m;
}
/*
 * Test the WRITE_HOLD tag bit smuggled into the low bit of a (possibly
 * misaligned, hence __aligned(1)) mnt_pprev_for_sb value.
 */
static inline bool __test_write_hold(struct mount * __aligned(1) *val)
{
	unsigned long bits = (unsigned long)val;

	return (bits & WRITE_HOLD) != 0;
}
/* Is the WRITE_HOLD tag set on this mount's mnt_pprev_for_sb pointer? */
static inline bool test_write_hold(const struct mount *m)
{
	return __test_write_hold(m->mnt_pprev_for_sb);
}
/* Set the WRITE_HOLD tag in the low bit of mnt_pprev_for_sb. */
static inline void set_write_hold(struct mount *m)
{
	unsigned long bits = (unsigned long)m->mnt_pprev_for_sb;

	m->mnt_pprev_for_sb = (void *)(bits | WRITE_HOLD);
}
/* Clear the WRITE_HOLD tag from the low bit of mnt_pprev_for_sb. */
static inline void clear_write_hold(struct mount *m)
{
	unsigned long bits = (unsigned long)m->mnt_pprev_for_sb;

	m->mnt_pprev_for_sb = (void *)(bits & ~(unsigned long)WRITE_HOLD);
}
struct mnt_namespace *mnt_ns_from_dentry(struct dentry *dentry);