kcsan_ctx
struct kcsan_ctx kcsan_ctx;
.kcsan_ctx = {
static __always_inline struct kcsan_ctx *get_ctx(void)
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
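Reassembled from the two lines above, get_ctx() in kernel/kcsan/core.c picks the task-embedded context when running in task context and the per-CPU kcsan_cpu_ctx otherwise (interrupts). A minimal sketch, with the braces and the comment added here rather than taken from the listing:

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In task context, use the kcsan_ctx embedded in task_struct;
	 * otherwise fall back to the per-CPU context defined further down.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}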
struct kcsan_ctx *ctx = get_ctx();
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
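All of the helpers above take the kcsan_ctx as their first argument. As one example of the shape, kcsan_is_enabled() is, to the best of our knowledge, a check of the global enable flag combined with the per-context disable count; the body below is a sketch from memory, not part of this listing:

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
	/* Sketch: assumes a global kcsan_enabled flag and ctx->disable_count. */
	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}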
struct kcsan_ctx *ctx = get_ctx();
struct kcsan_ctx *ctx = get_ctx();
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
struct kcsan_ctx *ctx = get_ctx();
struct kcsan_ctx *ctx = get_ctx();
const struct kcsan_ctx ctx_save = current->kcsan_ctx;
memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
current->kcsan_ctx = ctx_save;
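The three lines above are the save/clear/restore pattern around KCSAN's debugfs microbenchmark: the current task's context is copied aside, zeroed so the benchmark itself runs unfiltered, and restored afterwards. A sketch of that shape, where the function name and loop body are illustrative assumptions:

static noinline void microbenchmark(unsigned long iters)
{
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;

	/* Run with a clean context so prior state does not skew the benchmark. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));

	while (iters--)
		__kcsan_check_read(&iters, sizeof(iters)); /* hypothetical workload */

	/* Restore the saved context on exit. */
	current->kcsan_ctx = ctx_save;
}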
struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
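The two call sites above take the address of the reorder_access member embedded in kcsan_ctx; get_reorder_access() from the earlier listing presumably hands out that same member only when weak-memory modeling is built in. A sketch, in which the CONFIG_KCSAN_WEAK_MEMORY guard is our assumption and only &ctx->reorder_access is implied by the listing:

static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
{
	/* The guard below is assumed; only the returned member is from the listing. */
	return IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY) ? &ctx->reorder_access : NULL;
}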