#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
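/*
 * Low-level processor definitions for s390: CPU-interruption flags (CIF),
 * per-CPU state, the user address-space layout, thread_struct, and PSW
 * helper functions.
 */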
#include <linux/bits.h>
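/* Bit numbers of CPU-interruption flags, kept in struct pcpu->flags. */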
#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
#define CIF_MCCK_GUEST		6	/* machine check happening in guest */
#define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
#define RESTART_FLAG_CTLREGS	_AC(1 << 0, U)	/* restart data: control registers are valid */
#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/instruction_pointer.h>
#include <linux/bitops.h>
#include <asm/fpu-types.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/irqflags.h>
#include <asm/alternative.h>
#include <asm/fault.h>
struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	unsigned long flags;		/* per CPU flags */
	unsigned long capacity;		/* cpu capacity */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};
DECLARE_PER_CPU(struct pcpu, pcpu_devices);
typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
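/* Return the pcpu entry of the executing CPU, cached in its lowcore. */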
static __always_inline struct pcpu *this_pcpu(void)
{
return (struct pcpu *)(get_lowcore()->pcpu);
}
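/*
 * Atomically set, clear, and test CIF flags of the executing CPU,
 * e.g. set_cpu_flag(CIF_NOHZ_DELAY).
 */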
static __always_inline void set_cpu_flag(int flag)
{
set_bit(flag, &this_pcpu()->flags);
}
static __always_inline void clear_cpu_flag(int flag)
{
clear_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_cpu_flag(int flag)
{
return test_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_and_set_cpu_flag(int flag)
{
return test_and_set_bit(flag, &this_pcpu()->flags);
}
static __always_inline bool test_and_clear_cpu_flag(int flag)
{
return test_and_clear_bit(flag, &this_pcpu()->flags);
}
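/* Test a CIF flag of another CPU. */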
static __always_inline bool test_cpu_flag_of(int flag, int cpu)
{
return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags);
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
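/* Store the CPU identification (STIDP) of the executing CPU into *ptr. */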
static inline void get_cpu_id(struct cpuid *ptr)
{
asm volatile("stidp %0" : "=Q" (*ptr));
}
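/* Return the current value of the CPU timer (STPT). */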
static __always_inline unsigned long get_cpu_timer(void)
{
unsigned long timer;
asm volatile("stpt %[timer]" : [timer] "=Q" (timer));
return timer;
}
void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
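/*
 * User address-space layout: the stack grows downwards from STACK_TOP;
 * the vdso occupies [VDSO_BASE, VDSO_LIMIT) directly above it, separated
 * from the stack by one guard page.
 */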
#define TASK_SIZE (TASK_SIZE_MAX)
#define TASK_UNMAPPED_BASE (_REGION2_SIZE >> 1)
#define TASK_SIZE_MAX (-PAGE_SIZE)
#define VDSO_BASE (STACK_TOP + PAGE_SIZE)
#define VDSO_LIMIT (_REGION2_SIZE)
#define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE)
#define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
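/*
 * Architecture override for the stackleak plugin: fill the range
 * [erase_low, erase_high) with the poison pattern. One quadword is
 * stored with STG and propagated through each 256-byte chunk by an
 * overlapping MVC; the sub-256-byte remainder is written by an
 * EXRL-executed MVC whose length is taken from %[count].
 */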
#define __stackleak_poison __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
unsigned long erase_high,
unsigned long poison)
{
unsigned long tmp, count;
count = erase_high - erase_low;
if (!count)
return;
asm volatile(
" cghi %[count],8\n"
" je 2f\n"
" aghi %[count],-(8+1)\n"
" srlg %[tmp],%[count],8\n"
" ltgr %[tmp],%[tmp]\n"
" jz 1f\n"
"0: stg %[poison],0(%[addr])\n"
" mvc 8(256-8,%[addr]),0(%[addr])\n"
" la %[addr],256(%[addr])\n"
" brctg %[tmp],0b\n"
"1: stg %[poison],0(%[addr])\n"
" exrl %[count],3f\n"
" j 4f\n"
"2: stg %[poison],0(%[addr])\n"
" j 4f\n"
"3: mvc 8(1,%[addr]),0(%[addr])\n"
"4:"
: [addr] "+&a" (erase_low), [count] "+&a" (count), [tmp] "=&a" (tmp)
: [poison] "d" (poison)
: "memory", "cc"
);
}
struct thread_struct {
	unsigned int acrs[NUM_ACRS];	/* access registers */
	unsigned long ksp;		/* kernel stack pointer */
	unsigned long user_timer;	/* task cputime in user space */
	unsigned long guest_timer;	/* task cputime in kvm guest */
	unsigned long system_timer;	/* task cputime in kernel space */
	unsigned long hardirq_timer;	/* task cputime in hardirq context */
	unsigned long softirq_timer;	/* task cputime in softirq context */
	union teid gmap_teid;		/* address and flags of last gmap fault */
	unsigned int gmap_int_code;	/* interruption code of last gmap fault */
	int ufpu_flags;			/* user fpu flags */
	int kfpu_flags;			/* kernel fpu flags */
	struct per_regs per_user;	/* User specified PER registers */
	struct per_event per_event;	/* Cause of the last PER trap */
	unsigned long per_flags;	/* Flags to control debug behavior */
	unsigned int system_call;	/* system call number in signal handler */
	unsigned long last_break;	/* last breaking-event-address */
	unsigned long pfault_wait;	/* pfault interrupt flag */
	struct list_head list;		/* pfault list entry */
	struct runtime_instr_cb *ri_cb;	/* runtime instrumentation control block */
	struct gs_cb *gs_cb;		/* current guarded storage cb */
	struct gs_cb *gs_bc_cb;		/* broadcast guarded storage cb */
	struct pgm_tdb trap_tdb;	/* Transaction abort diagnose block */
	struct fpu ufpu;		/* user fp/vx register save area */
	struct fpu kfpu;		/* kernel fp/vx register save area */
};
#define PER_FLAG_NO_TE			1UL /* Flag to disable transactions. */
#define PER_FLAG_TE_ABORT_RAND		2UL /* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND_TEND	4UL /* Flag to specify random transaction abort mode. */
typedef struct thread_struct thread_struct;
#define ARCH_MIN_TASKALIGN 8
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.last_break = 1, \
}
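/*
 * Set up the user PSW and stack pointer to start a new thread. The
 * 31-bit variant omits PSW_MASK_EA, i.e. it starts the thread in
 * 31-bit addressing mode.
 */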
#define start_thread(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
#define start_thread31(regs, new_psw, new_stackp) do { \
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
execve_tail(); \
} while (0)
struct task_struct;
struct mm_struct;
struct seq_file;
struct pt_regs;
void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);
void guarded_storage_release(struct task_struct *tsk);
void gs_load_bc_cb(struct pt_regs *regs);
unsigned long __get_wchan(struct task_struct *p);
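/* The user register state is saved at the top of the kernel stack. */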
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)	/* runtime instrumentation enabled? */
#define current_stack_pointer (__current_stack_pointer())
static __always_inline unsigned long __current_stack_pointer(void)
{
unsigned long sp;
asm volatile("lgr %0,15" : "=d" (sp));
return sp;
}
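/*
 * Kernel stacks are THREAD_SIZE aligned, so the stack pointer is on the
 * thread stack iff it matches the lowcore kernel_stack in all bits above
 * the THREAD_SIZE offset.
 */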
static __always_inline bool on_thread_stack(void)
{
unsigned long ksp = get_lowcore()->kernel_stack;
return !((ksp ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
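/* Return the CPU address of the executing CPU (STAP). */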
static __always_inline unsigned short stap(void)
{
unsigned short cpu_address;
asm volatile("stap %0" : "=Q" (cpu_address));
return cpu_address;
}
#define cpu_relax() barrier()
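/* Extract CPU attribute (ECAG): asi selects the attribute space, parm the attribute. */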
#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
unsigned long val;
asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm));
return val;
}
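/* Set the PSW access key of the executing CPU (SPKA). */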
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
}
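/* Load a new PSW (LPSWE): sets the mask and branches to psw.addr in one go. */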
static inline void __load_psw(psw_t psw)
{
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
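/*
 * Replace the PSW mask while continuing with the next instruction:
 * build a PSW whose address points behind the LPSWE and load it.
 */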
static __always_inline void __load_psw_mask(unsigned long mask)
{
psw_t psw __uninitialized;
unsigned long addr;
psw.mask = mask;
asm volatile(
" larl %0,1f\n"
" stg %0,%1\n"
" lpswe %2\n"
"1:"
: "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}
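/* Return the current PSW mask: EPSW stores its two 32-bit halves into a register pair. */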
static inline unsigned long __extract_psw(void)
{
unsigned int reg1, reg2;
asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
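/*
 * Machine check mask handling, analogous to local_irq_save() and
 * local_irq_restore(). A minimal usage sketch:
 *
 *	unsigned long mflags;
 *
 *	local_mcck_save(mflags);
 *	... section that must not be interrupted by machine checks ...
 *	local_mcck_restore(mflags);
 */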
static inline unsigned long __local_mcck_save(void)
{
unsigned long mask = __extract_psw();
__load_psw_mask(mask & ~PSW_MASK_MCHECK);
return mask & PSW_MASK_MCHECK;
}
#define local_mcck_save(mflags) \
do { \
typecheck(unsigned long, mflags); \
mflags = __local_mcck_save(); \
} while (0)
static inline void local_mcck_restore(unsigned long mflags)
{
unsigned long mask = __extract_psw();
mask &= ~PSW_MASK_MCHECK;
__load_psw_mask(mask | mflags);
}
static inline void local_mcck_disable(void)
{
__local_mcck_save();
}
static inline void local_mcck_enable(void)
{
__load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
}
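/*
 * Rewind the PSW instruction address by ilen bytes, wrapping according
 * to the current addressing mode (64, 31, or 24 bit).
 */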
static inline unsigned long __rewind_psw(psw_t psw, long ilen)
{
unsigned long mask;
mask = (psw.mask & PSW_MASK_EA) ? -1UL :
(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
(1UL << 24) - 1;
return (psw.addr - ilen) & mask;
}
static inline unsigned long __forward_psw(psw_t psw, long ilen)
{
return __rewind_psw(psw, -ilen);
}
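/* Drop the executing CPU into disabled wait state. */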
static __always_inline void __noreturn disabled_wait(void)
{
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
psw.addr = _THIS_IP_;
__load_psw(psw);
while (1);
}
#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
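/* Were interrupts disabled in the interrupted (pt_regs) context? */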
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->psw.mask);
}
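/*
 * Turn branch prediction back on. The nop is replaced with the BPON
 * instruction by the alternatives mechanism when the required facility
 * (82) is installed; part of the spectre mitigation support.
 */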
static __always_inline void bpon(void)
{
asm_inline volatile(
ALTERNATIVE(" nop\n",
" .insn rrf,0xb2e80000,0,0,13,0\n",
ALT_SPEC(82))
);
}
#endif /* __ASSEMBLER__ */
#endif /* __ASM_S390_PROCESSOR_H */