.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
cfi_st t6, PT_R18, \docfi
cfi_ld t6, PT_R18, \docfi
cregs->t6 = (compat_ulong_t) regs->t6;
regs->t6 = (unsigned long) cregs->t6;
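The pair of assignments above is the usual compat-ptrace pattern: narrow the 64-bit register value when exposing state to a 32-bit tracer, and zero-extend it when writing the value back. A minimal standalone sketch with assumed demo types (not the kernel's structures):

#include <stdint.h>

typedef uint32_t compat_ulong_t;

struct demo_regs        { unsigned long t6; };
struct demo_compat_regs { compat_ulong_t t6; };

/* Report to a 32-bit tracer: truncate to the low 32 bits. */
static void demo_to_compat(struct demo_compat_regs *cregs,
			   const struct demo_regs *regs)
{
	cregs->t6 = (compat_ulong_t)regs->t6;
}

/* Write back from a 32-bit tracer: zero-extend to 64 bits. */
static void demo_from_compat(struct demo_regs *regs,
			     const struct demo_compat_regs *cregs)
{
	regs->t6 = (unsigned long)cregs->t6;
}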
compat_ulong_t t6;
unsigned long t6;
unsigned long t6;
unsigned long t6;
unsigned long t6;
OFFSET(PT_T6, pt_regs, t6);
OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6);
OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6);
DEFINE(FREGS_T6, offsetof(struct __arch_ftrace_regs, t6));
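OFFSET() and DEFINE() above belong to the kernel's asm-offsets machinery: a C file is compiled to assembly, these macros embed marker strings carrying compile-time structure offsets as immediates, and a build script turns those markers into a generated header that assembly code can include. A minimal standalone version of the trick, with demo names:

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 \"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

struct pt_regs_demo { unsigned long zero, ra, t6; };

/* Compile with -S: the ->PT_T6_DEMO marker (with the offset as an
 * immediate operand) appears in the generated assembly, where a
 * post-processing script can rewrite it into a #define. */
void asm_offsets_demo(void)
{
	OFFSET(PT_T6_DEMO, pt_regs_demo, t6);
}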
{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
regs->t5, regs->t6, get_active_shstk(current));
REG_OFFSET_NAME(t6),
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
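SYSCALL_PT_ARG6 above looks like the top of a recursive macro chain: it expands SYSCALL_PT_ARG5 and then applies the caller-supplied macro m() to the sixth argument's type and its pt_regs slot (bp, matching the 32-bit x86 convention where arg6 lives in %ebp). A toy version of the recursive pattern, with hypothetical names:

struct toy_regs { long ax, bx, cx; };

#define TOY_ARG1(m, t1)         m(t1, (regs->ax))
#define TOY_ARG2(m, t1, t2)     TOY_ARG1(m, t1), m(t2, (regs->bx))
#define TOY_ARG3(m, t1, t2, t3) TOY_ARG2(m, t1, t2), m(t3, (regs->cx))

#define TOY_CAST(t, expr) (t)(expr)

/* Expands to:
 *   foo((int)(regs->ax), (long)(regs->bx), (char)(regs->cx))  */
static long call_foo(struct toy_regs *regs,
		     long (*foo)(int, long, char))
{
	return foo(TOY_ARG3(TOY_CAST, int, long, char));
}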
u64 t6[ECC_MAX_DIGITS];
vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
vli_mod_mult_fast(y1, y1, t6, curve);
vli_mod_add(t6, x1, x2, curve_prime, ndigits);
vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
vli_mod_mult_fast(t6, t6, t5, curve);
vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
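The vli_* calls above are crypto/ecc.c's multi-precision helpers operating on little-endian u64 digit arrays; vli_mod_sub() computes result = (left - right) mod prime, assuming both operands are already reduced. A minimal standalone sketch of that one operation (demo names, not the kernel implementation):

#include <stdint.h>

typedef uint64_t u64;

/* result = left - right; returns the borrow out of the top digit. */
static u64 vli_sub_demo(u64 *result, const u64 *left,
			const u64 *right, unsigned int ndigits)
{
	u64 borrow = 0;

	for (unsigned int i = 0; i < ndigits; i++) {
		u64 diff = left[i] - right[i] - borrow;

		/* A wrap past zero means we must borrow from above. */
		if (borrow)
			borrow = (diff >= left[i]);
		else
			borrow = (diff > left[i]);
		result[i] = diff;
	}
	return borrow;
}

/* result = (left - right) mod prime: add prime back on underflow. */
static void vli_mod_sub_demo(u64 *result, const u64 *left,
			     const u64 *right, const u64 *prime,
			     unsigned int ndigits)
{
	if (vli_sub_demo(result, left, right, ndigits)) {
		u64 carry = 0;

		for (unsigned int i = 0; i < ndigits; i++) {
			u64 sum = result[i] + prime[i] + carry;

			carry = (sum < result[i]) ||
				(carry && sum == result[i]);
			result[i] = sum;
		}
	}
}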
u32 t2, t4, t6, t8;
t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
ret = 32768 - t2 + t4 - t6 + t8;
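The t2/t4/t6/t8 lines above look like a Q15 fixed-point Taylor expansion of cos(x), with 32768 representing 1.0 and each term derived from the previous one as t_{2k} = t_{2k-2} * x^2 / 32768^2 / ((2k-1)*(2k)). A standalone illustration under that assumed scaling (not the kernel code):

#include <stdint.h>

/* x is an angle in Q15 radians (32768 == 1.0); returns Q15 cos(x). */
static int32_t q15_cos_demo(int32_t x)
{
	int32_t t2, t4, t6, t8;

	t2 = x * x / 32768 / 2;                   /* x^2 / 2! */
	t4 = t2 * x / 32768 * x / 32768 / 3 / 4;  /* x^4 / 4! */
	t6 = t4 * x / 32768 * x / 32768 / 5 / 6;  /* x^6 / 6! */
	t8 = t6 * x / 32768 * x / 32768 / 7 / 8;  /* x^8 / 8! */

	/* Alternating series: 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! */
	return 32768 - t2 + t4 - t6 + t8;
}

For x = 32768 (1.0 rad) this yields 17704, against an exact Q15 value of round(32768 * cos(1)) = 17705, so four terms keep the error within one LSB over this range.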
{ "t6", offsetof(struct user_regs_struct, t6) },
double t6 = 0.0;
ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t6, 8, str,
ASSERT_EQ(skel->bss->t6, 1, "t6 ret");
arr_typedef t6 = {};
t6 = a->b[0];
int t6;
unsigned long t6;
core.regs.t6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6));
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
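The one-reg ID on the line above is the OR of the architecture space, register size, register class, and the register's word offset within struct kvm_riscv_core; KVM_REG_RISCV_CORE_REG() encapsulates that offset calculation. A sketch of composing the t6 ID by hand, assuming an RV64 host with the RISC-V kernel uapi headers installed:

#include <stddef.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

static inline __u64 riscv_t6_reg_id(void)
{
	/* Word offset of regs.t6 inside struct kvm_riscv_core,
	 * i.e. what KVM_REG_RISCV_CORE_REG(regs.t6) evaluates to. */
	return KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_CORE |
	       (offsetof(struct kvm_riscv_core, regs.t6) /
		sizeof(unsigned long));
}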