.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
cfi_st t4, PT_R16, \docfi
cfi_ld t4, PT_R16, \docfi
register unsigned long t4 asm ("t4") = __ho.high; \
: "+&r" (t3), "+&r" (t4), "+A" (*(p)) \
((u128)t4 << 64) | t3; \
cregs->t4 = (compat_ulong_t) regs->t4;
regs->t4 = (unsigned long) cregs->t4;
compat_ulong_t t4;
unsigned long t4;
unsigned long t4;
unsigned long t4;
unsigned long t4;
OFFSET(PT_T4, pt_regs, t4);
OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4);
OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4);
DEFINE(FREGS_T4, offsetof(struct __arch_ftrace_regs, t4));
{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
regs->s11, regs->t3, regs->t4);
REG_OFFSET_NAME(t4),
#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4) \
long arg4 = (long)(t4)(a4)
#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \
CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4); \
#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6) \
SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5) \
SYSCALL_PT_ARG4(m, t1, t2, t3, t4), m(t5, (regs->di))
#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4) \
SYSCALL_PT_ARG3(m, t1, t2, t3), m(t4, (regs->si))
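The SYSCALL_PT_ARG* macros above expand recursively: each level reuses the previous one and appends one more m(type, regs->REG) term, walking the i386 argument registers in order (args 4..6 land in si, di, bp per the lines shown). A minimal standalone imitation of the same recursive-macro pattern, using a hypothetical fake_regs struct and cast macro (names are illustrative, not the kernel's):

#include <stdio.h>

/* Hypothetical stand-ins for pt_regs and the per-argument macro `m`. */
struct fake_regs { long bx, cx, dx, si; };

#define M_CAST(t, v)                ((t)(v))
#define PT_ARG1(m, t1)              m(t1, (r->bx))
#define PT_ARG2(m, t1, t2)          PT_ARG1(m, t1), m(t2, (r->cx))
#define PT_ARG3(m, t1, t2, t3)      PT_ARG2(m, t1, t2), m(t3, (r->dx))
#define PT_ARG4(m, t1, t2, t3, t4)  PT_ARG3(m, t1, t2, t3), m(t4, (r->si))

static long add4(long a, long b, long c, long d) { return a + b + c + d; }

static long demo(struct fake_regs *r)
{
	/* Expands to add4(((long)(r->bx)), ((long)(r->cx)), ...): each
	 * macro level contributes one more cast register to the list. */
	return add4(PT_ARG4(M_CAST, long, long, long, long));
}

int main(void)
{
	struct fake_regs r = { 1, 2, 3, 4 };
	printf("%ld\n", demo(&r)); /* prints 10 */
	return 0;
}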
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
u64 t4[ECC_MAX_DIGITS];
vli_mod_square_fast(t4, y1, curve);
vli_mod_mult_fast(t5, x1, t4, curve);
vli_mod_square_fast(t4, t4, curve);
vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
vli_set(y1, t4, ndigits);
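The vli_* calls in the point-doubling sequence above operate on multi-digit big integers (t4 here is a full ECC_MAX_DIGITS scratch value, not a machine word). A minimal sketch of how a modular subtract in that style can be built from borrow/carry-propagating primitives; this mirrors the classic big-integer pattern, and the _sketch names and digit handling are illustrative:

#include <stdint.h>

typedef uint64_t u64;

/* result = left - right; returns the final borrow (1 if left < right). */
static u64 vli_sub_sketch(u64 *result, const u64 *left, const u64 *right,
			  unsigned int ndigits)
{
	u64 borrow = 0;
	unsigned int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff = left[i] - right[i] - borrow;

		/* If the digit changed, a wrap past zero means we borrowed. */
		if (diff != left[i])
			borrow = (diff > left[i]);
		result[i] = diff;
	}
	return borrow;
}

/* result = left + right; returns the final carry. */
static u64 vli_add_sketch(u64 *result, const u64 *left, const u64 *right,
			  unsigned int ndigits)
{
	u64 carry = 0;
	unsigned int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum = left[i] + right[i] + carry;

		if (sum != left[i])
			carry = (sum < left[i]);
		result[i] = sum;
	}
	return carry;
}

/* result = (left - right) mod prime, assuming both inputs are < prime. */
static void vli_mod_sub_sketch(u64 *result, const u64 *left, const u64 *right,
			       const u64 *prime, unsigned int ndigits)
{
	if (vli_sub_sketch(result, left, right, ndigits))
		vli_add_sketch(result, result, prime, ndigits); /* undo wrap */
}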
u32 t0, t1, t2_8, t2_16, t2i, t4, ta;
t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
u8 t4;
u8 t4;
u16 t4; /* power cycle */
u32 t2, t4, t6, t8;
t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
ret = 32768 - t2 + t4 - t6 + t8;
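The t2/t4/t6/t8 chain above is a Q15 fixed-point Taylor expansion: with 32768 standing for 1.0, each term multiplies the previous one by x^2/32768^2 and divides by the next two factorial factors, so ret approximates 32768*cos(x). A standalone sketch of the same pattern, using 64-bit intermediates to sidestep the overflow the u32 original has to manage (function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* cos(x) in Q15: x is radians scaled by 32768, result is cos scaled by
 * 32768.  cos(x) ~= 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! */
static int32_t cos_q15_sketch(int64_t x)
{
	int64_t t2 = x * x / 32768 / 2;
	int64_t t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
	int64_t t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
	int64_t t8 = t6 * x / 32768 * x / 32768 / 7 / 8;

	return (int32_t)(32768 - t2 + t4 - t6 + t8);
}

int main(void)
{
	/* x = pi/3 * 32768 ~= 34315; cos(pi/3) = 0.5, so expect ~16384. */
	printf("%d\n", cos_q15_sketch(34315));
	return 0;
}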
s64 t1, t2, t3, t4;
t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
t34 = t3 + (t4 - t3) / 2;
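t34 above estimates when the device snapshot was actually taken: the midpoint of the two system timestamps bracketing the hardware read. Writing it as t3 + (t4 - t3) / 2 rather than (t3 + t4) / 2 keeps the intermediate sum from overflowing when both nanosecond values are large. A minimal sketch of the same idiom:

#include <stdint.h>

/* Midpoint of two ns timestamps without risking overflow in t3 + t4. */
static int64_t ts_midpoint_ns(int64_t t3, int64_t t4)
{
	return t3 + (t4 - t3) / 2;
}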
u8 t4[6];
pcie_ptm_create_debugfs_file(pdata, 0444, t4);
if (t4) {
int t4 = is_t4(lldi->adapter_type), size, size6;
unsigned t1, t2, t3, t4;
t4 = mode->cmd_inh_time;
fbi->reg_lccr3 = fbi->lccr3 | LCCR3_PixClkDiv(__smart_timing(t4, lclk));
unsigned int t1, t2, t3, t4;
t4 = xfs_calc_refcountbt_reservation(mp, 1);
return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
unsigned int t1, t2, t3, t4;
t4 = xfs_calc_refcountbt_reservation(mp, 2);
return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
const unsigned int t4 = rui + efi;
const unsigned int per_intent = max(t1, t4);
unsigned long t1, t2, t4, idle;
u64 t4 = input[4];
u64 t4_ = t4 + (t3_ >> 51);
u64 t4 = input[4];
u64 t4_ = t4 + (t3_ >> 51);
u64 t4 = input[4];
u64 o3 = t4 << 12 | t3 >> 39;
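These fragments come from a radix-2^51 field-element representation: five 64-bit limbs each hold 51 significant bits, the t4_ = t4 + (t3_ >> 51) lines propagate carries up the limb chain, and shifts like o3 = t4 << 12 | t3 >> 39 repack the reduced limbs into four plain 64-bit words. A sketch of that repacking step under the same limb layout (function name is illustrative):

#include <stdint.h>

/* Pack five 51-bit limbs (t[i] holds bits 51*i..51*i+50 of a 255-bit
 * value, already carry-reduced below 2^51) into four 64-bit words. */
static void pack_51to64_sketch(uint64_t out[4], const uint64_t t[5])
{
	out[0] = t[1] << 51 | t[0];		/* bits   0..63  */
	out[1] = t[2] << 38 | t[1] >> 13;	/* bits  64..127 */
	out[2] = t[3] << 25 | t[2] >> 26;	/* bits 128..191 */
	out[3] = t[4] << 12 | t[3] >> 39;	/* bits 192..255, cf. o3 */
}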
const Byte *t4 = op2 ? *op2 : NULL;
register const Byte *r4 __asm__("r4") = t4;
t2 = r2; t3 = r3; t4 = r4; t5 = r5;
*op2 = t4;
const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
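The vshrn_n_u16 line above is the NEON narrowing-shift trick for compressing a 16-byte vector comparison into a 64-bit scalar mask: each 0x00/0xFF comparison byte contributes one 0x0/0xF nibble, so the result can be scanned with ordinary integer bit operations. A standalone sketch (requires arm_neon.h; function name is illustrative):

#include <arm_neon.h>
#include <stdint.h>

/* 64-bit match mask for 16 input bytes: nibble i is 0xF iff p[i] == tag. */
static uint64_t match_mask16_sketch(const uint8_t *p, uint8_t tag)
{
	const uint8x16_t eq = vceqq_u8(vld1q_u8(p), vdupq_n_u8(tag));
	/* Shift each 16-bit lane right by 4 and narrow to 8 bits: output
	 * byte i keeps the top nibble of comparison byte 2i and the bottom
	 * nibble of byte 2i+1, i.e. 4 mask bits per input byte. */
	const uint8x8_t nib = vshrn_n_u16(vreinterpretq_u16_u8(eq), 4);
	return vget_lane_u64(vreinterpret_u64_u8(nib), 0);
}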
nr->t4 / HZ,
nr->t4 = opt * HZ;
val = nr->t4 / HZ;
nr->t4 =
nr->t4 = onr->t4;
sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
{ "t4", offsetof(struct user_regs_struct, t4) },
double t4 = 5.678912;
ASSERT_OK(btf_dump_data(btf, d, "test_double", NULL, 0, &t4, 8, str,
named_struct_typedef t4 = {};
unsigned long t4;
core.regs.t4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4));
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),