R4
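/* powerpc instruction-emulation self-tests: single-stepping the VSX lxvd2x/stxvd2x forms, with GPRs R3 and R4 supplying the indexed effective address. */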
stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVD2X(39, R3, R4)));
stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVD2X(39, R3, R4)));
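/* AMD MCE decoding: here R4() is a field extractor, not a register -- it pulls the 4-bit "rrrr" memory-transaction-type code out of an MCE error code. */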
if (R4(ec) == R4_GEN && LL(ec) == LL_L1) {
u8 r4 = R4(ec);
switch (R4(ec)) {
u8 r4 = R4(ec);
u8 r4 = R4(ec);
u8 r4 = R4(ec);
u8 r4 = R4(ec);
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
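/* Z8530 SCC (hamradio) setup: write register 4 selects a x1 clock and SDLC mode. */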
wr(scc,R4,X1CLK|SDLC); /* *1 clock, SDLC mode */
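/* iwlegacy thermal calibration: R4 holds a raw therm_r4 word from the ucode "alive" response; sign_extend32(R4, 23) treats bit 23 as its sign bit. */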
u32 R4;
R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
vt = sign_extend32(R4, 23);
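/* Aspeed pinctrl: R4 is a BGA ball name, multiplexed between GPIOJ5 and the VGAVS/DASHR4 signal functions. */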
ASPEED_PINCTRL_PIN(R4),
SIG_EXPR_LIST_DECL_SINGLE(R4, VGAVS, VGAVS, SIG_DESC_SET(SCU84, 13));
SIG_EXPR_LIST_DECL_SINGLE(R4, DASHR4, DASHR4, SIG_DESC_SET(SCU94, 8));
PIN_DECL_2(R4, GPIOJ5, VGAVS, DASHR4);
FUNC_GROUP_DECL(VGAVS, R4);
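/* Zilog serial drivers: R4 indexes the SCC's write register 4, which carries the clock-divisor (X1CLK/X16CLK/X32CLK), parity (PAR_ENAB/PAR_EVEN), and stop-bit (SB1/SB2) settings. */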
up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
write_zsreg(channel, R4, regs[R4]);
up->curregs[R4] &= ~XCLK_MASK;
up->curregs[R4] |= X16CLK;
write_zsreg(uap, R4, regs[R4]);
uap->curregs[R4] = X16CLK | SB1;
uap->curregs[R4] = X1CLK;
uap->curregs[R4] = X16CLK;
uap->curregs[R4] = X32CLK;
uap->curregs[R4] = X16CLK;
up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
write_zsreg(channel, R4, regs[R4]);
up->curregs[R4] &= ~XCLK_MASK;
up->curregs[R4] |= X16CLK;
up->curregs[R4] &= ~0x0c;
up->curregs[R4] |= SB2;
up->curregs[R4] |= SB1;
up->curregs[R4] |= PAR_ENAB;
up->curregs[R4] &= ~PAR_ENAB;
up->curregs[R4] |= PAR_EVEN;
up->curregs[R4] &= ~PAR_EVEN;
write_zsreg(zport, R4, regs[4]);
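/* BPF test suite: from here on R4 is BPF register 4 (BPF_REG_4), exercised by ALU, jump, and load-immediate test patterns. */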
BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
BPF_JMP_REG(BPF_JNE, R2, R4, 1),
BPF_ALU64_IMM(BPF_MOV, R4, R4), \
BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \
BPF_ALU64_IMM(BPF_MOV, R4, 4), \
BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \
i += __bpf_ld_imm64(&insns[i], R4, fetch);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
i += __bpf_ld_imm64(&insns[i], R4, fetch);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
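/* ADD chains (64-bit, then 32-bit): R4 is seeded with small immediates, then folded into every register; the inline checks expect 12177 both times. */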
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_IMM(BPF_ADD, R4, 20),
BPF_ALU64_IMM(BPF_SUB, R4, 10),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_ALU64_REG(BPF_ADD, R1, R4),
BPF_ALU64_REG(BPF_ADD, R2, R4),
BPF_ALU64_REG(BPF_ADD, R3, R4),
BPF_ALU64_REG(BPF_ADD, R4, R0),
BPF_ALU64_REG(BPF_ADD, R4, R1),
BPF_ALU64_REG(BPF_ADD, R4, R2),
BPF_ALU64_REG(BPF_ADD, R4, R3),
BPF_ALU64_REG(BPF_ADD, R4, R4),
BPF_ALU64_REG(BPF_ADD, R4, R5),
BPF_ALU64_REG(BPF_ADD, R4, R6),
BPF_ALU64_REG(BPF_ADD, R4, R7),
BPF_ALU64_REG(BPF_ADD, R4, R8),
BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
BPF_ALU64_REG(BPF_ADD, R5, R4),
BPF_ALU64_REG(BPF_ADD, R6, R4),
BPF_ALU64_REG(BPF_ADD, R7, R4),
BPF_ALU64_REG(BPF_ADD, R8, R4),
BPF_ALU64_REG(BPF_ADD, R9, R4),
BPF_ALU32_IMM(BPF_MOV, R4, 4),
BPF_ALU64_IMM(BPF_ADD, R4, 10),
BPF_ALU32_REG(BPF_ADD, R0, R4),
BPF_ALU32_REG(BPF_ADD, R1, R4),
BPF_ALU32_REG(BPF_ADD, R2, R4),
BPF_ALU32_REG(BPF_ADD, R3, R4),
BPF_ALU32_REG(BPF_ADD, R4, R0),
BPF_ALU32_REG(BPF_ADD, R4, R1),
BPF_ALU32_REG(BPF_ADD, R4, R2),
BPF_ALU32_REG(BPF_ADD, R4, R3),
BPF_ALU32_REG(BPF_ADD, R4, R4),
BPF_ALU32_REG(BPF_ADD, R4, R5),
BPF_ALU32_REG(BPF_ADD, R4, R6),
BPF_ALU32_REG(BPF_ADD, R4, R7),
BPF_ALU32_REG(BPF_ADD, R4, R8),
BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
BPF_ALU32_REG(BPF_ADD, R5, R4),
BPF_ALU32_REG(BPF_ADD, R6, R4),
BPF_ALU32_REG(BPF_ADD, R7, R4),
BPF_ALU32_REG(BPF_ADD, R8, R4),
BPF_ALU32_REG(BPF_ADD, R9, R4),
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_REG(BPF_SUB, R0, R4),
BPF_ALU64_REG(BPF_SUB, R1, R4),
BPF_ALU64_REG(BPF_SUB, R2, R4),
BPF_ALU64_REG(BPF_SUB, R3, R4),
BPF_ALU64_REG(BPF_SUB, R4, R0),
BPF_ALU64_REG(BPF_SUB, R4, R1),
BPF_ALU64_REG(BPF_SUB, R4, R2),
BPF_ALU64_REG(BPF_SUB, R4, R3),
BPF_ALU64_REG(BPF_SUB, R4, R5),
BPF_ALU64_REG(BPF_SUB, R4, R6),
BPF_ALU64_REG(BPF_SUB, R4, R7),
BPF_ALU64_REG(BPF_SUB, R4, R8),
BPF_ALU64_REG(BPF_SUB, R4, R9),
BPF_ALU64_IMM(BPF_SUB, R4, 10),
BPF_ALU64_REG(BPF_SUB, R5, R4),
BPF_ALU64_REG(BPF_SUB, R6, R4),
BPF_ALU64_REG(BPF_SUB, R7, R4),
BPF_ALU64_REG(BPF_SUB, R8, R4),
BPF_ALU64_REG(BPF_SUB, R9, R4),
BPF_ALU64_REG(BPF_SUB, R0, R4),
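/* Self-XOR and self-SUB are both zeroing idioms: each leaves R4 == 0 before the following equality check. */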
BPF_ALU64_REG(BPF_XOR, R4, R4),
BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
BPF_ALU64_REG(BPF_SUB, R4, R4),
BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_REG(BPF_MUL, R0, R4),
BPF_ALU64_REG(BPF_MUL, R1, R4),
BPF_ALU64_REG(BPF_MUL, R2, R4),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_ALU64_IMM(BPF_MOV, R4, 0),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_ALU32_IMM(BPF_MOV, R4, 0),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_LD_IMM64(R4, 0x0LL),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_MOV32_IMM(R4, -1234),
BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
BPF_ALU64_IMM(BPF_AND, R4, 63),
BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
BPF_MOV64_IMM(R4, 4),
BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
BPF_MOV64_IMM(R4, 5),
BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
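/* Unsigned 64-bit divide: R2 is divided by the all-ones value (2^64 - 1) loaded into R4. */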
BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
BPF_ALU64_REG(BPF_DIV, R2, R4),
i += __bpf_ld_imm64(&insn[i], R4, val);
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);