jmp
} jmp[ARC_CC_SLE + 1];
.jmp[ARC_CC_UGT] = {
.jmp[ARC_CC_UGE] = {
.jmp[ARC_CC_ULT] = {
.jmp[ARC_CC_ULE] = {
.jmp[ARC_CC_SGT] = {
.jmp[ARC_CC_SGE] = {
.jmp[ARC_CC_SLT] = {
.jmp[ARC_CC_SLE] = {
const u8 *cc = arcv2_64_jccs.jmp[cond].cond;
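/*
 * The .jmp[ARC_CC_*] initializers above fill a per-condition table, and the
 * lookup on the previous line selects the entry for the condition being
 * compiled.  A stand-alone sketch of that designated-initializer pattern
 * (enum values and mnemonics below are illustrative, not the ARC JIT's):
 */
enum ex_cc { EX_UGT, EX_UGE, EX_ULT, EX_ULE, EX_SGT, EX_SGE, EX_SLT, EX_SLE };

static const struct {
	const char *cond;	/* condition suffix used for this comparison */
} ex_jccs[EX_SLE + 1] = {
	[EX_UGT] = { .cond = "hi" },
	[EX_UGE] = { .cond = "hs" },
	[EX_ULT] = { .cond = "lo" },
	[EX_ULE] = { .cond = "ls" },
	[EX_SGT] = { .cond = "gt" },
	[EX_SGE] = { .cond = "ge" },
	[EX_SLT] = { .cond = "lt" },
	[EX_SLE] = { .cond = "le" },
};

static const char *ex_cc_lookup(enum ex_cc cc)
{
	return ex_jccs[cc].cond;	/* mirrors arcv2_64_jccs.jmp[cond].cond */
}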
jmp ([0f:w,%pc,%d0*4])
u8 alu, jmp;
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
if (valid_jmp_i(jmp, imm)) {
emit_jmp_i(ctx, lo(dst), imm, rel, jmp);
emit_jmp_r(ctx, lo(dst), MIPS_R_T6, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_jmp_r64(ctx, dst, src, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
emit_jmp_i64(ctx, dst, imm, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
if (valid_jmp_i(jmp, imm)) {
emit_jmp_i(ctx, dst, imm, rel, jmp);
emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
u8 alu, jmp;
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
if (valid_jmp_i(jmp, imm)) {
emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_jmp_r(ctx, dst, src, rel, jmp);
if (finish_jmp(ctx, jmp, off) < 0)
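/*
 * The setup_jmp_*()/emit_jmp_*()/finish_jmp() calls above follow one pattern
 * per conditional jump: setup derives the native branch condition and the
 * relative target, the immediate form is emitted when valid_jmp_i() says the
 * constant encodes directly, otherwise the comparison falls back to a scratch
 * register (MIPS_R_T4/MIPS_R_T6, presumably loaded with the constant first),
 * and finish_jmp() finishes the jump, with a negative return treated as
 * failure.  A hedged, self-contained sketch of the "does the immediate
 * encode?" decision that drives the fallback; the signed 16-bit range is an
 * assumption about typical MIPS immediates, not taken from valid_jmp_i():
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ex_imm_encodes(int32_t imm)
{
	return imm >= INT16_MIN && imm <= INT16_MAX;
}

static void ex_emit_cond_jmp(int32_t imm)
{
	if (ex_imm_encodes(imm))
		printf("compare-and-branch against imm %d\n", imm);
	else
		printf("load %d into scratch, then register compare-and-branch\n", imm);
}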
unsigned int rb, op, jmp;
jmp = *((unsigned int *)regs->pc);
displacement = sign_extend32(((jmp) & 0x3ffffff) << 2, 27);
rb = (jmp & 0x0000ffff) >> 11;
op = jmp >> 26;
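/*
 * The decode above pulls a 6-bit opcode from the top of the instruction
 * word, a register field from bits 15:11, and a 26-bit displacement that is
 * converted to bytes (<< 2) and sign-extended from bit 27.  A self-contained
 * worked example of that sign_extend32() usage (the local helper mirrors the
 * kernel's sign_extend32() semantics; the sample instruction word is made up):
 */
#include <stdint.h>
#include <stdio.h>

static int32_t ex_sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;	/* "index" is the sign bit's position */

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t insn = 0x07fffffe;	/* opcode 1, 26-bit offset field = -2 */
	uint32_t op = insn >> 26;
	int32_t disp = ex_sign_extend32((insn & 0x3ffffff) << 2, 27);

	printf("op=%u, displacement=%d bytes\n", op, disp);	/* op=1, -8 bytes */
	return 0;
}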
ppc_inst_t jmp[4];
if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
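/*
 * The checks above validate a four-instruction trampoline -- lis r12,
 * addi r12, mtctr r12, bctr -- and the last line rebuilds the branch target
 * from the two 16-bit immediates: high half from the lis, low half from the
 * addi.  A self-contained illustration of that recombination (immediate
 * values are hypothetical, and the sign adjustment a real loader applies to
 * the addi immediate is not modelled here):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t lis_imm  = 0xc008;	/* low 16 bits of jmp[0], the lis  */
	uint32_t addi_imm = 0x1234;	/* low 16 bits of jmp[1], the addi */

	uint32_t target = (addi_imm & 0xffff) | ((lis_imm & 0xffff) << 16);

	printf("trampoline target = 0x%08" PRIx32 "\n", target);	/* 0xc0081234 */
	return 0;
}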
jmp @r0
jmp @r0
rd %psr, %i0; jmp %l2; rett %l2 + 4; nop;
jmp .Lwrcr3_pcid_\@
jmp .Lend_\@
jmp asm_common_interrupt
jmp asm_spurious_interrupt
#define RET jmp __x86_return_thunk
jmp __x86_indirect_thunk_\reg
jmp *%\reg
const u8 jmp[] = { JMP8_INSN_OPCODE, fineibt_caller_jmp };
text_poke_early(addr, jmp, 2);
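/*
 * The two-byte patch above is an x86 short jump: JMP8_INSN_OPCODE (0xEB)
 * followed by a signed 8-bit displacement measured from the end of the
 * instruction.  A self-contained sketch of assembling such a patch
 * (addresses are hypothetical; this does not reproduce how
 * fineibt_caller_jmp itself is computed):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t site	 = 0x1000;	/* hypothetical patch address */
	uintptr_t target = 0x1010;	/* hypothetical jump target   */

	/* rel8 is relative to the instruction *after* the 2-byte jmp. */
	int8_t rel8 = (int8_t)(target - (site + 2));
	uint8_t patch[2] = { 0xEB, (uint8_t)rel8 };

	printf("poke: %02x %02x\n", patch[0], patch[1]);	/* eb 0e */
	return 0;
}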
unsigned long *jmp;
jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
if (!jmp)
return jmp;
__le32 *jmp;
risc->jmp = rp;
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
risc->jmp = rp;
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
risc->jmp[0] = cpu_to_le32(cmd);
risc->jmp[1] = cpu_to_le32(next);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
__le32 *jmp;
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 3) * sizeof(*risc->cpu) > risc->size);
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
__le32 *jmp;
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
risc->jmp = rp;
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
risc->jmp = rp;
WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
__le32 *jmp;
buf->jmp = rp;
BUG_ON((buf->jmp - buf->cpu + 2) * sizeof(buf->cpu[0]) > buf->size);
core->name, buf, buf->cpu, buf->jmp);
for (addr = buf->cpu; addr <= buf->jmp; addr += 2)
buf->jmp[0] = cpu_to_le32(RISC_JUMP);
buf->jmp[1] = cpu_to_le32(buf->dma + 8);
prev->jmp[1] = cpu_to_le32(buf->dma);
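/*
 * All of the risc.jmp[]/jmp[] writes above implement the same queueing idea:
 * every per-buffer DMA program ends in a two-word jump (command word plus
 * target bus address), a freshly prepared buffer initially jumps back into
 * its own program, and queueing it behind another buffer repoints the
 * previous program's jump target at the new one so the controller follows
 * the chain.  A hedged stand-alone sketch of that chaining (types, field
 * layout and the command word are illustrative; cpu_to_le32() and the real
 * DMA mapping are omitted):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_RISC_JUMP	0x70000000u	/* hypothetical "jump" command word */

struct ex_risc_prog {
	uint32_t jmp[2];	/* [0] = command word, [1] = target bus address */
	uint32_t dma;		/* bus address of this program */
};

static void ex_prog_init(struct ex_risc_prog *buf, uint32_t dma)
{
	buf->dma = dma;
	buf->jmp[0] = EX_RISC_JUMP;
	buf->jmp[1] = buf->dma;		/* idle: jump back to itself */
}

static void ex_prog_queue(struct ex_risc_prog *prev, struct ex_risc_prog *buf)
{
	prev->jmp[1] = buf->dma;	/* splice the new program into the chain */
}

int main(void)
{
	struct ex_risc_prog a, b;

	ex_prog_init(&a, 0x1000);
	ex_prog_init(&b, 0x2000);
	ex_prog_queue(&a, &b);
	printf("a -> 0x%" PRIx32 ", b -> 0x%" PRIx32 "\n", a.jmp[1], b.jmp[1]);
	return 0;
}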
__le32 *jmp;
u32 jmp;
jmp = NCB_SCRIPTH_PHYS (np, par_err_data_in);
jmp = NCB_SCRIPTH_PHYS (np, par_err_other);
OUTL_DSP (jmp);
u8 jmp[3];
const struct bpf_insn *jmp,
ins[2] = *jmp;
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
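/*
 * Each staggered-jump test above builds exactly one conditional-jump
 * instruction with BPF_JMP_IMM()/BPF_JMP_REG() (or the BPF_JMP32_* variants,
 * which compare only the low 32 bits) and hands it to
 * __bpf_fill_staggered_jumps() together with the two operand values.  A
 * stand-alone sketch of what such an immediate-compare instruction contains;
 * the helper below is a local illustration, not the kernel's BPF_JMP_IMM()
 * macro itself:
 */
#include <linux/bpf.h>	/* struct bpf_insn, BPF_JMP, BPF_OP(), BPF_K, BPF_JEQ, ... */

static struct bpf_insn ex_jmp_imm(int op, int dst, int imm, int off)
{
	struct bpf_insn insn = {
		.code	 = BPF_JMP | BPF_OP(op) | BPF_K, /* class | condition | imm operand */
		.dst_reg = dst,
		.src_reg = 0,
		.off	 = off,	/* instructions to skip when the test holds */
		.imm	 = imm,	/* 32-bit constant to compare against       */
	};

	return insn;
}

/* Roughly what the JEQ test above builds: ex_jmp_imm(BPF_JEQ, BPF_REG_1, 1234, 0). */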
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm, bool alu32)
insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
jmp += 2; \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
jmp += 2; \
*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
jmp
| jmp