/*-
* Copyright (c) 2011-2015 Alexander Nasonov.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
 * Number of saved registers to pass to sljit_emit_enter() function.
 */
#define NSAVEDS 3
/*
 * Arguments of generated bpfjit_func_t.
 * The first argument is reassigned upon entry
 * to a more frequently used buf argument.
 */
#define BJ_CTX_ARG SLJIT_S0
#define BJ_ARGS SLJIT_S1
/*
 * Get a number of memwords and external memwords from a bpf_ctx object.
 * A NULL bc means "no context": zero external words and the default
 * BPF_MEMWORDS-sized internal memory store.
 * Note: both macros evaluate their argument more than once; pass only
 * side-effect-free expressions.
 */
#define GET_EXTWORDS(bc) ((bc) ? (bc)->extwords : 0)
#define GET_MEMWORDS(bc) (GET_EXTWORDS(bc) ? GET_EXTWORDS(bc) : BPF_MEMWORDS)
/*
 * Per-invocation state kept on the machine stack of the generated code:
 * the bpf context pointer, a pointer to the external memory store (when
 * the filter uses external memwords), a spill slot for the A or X
 * register around function calls, and the internal memory store M[].
 */
struct bpfjit_stack
{
bpf_ctx_t *ctx;
uint32_t *extmem; /* pointer to external memory store */
uint32_t reg; /* saved A or X register */
#ifdef _KERNEL
int err; /* 3rd argument for m_xword/m_xhalf/m_xbyte function call */
#endif
uint32_t mem[BPF_MEMWORDS]; /* internal memory store */
};
/*
 * Data for BPF_JMP instruction.
 * Forward declaration, needed because struct bpfjit_jump (the bjumps
 * list entry, defined elsewhere) points back to this type.
 */
struct bpfjit_jump_data;
/*
 * Data for BPF_JMP instruction.
 */
struct bpfjit_jump_data {
/*
 * These entries make up bjumps list:
 * jtf[0] - when coming from jt path,
 * jtf[1] - when coming from jf path.
 */
struct bpfjit_jump jtf[2];
/*
 * Length calculated by Array Bounds Check Elimination (ABC) pass.
 */
bpfjit_abc_length_t abc_length;
/*
 * Length checked by the last out-of-bounds check.
 */
bpfjit_abc_length_t checked_length;
};
/*
 * Data for "read from packet" instructions.
 * See also read_pkt_insn() function below.
 */
struct bpfjit_read_pkt_data {
/*
 * Length calculated by Array Bounds Check Elimination (ABC) pass.
 */
bpfjit_abc_length_t abc_length;
/*
 * If positive, emit "if (buflen < check_length) return 0"
 * out-of-bounds check.
 * Values greater than UINT32_MAX generate unconditional "return 0".
 */
bpfjit_abc_length_t check_length;
};
/*
 * Additional (optimization-related) data for bpf_insn.
 */
struct bpfjit_insn_data {
/* List of jumps to this insn. */
SLIST_HEAD(, bpfjit_jump) bjumps;
/*
 * Per-class payload: jdata for BPF_JMP instructions, rdata for
 * "read from packet" instructions (see read_pkt_insn()).
 */
union {
struct bpfjit_jump_data jdata;
struct bpfjit_read_pkt_data rdata;
} u;
/*
 * Bitmask of memwords/registers that may still be uninitialized when
 * control reaches this insn; seeded to BJ_INIT_NOBITS and accumulated
 * by the optimizer (NOTE(review): exact bit semantics inferred from
 * the BJ_INIT_* usage below — confirm against the optimizer passes).
 */
bpf_memword_init_t invalid;
/* Set by the optimizer when this insn can never be reached. */
bool unreachable;
};
#ifdef _KERNEL
/*
 * Out-of-line mbuf loads: fetch 32/16/8 bits at a byte offset; the
 * int * argument receives the error status of the read.
 * NOTE(review): semantics inferred from names and the `err` stack slot
 * above — confirm against the m_xword/m_xhalf/m_xbyte definitions.
 */
uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);
MODULE(MODULE_CLASS_MISC, bpfjit, "sljit")
static int
bpfjit_modcmd(modcmd_t cmd, void *arg)
{
/*
 * NOTE(review): the body of bpfjit_modcmd() is missing here — the file
 * appears truncated/garbled at this point and the brace above is never
 * closed before the next definition begins.
 */
/*
 * Return a number of scratch registers to pass
 * to sljit_emit_enter() function.
 */
static sljit_s32
nscratches(bpfjit_hint_t hints)
{
sljit_s32 rv = 2;
#ifdef _KERNEL
if (hints & BJ_HINT_PKT)
rv = 3; /* xcall with three arguments */
#endif
/*
 * NOTE(review): the switch below references `pc`, which is not in
 * scope in nscratches(), and `rv` is never returned.  It looks like
 * the body of a separate load-width helper was spliced in here when
 * the file was mangled — restore from the upstream source.
 */
switch (BPF_SIZE(pc->code)) {
case BPF_W: return 4;
case BPF_H: return 2;
case BPF_B: return 1;
default: return 0;
}
}
/*
* Copy buf and buflen members of bpf_args from BJ_ARGS
* pointer to BJ_BUF and BJ_BUFLEN registers.
*/
static int
load_buf_buflen(struct sljit_compiler *compiler)
{
int status;
/* BJ_BUF = args->pkt (copy packet pointer out of bpf_args). */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_BUF, 0,
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, pkt));
if (status != SLJIT_SUCCESS)
return status;
/*
 * NOTE(review): everything from here to the end of this function uses
 * identifiers that are not in scope (save_reg, hints, pc, jump, ret0,
 * ret0_size, ret0_maxsize, dst, fn) — it appears to be the body of an
 * external-call emitter (xcall for BPF_LD ABS/IND) spliced into
 * load_buf_buflen() when the file was mangled.  The buflen load this
 * function is named for is also missing.  Restore from upstream.
 */
if (save_reg == BJ_AREG || (hints & BJ_HINT_XREG)) {
/* save A or X */
status = sljit_emit_op1(compiler,
SLJIT_MOV_U32,
SLJIT_MEM1(SLJIT_SP),
offsetof(struct bpfjit_stack, reg),
save_reg, 0);
if (status != SLJIT_SUCCESS)
return status;
}
/*
 * Prepare registers for fn(mbuf, k, &err) call.
 */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
SLJIT_R0, 0,
BJ_BUF, 0);
if (status != SLJIT_SUCCESS)
return status;
if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) {
if (pc->k == 0) {
/* k = X; */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
SLJIT_R1, 0,
BJ_XREG, 0);
if (status != SLJIT_SUCCESS)
return status;
} else {
/* if (X > UINT32_MAX - pc->k) return 0; */
jump = sljit_emit_cmp(compiler,
SLJIT_GREATER,
BJ_XREG, 0,
SLJIT_IMM, UINT32_MAX - pc->k);
if (jump == NULL)
return SLJIT_ERR_ALLOC_FAILED;
if (!append_jump(jump, ret0, ret0_size, ret0_maxsize))
return SLJIT_ERR_ALLOC_FAILED;
/* k = X + pc->k; */
status = sljit_emit_op2(compiler,
SLJIT_ADD,
SLJIT_R1, 0,
BJ_XREG, 0,
SLJIT_IMM, (uint32_t)pc->k);
if (status != SLJIT_SUCCESS)
return status;
}
} else {
/* k = pc->k */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
SLJIT_R1, 0,
SLJIT_IMM, (uint32_t)pc->k);
if (status != SLJIT_SUCCESS)
return status;
}
/*
 * The third argument of fn is an address on stack.
 */
status = sljit_get_local_base(compiler,
SLJIT_R2, 0,
offsetof(struct bpfjit_stack, err));
if (status != SLJIT_SUCCESS)
return status;
/* fn(buf, k, &err); */
status = sljit_emit_ijump(compiler,
SLJIT_CALL3,
SLJIT_IMM, SLJIT_FUNC_OFFSET(fn));
if (status != SLJIT_SUCCESS)
return status;
if (dst != SLJIT_RETURN_REG) {
/* move return value to dst */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
dst, 0,
SLJIT_RETURN_REG, 0);
if (status != SLJIT_SUCCESS)
return status;
}
/* X = tmp1 << 2 */
status = sljit_emit_op2(compiler,
SLJIT_SHL,
BJ_XREG, 0,
BJ_TMP1REG, 0,
SLJIT_IMM, 2);
if (status != SLJIT_SUCCESS)
return status;
return SLJIT_SUCCESS;
}
/*
* Emit code for A = A / k or A = A % k when k is a power of 2.
* @pc BPF_DIV or BPF_MOD instruction.
*/
/*
 * Emit code for A = A / k or A = A % k when k is a power of 2.
 * @pc BPF_DIV or BPF_MOD instruction.
 * Returns an sljit status code (SLJIT_SUCCESS on success).
 */
static int
emit_pow2_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
{
	uint32_t k = pc->k;
	int status = SLJIT_SUCCESS;

	/* k must be a non-zero power of two. */
	BJ_ASSERT(k != 0 && (k & (k - 1)) == 0);

	if (BPF_OP(pc->code) == BPF_MOD) {
		/* A = A & (k - 1) is equivalent to A = A % k. */
		status = sljit_emit_op2(compiler,
		    SLJIT_AND,
		    BJ_AREG, 0,
		    BJ_AREG, 0,
		    SLJIT_IMM, k - 1);
	} else {
		int shift = 0;

		/*
		 * Do shift = __builtin_ctz(k).
		 * The loop is slower, but that's ok.
		 */
		while (k > 1) {
			k >>= 1;
			shift++;
		}

		if (shift != 0) {
			/* A = A >> shift is equivalent to A = A / k. */
			status = sljit_emit_op2(compiler,
			    SLJIT_LSHR|SLJIT_I32_OP,
			    BJ_AREG, 0,
			    BJ_AREG, 0,
			    SLJIT_IMM, shift);
		}
	}

	/*
	 * Fix: the function was truncated — it lacked this return and
	 * its closing brace, leaving the next definition textually
	 * nested inside it.
	 */
	return status;
}
/*
* Emit code for A = A / div or A = A % div.
* @pc BPF_DIV or BPF_MOD instruction.
*/
static int
emit_moddiv(struct sljit_compiler *compiler, const struct bpf_insn *pc)
{
int status;
const bool xdiv = BPF_OP(pc->code) == BPF_DIV;
const bool xreg = BPF_SRC(pc->code) == BPF_X;
#if defined(BPFJIT_USE_UDIV)
status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_I32_OP);
if (BPF_OP(pc->code) == BPF_DIV) {
#if BJ_AREG != SLJIT_R0
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_R0, 0);
#endif
} else {
#if BJ_AREG != SLJIT_R1
/* Remainder is in SLJIT_R1. */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_R1, 0);
#endif
}
if (status != SLJIT_SUCCESS)
return status;
#else
status = sljit_emit_ijump(compiler,
SLJIT_CALL2,
SLJIT_IMM, xdiv ? SLJIT_FUNC_OFFSET(divide) :
SLJIT_FUNC_OFFSET(modulus));
#if BJ_AREG != SLJIT_RETURN_REG
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_RETURN_REG, 0);
if (status != SLJIT_SUCCESS)
return status;
#endif
#endif
return status;
}
/*
 * Return true if pc is a "read from packet" instruction.
 * If length is not NULL and return value is true, *length will
 * be set to a safe length required to read a packet.
 */
static bool
read_pkt_insn(const struct bpf_insn *pc, bpfjit_abc_length_t *length)
{
bool rv;
bpfjit_abc_length_t width = 0; /* XXXuninit */
/*
 * NOTE(review): the body below does not match this function's
 * contract — `rv` and `width` are never used, `pc`/`length` are
 * ignored, and the loop references `i`, `insn_count` and `insn_dat`,
 * none of which are in scope.  It looks like an initialization loop
 * from one of the optimizer passes was spliced in here when the file
 * was mangled; restore the real body from the upstream source.
 */
for (i = 0; i < insn_count; i++) {
SLIST_INIT(&insn_dat[i].bjumps);
insn_dat[i].invalid = BJ_INIT_NOBITS;
}
}
/*
* The function divides instructions into blocks. Destination of a jump
* instruction starts a new block. BPF_RET and BPF_JMP instructions
* terminate a block. Blocks are linear, that is, there are no jumps out
* from the middle of a block and there are no jumps in to the middle of
* a block.
*
* The function also sets bits in *initmask for memwords that
* need to be initialized to zero. Note that this set should be empty
* for any valid kernel filter program.
*/
static bool
optimize_pass1(const bpf_ctx_t *bc, const struct bpf_insn *insns,
struct bpfjit_insn_data *insn_dat, size_t insn_count,
bpf_memword_init_t *initmask, bpfjit_hint_t *hints)
{
struct bpfjit_jump *jtf;
size_t i;
uint32_t jt, jf;
bpfjit_abc_length_t length;
bpf_memword_init_t invalid; /* borrowed from bpf_filter() */
bool unreachable;
const size_t memwords = GET_MEMWORDS(bc);
*hints = 0;
*initmask = BJ_INIT_NOBITS;
unreachable = false;
invalid = ~BJ_INIT_NOBITS;
for (i = 0; i < insn_count; i++) {
if (!SLIST_EMPTY(&insn_dat[i].bjumps))
unreachable = false;
insn_dat[i].unreachable = unreachable;
if (unreachable)
continue;
invalid |= insn_dat[i].invalid;
if (read_pkt_insn(&insns[i], &length) && length > UINT32_MAX)
unreachable = true;
switch (BPF_CLASS(insns[i].code)) {
case BPF_RET:
if (BPF_RVAL(insns[i].code) == BPF_A)
*initmask |= invalid & BJ_INIT_ABIT;
unreachable = true;
continue;
case BPF_LD:
if (BPF_MODE(insns[i].code) == BPF_ABS)
*hints |= BJ_HINT_ABS;
for (i = insn_count; i != 0; i--) {
pc = &insns[i-1];
pd = &insn_dat[i-1];
if (pd->unreachable)
continue;
switch (BPF_CLASS(pc->code)) {
case BPF_RET:
/*
* It's quite common for bpf programs to
* check packet bytes in increasing order
* and return zero if bytes don't match
* specified criterion. Such programs disable
* ABC optimization completely because for
* every jump there is a branch with no read
* instruction.
* With no side effects, BPF_STMT(BPF_RET+BPF_K, 0)
* is indistinguishable from out-of-bound load.
* Therefore, abc_length can be set to
* MAX_ABC_LENGTH and enable ABC for many
* bpf programs.
* If this optimization encounters any
* instruction with a side effect, it will
* reset abc_length.
*/
if (BPF_RVAL(pc->code) == BPF_K && pc->k == 0)
abc_length = MAX_ABC_LENGTH;
else
abc_length = 0;
break;
case BPF_MISC:
if (BPF_MISCOP(pc->code) == BPF_COP ||
BPF_MISCOP(pc->code) == BPF_COPX) {
/* COP instructions can have side effects. */
abc_length = 0;
}
break;
case BPF_ST:
case BPF_STX:
if (extwords != 0) {
/* Write to memory is visible after a call. */
abc_length = 0;
}
break;
case BPF_JMP:
abc_length = pd->u.jdata.abc_length;
break;
default:
if (read_pkt_insn(pc, &length)) {
if (abc_length < length)
abc_length = length;
pd->u.rdata.abc_length = abc_length;
}
break;
}
/* reset sjump members of jdata */
for (i = 0; i < insn_count; i++) {
if (insn_dat[i].unreachable ||
BPF_CLASS(insns[i].code) != BPF_JMP) {
continue;
}
/* main loop */
for (i = 0; i < insn_count; i++) {
if (insn_dat[i].unreachable)
continue;
/*
* Resolve jumps to the current insn.
*/
label = NULL;
SLIST_FOREACH(bjump, &insn_dat[i].bjumps, entries) {
if (bjump->sjump != NULL) {
if (label == NULL)
label = sljit_emit_label(compiler);
if (label == NULL)
goto fail;
sljit_set_label(bjump->sjump, label);
}
}
to_mchain_jump = NULL;
unconditional_ret = false;
if (read_pkt_insn(&insns[i], NULL)) {
if (insn_dat[i].u.rdata.check_length > UINT32_MAX) {
/* Jump to "return 0" unconditionally. */
unconditional_ret = true;
jump = sljit_emit_jump(compiler, SLJIT_JUMP);
if (jump == NULL)
goto fail;
if (!append_jump(jump, &ret0,
&ret0_size, &ret0_maxsize))
goto fail;
} else if (insn_dat[i].u.rdata.check_length > 0) {
/* if (buflen < check_length) return 0; */
jump = sljit_emit_cmp(compiler,
SLJIT_LESS,
BJ_BUFLEN, 0,
SLJIT_IMM,
insn_dat[i].u.rdata.check_length);
if (jump == NULL)
goto fail;
#ifdef _KERNEL
to_mchain_jump = jump;
#else
if (!append_jump(jump, &ret0,
&ret0_size, &ret0_maxsize))
goto fail;
#endif
}
}
pc = &insns[i];
switch (BPF_CLASS(pc->code)) {
default:
goto fail;
case BPF_LD:
/* BPF_LD+BPF_IMM A <- k */
if (pc->code == (BPF_LD|BPF_IMM)) {
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_IMM, (uint32_t)pc->k);
if (status != SLJIT_SUCCESS)
goto fail;
continue;
}
/* BPF_LD+BPF_MEM A <- M[k] */
if (pc->code == (BPF_LD|BPF_MEM)) {
if ((uint32_t)pc->k >= memwords)
goto fail;
status = emit_memload(compiler,
BJ_AREG, pc->k, extwords);
if (status != SLJIT_SUCCESS)
goto fail;
continue;
}
/* BPF_LD+BPF_W+BPF_LEN A <- len */
if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
status = sljit_emit_op1(compiler,
SLJIT_MOV, /* size_t source */
BJ_AREG, 0,
SLJIT_MEM1(BJ_ARGS),
offsetof(struct bpf_args, wirelen));
if (status != SLJIT_SUCCESS)
goto fail;
case BPF_RET:
rval = BPF_RVAL(pc->code);
if (rval == BPF_X)
goto fail;
/* BPF_RET+BPF_K accept k bytes */
if (rval == BPF_K) {
status = sljit_emit_return(compiler,
SLJIT_MOV_U32,
SLJIT_IMM, (uint32_t)pc->k);
if (status != SLJIT_SUCCESS)
goto fail;
}
/* BPF_RET+BPF_A accept A bytes */
if (rval == BPF_A) {
status = sljit_emit_return(compiler,
SLJIT_MOV_U32,
BJ_AREG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
continue;
case BPF_MISC:
switch (BPF_MISCOP(pc->code)) {
case BPF_TAX:
status = sljit_emit_op1(compiler,
SLJIT_MOV_U32,
BJ_XREG, 0,
BJ_AREG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
continue;
case BPF_TXA:
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
BJ_XREG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
continue;
case BPF_COP:
case BPF_COPX:
if (bc == NULL || bc->copfuncs == NULL)
goto fail;
if (BPF_MISCOP(pc->code) == BPF_COP &&
(uint32_t)pc->k >= bc->nfuncs) {
goto fail;
}
status = emit_cop(compiler, hints, bc, pc,
&ret0, &ret0_size, &ret0_maxsize);
if (status != SLJIT_SUCCESS)
goto fail;
continue;
}
goto fail;
} /* switch */
} /* main loop */
BJ_ASSERT(ret0_size <= ret0_maxsize);
if (ret0_size > 0) {
label = sljit_emit_label(compiler);
if (label == NULL)
goto fail;
for (i = 0; i < ret0_size; i++)
sljit_set_label(ret0[i], label);
}
status = sljit_emit_return(compiler,
SLJIT_MOV_U32,
SLJIT_IMM, 0);
if (status != SLJIT_SUCCESS)
goto fail;
rv = true;
fail:
if (ret0 != NULL)
BJ_FREE(ret0, ret0_maxsize * sizeof(ret0[0]));
status = sljit_emit_enter(compiler, 0, 2, nscratches(hints),
NSAVEDS, 0, 0, sizeof(struct bpfjit_stack));
if (status != SLJIT_SUCCESS)
goto fail;
if (hints & BJ_HINT_COP) {
/* save ctx argument */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_MEM1(SLJIT_SP),
offsetof(struct bpfjit_stack, ctx),
BJ_CTX_ARG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
if (extwords == 0) {
mem_reg = SLJIT_MEM1(SLJIT_SP);
mem_off = offsetof(struct bpfjit_stack, mem);
} else {
/* copy "mem" argument from bpf_args to bpfjit_stack */
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
BJ_TMP1REG, 0,
SLJIT_MEM1(BJ_ARGS), offsetof(struct bpf_args, mem));
if (status != SLJIT_SUCCESS)
goto fail;
status = sljit_emit_op1(compiler,
SLJIT_MOV_P,
SLJIT_MEM1(SLJIT_SP),
offsetof(struct bpfjit_stack, extmem),
BJ_TMP1REG, 0);
if (status != SLJIT_SUCCESS)
goto fail;
mem_reg = SLJIT_MEM1(BJ_TMP1REG);
mem_off = 0;
}
/*
* Exclude pre-initialised external memory words but keep
* initialization statuses of A and X registers in case
* bc->preinited wrongly sets those two bits.
*/
initmask &= ~preinited | BJ_INIT_ABIT | BJ_INIT_XBIT;
#if defined(_KERNEL)
/* bpf_filter() checks initialization of memwords. */
BJ_ASSERT((initmask & (BJ_INIT_MBIT(memwords) - 1)) == 0);
#endif
for (i = 0; i < memwords; i++) {
if (initmask & BJ_INIT_MBIT(i)) {
/* M[i] = 0; */
status = sljit_emit_op1(compiler,
SLJIT_MOV_U32,
mem_reg, mem_off + i * sizeof(uint32_t),
SLJIT_IMM, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
}
if (initmask & BJ_INIT_ABIT) {
/* A = 0; */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_AREG, 0,
SLJIT_IMM, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
if (initmask & BJ_INIT_XBIT) {
/* X = 0; */
status = sljit_emit_op1(compiler,
SLJIT_MOV,
BJ_XREG, 0,
SLJIT_IMM, 0);
if (status != SLJIT_SUCCESS)
goto fail;
}
status = load_buf_buflen(compiler);
if (status != SLJIT_SUCCESS)
goto fail;