Discussion:
[PATCH V3] Support for JIT in Seccomp BPF filters.
Nicolas Schichan
2013-04-24 17:27:06 UTC
Hi,

This patch series adds support for jitted seccomp BPF filters, with
the required modifications to make it work on the ARM architecture.

- The first patch in the series adds the required boilerplate in the
core kernel seccomp code to invoke the JIT compilation/free code.

- The second patch reworks the ARM BPF JIT code to make the generation
process less dependent on struct sk_filter.

- The last patch actually implements the ARM part in the BPF JIT code.

Some benchmarks, on a 1.6 GHz 88f6282 CPU:

Each system call is tested in two ways (fast/slow):

- on the fast version, the tested system call is accepted immediately
after checking the architecture (5 BPF instructions; see the sketch
just after this list).

- on the slow version, the tested system call is accepted only after
first checking against 85 other syscalls (90 instructions in total,
including the architecture check).
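
For illustration, a classic-BPF filter matching the fast case could
look like the following userspace sketch (the actual benchmark filters
are not included in this mail; AUDIT_ARCH_ARM is assumed since the
tests ran on ARM):

#include <stddef.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* 5 instructions: check arch, kill on mismatch, load nr, allow */
struct sock_filter fast_filter[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                 offsetof(struct seccomp_data, arch)),
        BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_ARM, 1, 0),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                 offsetof(struct seccomp_data, nr)),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};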

The tested syscall is invoked in a loop 1,000,000 times; the reported
time is the time spent in the loop, in seconds.
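
The measurement harness is not included in the mail either; a minimal
sketch of such a loop (standard seccomp-filter installation via
prctl(), reusing fast_filter[] from the sketch above — not the
author's exact code) could be:

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
        struct sock_fprog prog = {
                .len = sizeof(fast_filter) / sizeof(fast_filter[0]),
                .filter = fast_filter,
        };
        struct timeval start, end;
        int i;

        /* required to install a filter without CAP_SYS_ADMIN */
        prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);

        gettimeofday(&start, NULL);
        for (i = 0; i < 1000000; i++)
                syscall(SYS_getpid);    /* bypass libc pid caching */
        gettimeofday(&end, NULL);

        printf("%.3f\n", (end.tv_sec - start.tv_sec) +
                         (end.tv_usec - start.tv_usec) / 1e6);
        return 0;
}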

Without Seccomp JIT:

Syscall Time-Fast Time-Slow
--------------- ---------- ----------
gettimeofday 0.389 1.633
getpid 0.406 1.688
getresuid 1.003 2.266
getcwd 1.342 2.128

With Seccomp JIT:

Syscall Time-Fast Time-Slow
--------------- ----------- ---------
gettimeofday 0.348 0.428
getpid 0.365 0.480
getresuid 0.981 1.060
getcwd 1.237 1.294

For reference, the same code without any seccomp filter:

Syscall Time
--------------- -----
gettimeofday 0.119
getpid 0.137
getresuid 0.747
getcwd 1.021

The activation of the BPF JIT for seccomp is still controlled with the
/proc/sys/net/core/bpf_jit_enable sysctl knob.

Those changes are based on the latest rmk-for-next branch.

V2 Changes:
- Document the @bpf_func field in struct seccomp_filter as recommended
by Kees Cook.
- Invoke seccomp_bpf_load directly from generated code without going
via a wrapper.

V3 Changes:
- Add accessors giving access to the fields in struct seccomp_filter,
as recommended by Andrew Morton. This avoids having to include
<linux/filter.h> in <linux/seccomp.h>, and fixes broken include
dependencies on x86_64 allmodconfig builds.
- Checkpatch fixes: (void*) -> (void *).

Regards,
Nicolas Schichan
2013-04-24 17:27:07 UTC
An architecture must select HAVE_SECCOMP_FILTER_JIT and implement
seccomp_jit_compile() and seccomp_jit_free() if it intends to support
jitted seccomp filters.

Accessors to the struct seccomp_filter fields are provided in
<linux/seccomp.h> to be used by the JIT code. With this, the 'struct
seccomp_filter' structure can stay opaque outside kernel/seccomp.c.

In a way similar to the net BPF, the JIT compilation code is expected
to update the struct seccomp_filter bpf_func pointer (using the
provided accessor) to point to the generated code.
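
Concretely, an implementing architecture is expected to do something
along these lines (a sketch only; arch_gen_bpf_image() is a
hypothetical stand-in for the arch-specific code generator, the real
ARM one being added in patch 3):

void seccomp_jit_compile(struct seccomp_filter *fp)
{
        struct sock_filter *insns = seccomp_filter_get_insns(fp);
        unsigned short len = seccomp_filter_get_len(fp);
        void *image;

        image = arch_gen_bpf_image(insns, len); /* hypothetical */
        if (image)
                seccomp_filter_set_bpf_func(fp, image);
        /* on failure, bpf_func keeps pointing at sk_run_filter */
}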

Signed-off-by: Nicolas Schichan <***@freebox.fr>
---
arch/Kconfig | 14 ++++++++++++++
include/linux/seccomp.h | 18 ++++++++++++++++++
kernel/seccomp.c | 33 ++++++++++++++++++++++++++++++++-
3 files changed, 64 insertions(+), 1 deletion(-)

diff --git a/arch/Kconfig b/arch/Kconfig
index 1455579..370bc52 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -332,6 +332,10 @@ config HAVE_ARCH_SECCOMP_FILTER
- secure_computing return value is checked and a return value of -1
results in the system call being skipped immediately.

+# Used by archs to tell that they support SECCOMP_FILTER_JIT
+config HAVE_SECCOMP_FILTER_JIT
+ bool
+
config SECCOMP_FILTER
def_bool y
depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
@@ -342,6 +346,16 @@ config SECCOMP_FILTER

See Documentation/prctl/seccomp_filter.txt for details.

+config SECCOMP_FILTER_JIT
+ bool "enable Seccomp filter Just In Time compiler"
+ depends on HAVE_SECCOMP_FILTER_JIT && BPF_JIT && SECCOMP_FILTER
+ help
+ Seccomp syscall filtering capabilities are normally handled
+ by an interpreter. This option allows the kernel to generate
+ native code when a filter is loaded into memory. This should
+ speed up syscall filtering. Note: the admin must enable this
+ feature by changing /proc/sys/net/core/bpf_jit_enable
+
config HAVE_CONTEXT_TRACKING
bool
help
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 6f19cfd..2793607 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -47,6 +47,24 @@ static inline int seccomp_mode(struct seccomp *s)
return s->mode;
}

+#ifdef CONFIG_SECCOMP_FILTER_JIT
+extern void seccomp_jit_compile(struct seccomp_filter *fp);
+extern void seccomp_jit_free(struct seccomp_filter *fp);
+
+/*
+ * accessors used by seccomp JIT code to avoid having to declare the
+ * seccomp_filter struct here.
+ */
+unsigned short seccomp_filter_get_len(struct seccomp_filter *fp);
+struct sock_filter *seccomp_filter_get_insns(struct seccomp_filter *fp);
+void seccomp_filter_set_bpf_func(struct seccomp_filter *fp, void *func);
+void *seccomp_filter_get_bpf_func(struct seccomp_filter *fp);
+
+#else
+static inline void seccomp_jit_compile(struct seccomp_filter *fp) { }
+static inline void seccomp_jit_free(struct seccomp_filter *fp) { }
+#endif
+
#else /* CONFIG_SECCOMP */

#include <linux/errno.h>
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index b7a1004..2377414 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -39,6 +39,8 @@
* is only needed for handling filters shared across tasks.
* @prev: points to a previously installed, or inherited, filter
* @len: the number of instructions in the program
+ * @bpf_func: points to either sk_run_filter or the code generated
+ * by the BPF JIT.
* @insns: the BPF program instructions to evaluate
*
* seccomp_filter objects are organized in a tree linked via the @prev
@@ -55,6 +57,8 @@ struct seccomp_filter {
atomic_t usage;
struct seccomp_filter *prev;
unsigned short len; /* Instruction count */
+ unsigned int (*bpf_func)(const struct sk_buff *skb,
+ const struct sock_filter *filter);
struct sock_filter insns[];
};

@@ -213,7 +217,7 @@ static u32 seccomp_run_filters(int syscall)
* value always takes priority (ignoring the DATA).
*/
for (f = current->seccomp.filter; f; f = f->prev) {
- u32 cur_ret = sk_run_filter(NULL, f->insns);
+ u32 cur_ret = f->bpf_func(NULL, f->insns);
if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
ret = cur_ret;
}
@@ -275,6 +279,9 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
if (ret)
goto fail;

+ filter->bpf_func = sk_run_filter;
+ seccomp_jit_compile(filter);
+
/*
* If there is an existing filter, make it the prev and don't drop its
* task reference.
@@ -332,10 +339,34 @@ void put_seccomp_filter(struct task_struct *tsk)
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
orig = orig->prev;
+ seccomp_jit_free(freeme);
kfree(freeme);
}
}

+/*
+ * seccomp filter accessors for JIT code.
+ */
+unsigned short seccomp_filter_get_len(struct seccomp_filter *fp)
+{
+ return fp->len;
+}
+
+struct sock_filter *seccomp_filter_get_insns(struct seccomp_filter *fp)
+{
+ return fp->insns;
+}
+
+void seccomp_filter_set_bpf_func(struct seccomp_filter *fp, void *func)
+{
+ fp->bpf_func = func;
+}
+
+void *seccomp_filter_get_bpf_func(struct seccomp_filter *fp)
+{
+ return fp->bpf_func;
+}
+
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
--
1.7.10.4
Nicolas Schichan
2013-04-24 17:27:08 UTC
This is in preparation for bpf_jit support for seccomp filters.

Signed-off-by: Nicolas Schichan <***@freebox.fr>
---
arch/arm/net/bpf_jit_32.c | 46 ++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a0bd8a7..bb66a2b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -55,7 +55,8 @@
#define FLAG_NEED_X_RESET (1 << 0)

struct jit_ctx {
- const struct sk_filter *skf;
+ unsigned short prog_len;
+ struct sock_filter *prog_insns;
unsigned idx;
unsigned prologue_bytes;
int ret0_fp_idx;
@@ -131,8 +132,8 @@ static u16 saved_regs(struct jit_ctx *ctx)
{
u16 ret = 0;

- if ((ctx->skf->len > 1) ||
- (ctx->skf->insns[0].code == BPF_S_RET_A))
+ if ((ctx->prog_len > 1) ||
+ (ctx->prog_insns[0].code == BPF_S_RET_A))
ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
@@ -181,7 +182,7 @@ static inline bool is_load_to_a(u16 inst)
static void build_prologue(struct jit_ctx *ctx)
{
u16 reg_set = saved_regs(ctx);
- u16 first_inst = ctx->skf->insns[0].code;
+ u16 first_inst = ctx->prog_insns[0].code;
u16 off;

#ifdef CONFIG_FRAME_POINTER
@@ -279,7 +280,7 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
ctx->imms[i] = k;

/* constants go just after the epilogue */
- offset = ctx->offsets[ctx->skf->len];
+ offset = ctx->offsets[ctx->prog_len];
offset += ctx->prologue_bytes;
offset += ctx->epilogue_bytes;
offset += i * 4;
@@ -419,7 +420,7 @@ static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
} else {
_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
- _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
+ _emit(cond, ARM_B(b_imm(ctx->prog_len, ctx)), ctx);
}
}

@@ -469,14 +470,13 @@ static inline void update_on_xread(struct jit_ctx *ctx)
static int build_body(struct jit_ctx *ctx)
{
void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
- const struct sk_filter *prog = ctx->skf;
const struct sock_filter *inst;
unsigned i, load_order, off, condt;
int imm12;
u32 k;

- for (i = 0; i < prog->len; i++) {
- inst = &(prog->insns[i]);
+ for (i = 0; i < ctx->prog_len; i++) {
+ inst = &(ctx->prog_insns[i]);
/* K as an immediate value operand */
k = inst->k;

@@ -769,8 +769,8 @@ cmp_x:
ctx->ret0_fp_idx = i;
emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
- if (i != ctx->skf->len - 1)
- emit(ARM_B(b_imm(prog->len, ctx)), ctx);
+ if (i != ctx->prog_len - 1)
+ emit(ARM_B(b_imm(ctx->prog_len, ctx)), ctx);
break;
case BPF_S_MISC_TAX:
/* X = A */
@@ -858,7 +858,7 @@ b_epilogue:
}


-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;

- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;

- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;

@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
out:
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
}

+void bpf_jit_compile(struct sk_filter *fp)
+{
+ struct jit_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog_len = fp->len;
+ ctx.prog_insns = fp->insns;
+
+ __bpf_jit_compile(&ctx);
+ if (ctx.target)
+ fp->bpf_func = (void *)ctx.target;
+}
+
static void bpf_jit_free_worker(struct work_struct *work)
{
module_free(NULL, work);
--
1.7.10.4
Daniel Borkmann
2013-04-24 17:41:34 UTC
Post by Nicolas Schichan
This is in preparation of bpf_jit support for seccomp filters.
Please also CC the netdev list for BPF-related patches.

Just to give you a heads-up, this is likely to lead to a merge
conflict with the net-next tree (commit 79617801ea0c0e66, "filter:
bpf_jit_comp: refactor and unify BPF JIT image dump output").
Post by Nicolas Schichan
---
arch/arm/net/bpf_jit_32.c | 46 ++++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a0bd8a7..bb66a2b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -55,7 +55,8 @@
#define FLAG_NEED_X_RESET (1 << 0)
struct jit_ctx {
- const struct sk_filter *skf;
+ unsigned short prog_len;
+ struct sock_filter *prog_insns;
unsigned idx;
unsigned prologue_bytes;
int ret0_fp_idx;
@@ -131,8 +132,8 @@ static u16 saved_regs(struct jit_ctx *ctx)
{
u16 ret = 0;
- if ((ctx->skf->len > 1) ||
- (ctx->skf->insns[0].code == BPF_S_RET_A))
+ if ((ctx->prog_len > 1) ||
+ (ctx->prog_insns[0].code == BPF_S_RET_A))
ret |= 1 << r_A;
#ifdef CONFIG_FRAME_POINTER
@@ -181,7 +182,7 @@ static inline bool is_load_to_a(u16 inst)
static void build_prologue(struct jit_ctx *ctx)
{
u16 reg_set = saved_regs(ctx);
- u16 first_inst = ctx->skf->insns[0].code;
+ u16 first_inst = ctx->prog_insns[0].code;
u16 off;
#ifdef CONFIG_FRAME_POINTER
@@ -279,7 +280,7 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
ctx->imms[i] = k;
/* constants go just after the epilogue */
- offset = ctx->offsets[ctx->skf->len];
+ offset = ctx->offsets[ctx->prog_len];
offset += ctx->prologue_bytes;
offset += ctx->epilogue_bytes;
offset += i * 4;
@@ -419,7 +420,7 @@ static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
} else {
_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
- _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
+ _emit(cond, ARM_B(b_imm(ctx->prog_len, ctx)), ctx);
}
}
@@ -469,14 +470,13 @@ static inline void update_on_xread(struct jit_ctx *ctx)
static int build_body(struct jit_ctx *ctx)
{
void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
- const struct sk_filter *prog = ctx->skf;
const struct sock_filter *inst;
unsigned i, load_order, off, condt;
int imm12;
u32 k;
- for (i = 0; i < prog->len; i++) {
- inst = &(prog->insns[i]);
+ for (i = 0; i < ctx->prog_len; i++) {
+ inst = &(ctx->prog_insns[i]);
/* K as an immediate value operand */
k = inst->k;
ctx->ret0_fp_idx = i;
emit_mov_i(ARM_R0, k, ctx);
- if (i != ctx->skf->len - 1)
- emit(ARM_B(b_imm(prog->len, ctx)), ctx);
+ if (i != ctx->prog_len - 1)
+ emit(ARM_B(b_imm(ctx->prog_len, ctx)), ctx);
break;
/* X = A */
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
}
+void bpf_jit_compile(struct sk_filter *fp)
+{
+ struct jit_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog_len = fp->len;
+ ctx.prog_insns = fp->insns;
+
+ __bpf_jit_compile(&ctx);
+ if (ctx.target)
+ fp->bpf_func = (void *)ctx.target;
+}
+
static void bpf_jit_free_worker(struct work_struct *work)
{
module_free(NULL, work);
Arnd Bergmann
2013-04-26 14:04:44 UTC
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
This part of the patch, in combination with 79617801e ("filter:
bpf_jit_comp: refactor and unify BPF JIT image dump output"), is now
causing build errors in linux-next:

arch/arm/net/bpf_jit_32.c: In function '__bpf_jit_compile':
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
^


Arnd
Andrew Morton
2013-04-26 19:26:01 UTC
Post by Arnd Bergmann
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
refactor and unify BPF JIT image dump output" is now causing build errors
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
Thanks, I did this. There may be a smarter way...

--- a/arch/arm/net/bpf_jit_32.c~arm-net-bpf_jit-make-code-generation-less-dependent-on-struct-sk_filter-fix
+++ a/arch/arm/net/bpf_jit_32.c
@@ -858,7 +858,7 @@ b_epilogue:
}


-static void __bpf_jit_compile(struct jit_ctx *out_ctx)
+static void __bpf_jit_compile(struct sk_filter *fp, struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -934,7 +934,7 @@ void bpf_jit_compile(struct sk_filter *f
ctx.prog_len = fp->len;
ctx.prog_insns = fp->insns;

- __bpf_jit_compile(&ctx);
+ __bpf_jit_compile(fp, &ctx);
if (ctx.target)
fp->bpf_func = (void *)ctx.target;
}
_
Daniel Borkmann
2013-04-26 19:47:46 UTC
Post by Andrew Morton
Post by Arnd Bergmann
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
refactor and unify BPF JIT image dump output" is now causing build errors
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
Thanks, I did this. There may be a smarter way...
I think also seccomp_jit_compile() would need this change then, otherwise the build
with CONFIG_SECCOMP_FILTER_JIT might break.

I can fix this up for you if not already applied. I presume it's against
linux-next tree?
Post by Andrew Morton
--- a/arch/arm/net/bpf_jit_32.c~arm-net-bpf_jit-make-code-generation-less-dependent-on-struct-sk_filter-fix
+++ a/arch/arm/net/bpf_jit_32.c
}
-static void __bpf_jit_compile(struct jit_ctx *out_ctx)
+static void __bpf_jit_compile(struct sk_filter *fp, struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -934,7 +934,7 @@ void bpf_jit_compile(struct sk_filter *f
ctx.prog_len = fp->len;
ctx.prog_insns = fp->insns;
- __bpf_jit_compile(&ctx);
+ __bpf_jit_compile(fp, &ctx);
if (ctx.target)
fp->bpf_func = (void *)ctx.target;
}
_
Andrew Morton
2013-04-26 20:09:48 UTC
Post by Daniel Borkmann
Post by Andrew Morton
Post by Arnd Bergmann
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
refactor and unify BPF JIT image dump output" is now causing build errors
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
Thanks, I did this. There may be a smarter way...
I think also seccomp_jit_compile() would need this change then, otherwise the build
with CONFIG_SECCOMP_FILTER_JIT might break.
urgh, that tears it.
Post by Daniel Borkmann
I can fix this up for you if not already applied. I presume it's against
linux-next tree?
Yup, please send something.
Daniel Borkmann
2013-04-26 22:01:10 UTC
Post by Andrew Morton
Post by Daniel Borkmann
Post by Andrew Morton
Post by Arnd Bergmann
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
refactor and unify BPF JIT image dump output" is now causing build errors
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
Thanks, I did this. There may be a smarter way...
I think also seccomp_jit_compile() would need this change then, otherwise the build
with CONFIG_SECCOMP_FILTER_JIT might break.
urgh, that tears it.
Post by Daniel Borkmann
I can fix this up for you if not already applied. I presume it's against
linux-next tree?
Yup, please send something.
Patch is attached. However, I currently don't have an ARM toolchain at
hand, so it is uncompiled and untested.

@Nicolas, Xi (cc, ref: http://thread.gmane.org/gmane.linux.kernel/1481464):

If there is someday support for other archs as well, it would be nice
if we did not end up with duplicated seccomp_jit_compile() etc.
functions in each JIT implementation, since they basically do the same
thing. So a follow-up {fix,clean}up would be appreciated.

Also, I find it a bit weird that seccomp_filter_get_len() and some other
_one-line_ functions from kernel/seccomp.c are not placed into the
corresponding header file as inlines.
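
For illustration, that suggestion would look roughly like this in
<linux/seccomp.h> (a sketch; note it requires the struct definition to
be visible in the header, which the V3 accessors deliberately avoid):

static inline unsigned short seccomp_filter_get_len(struct seccomp_filter *fp)
{
        return fp->len;
}

static inline struct sock_filter *
seccomp_filter_get_insns(struct seccomp_filter *fp)
{
        return fp->insns;
}
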
Xi Wang
2013-04-26 22:18:58 UTC
Thanks for CCing. One way to clean this up would be to refactor the
BPF JIT interface as:

bpf_func_t bpf_jit_compile(struct sock_filter *filter, unsigned int flen);
void bpf_jit_free(bpf_func_t bpf_func);

Then both packet and seccomp filters can share the unified interface.
Also, we don't need seccomp_filter_get_len() and other helpers.
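
A sketch of how the two call sites might then look (the signatures are
the ones proposed above; the surrounding attach helpers are
illustrative, not actual tree code):

typedef unsigned int (*bpf_func_t)(const struct sk_buff *skb,
                                   const struct sock_filter *filter);

bpf_func_t bpf_jit_compile(struct sock_filter *filter, unsigned int flen);
void bpf_jit_free(bpf_func_t bpf_func);

/* packet filter path (net/core/filter.c) */
static void sk_filter_try_jit(struct sk_filter *fp)
{
        bpf_func_t f = bpf_jit_compile(fp->insns, fp->len);

        if (f)
                fp->bpf_func = f;       /* else keep sk_run_filter */
}

/* seccomp path (kernel/seccomp.c), no accessors needed */
static void seccomp_try_jit(struct seccomp_filter *filter)
{
        bpf_func_t f = bpf_jit_compile(filter->insns, filter->len);

        if (f)
                filter->bpf_func = f;
}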

Do you want me to rebase my patch against linux-next and see how that goes?

- xi
Post by Daniel Borkmann
Post by Andrew Morton
Post by Daniel Borkmann
Post by Andrew Morton
Post by Arnd Bergmann
Post by Nicolas Schichan
}
-void bpf_jit_compile(struct sk_filter *fp)
+static void __bpf_jit_compile(struct jit_ctx *out_ctx)
{
struct jit_ctx ctx;
unsigned tmp_idx;
@@ -867,11 +867,10 @@ void bpf_jit_compile(struct sk_filter *fp)
if (!bpf_jit_enable)
return;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
+ ctx = *out_ctx;
ctx.ret0_fp_idx = -1;
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
+ ctx.offsets = kzalloc(4 * (ctx.prog_len + 1), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
@@ -921,13 +920,26 @@ void bpf_jit_compile(struct sk_filter *fp)
print_hex_dump(KERN_INFO, "BPF JIT code: ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
alloc_size, false);
-
- fp->bpf_func = (void *)ctx.target;
kfree(ctx.offsets);
+
+ *out_ctx = ctx;
return;
refactor and unify BPF JIT image dump output" is now causing build errors
arch/arm/net/bpf_jit_32.c:930:16: error: 'fp' undeclared (first use in
this function)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
Thanks, I did this. There may be a smarter way...
I think also seccomp_jit_compile() would need this change then, otherwise the build
with CONFIG_SECCOMP_FILTER_JIT might break.
urgh, that tears it.
Post by Daniel Borkmann
I can fix this up for you if not already applied. I presume it's against
linux-next tree?
Yup, please send something.
Patch is attached. However, I currently don't have an ARM toolchain at hand, so
uncompiled, untested.
If there is someday support for other archs as well, it would be nice if we
do not have each time duplicated seccomp_jit_compile() etc functions in each
JIT implementation, i.e. because they do basically the same. So follow-up
{fix,clean}up is appreciated.
Also, I find it a bit weird that seccomp_filter_get_len() and some other
_one-line_ functions from kernel/seccomp.c are not placed into the
corresponding header file as inlines.
Daniel Borkmann
2013-04-26 22:30:19 UTC
Post by Xi Wang
Thanks for CCing. One way to clean up this would be to refactor the
bpf_func_t bpf_jit_compile(struct sock_filter *filter, unsigned int flen);
void bpf_jit_free(bpf_func_t bpf_func);
Then both packet and seccomp filters can share the unified interface.
Also, we don't need seccomp_filter_get_len() and other helpers.
Do you want me to rebase my patch against linux-next and see how that goes?
Sure, whatever works for you. Not sure if it will still make it though.

Also, as Eric already mentioned earlier, please do not top-post your mails!
I think one reminder should be sufficient for that. ;-)
David Miller
2013-04-26 23:33:07 UTC
Please stop top-posting.

Thank you.

Nicolas Schichan
2013-04-24 17:27:09 UTC
This patch selects HAVE_SECCOMP_FILTER_JIT in the ARM Kconfig file,
implements seccomp_jit_compile() and seccomp_jit_free(), and adds
support for the BPF_S_ANC_SECCOMP_LD_W instruction.

BPF_S_ANC_SECCOMP_LD_W instructions trigger the generation of a call
to the C function seccomp_bpf_load(): the generated code passes K in
r0, calls the helper through r3, and moves the returned value into the
BPF accumulator register (r_A).

Signed-off-by: Nicolas Schichan <***@freebox.fr>
---
arch/arm/Kconfig | 1 +
arch/arm/net/bpf_jit_32.c | 37 +++++++++++++++++++++++++++++++++++++
2 files changed, 38 insertions(+)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 60b9876..d2e5d39 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -24,6 +24,7 @@ config ARM
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_BPF_JIT
+ select HAVE_SECCOMP_FILTER_JIT
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index bb66a2b..4b89f50 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -548,6 +548,15 @@ load_common:
emit_err_ret(ARM_COND_NE, ctx);
emit(ARM_MOV_R(r_A, ARM_R0), ctx);
break;
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+ case BPF_S_ANC_SECCOMP_LD_W:
+ ctx->seen |= SEEN_CALL;
+ emit_mov_i(ARM_R3, (u32)seccomp_bpf_load, ctx);
+ emit_mov_i(ARM_R0, k, ctx);
+ emit_blx_r(ARM_R3, ctx);
+ emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+ break;
+#endif
case BPF_S_LD_W_IND:
load_order = 2;
goto load_ind;
@@ -956,3 +965,31 @@ void bpf_jit_free(struct sk_filter *fp)
schedule_work(work);
}
}
+
+#ifdef CONFIG_SECCOMP_FILTER_JIT
+void seccomp_jit_compile(struct seccomp_filter *fp)
+{
+ struct jit_ctx ctx;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog_len = seccomp_filter_get_len(fp);
+ ctx.prog_insns = seccomp_filter_get_insns(fp);
+
+ __bpf_jit_compile(&ctx);
+ if (ctx.target)
+ seccomp_filter_set_bpf_func(fp, (void *)ctx.target);
+}
+
+void seccomp_jit_free(struct seccomp_filter *fp)
+{
+ struct work_struct *work;
+ void *bpf_func = seccomp_filter_get_bpf_func(fp);
+
+ if (bpf_func != sk_run_filter) {
+ work = (struct work_struct *)bpf_func;
+
+ INIT_WORK(work, bpf_jit_free_worker);
+ schedule_work(work);
+ }
+}
+#endif
--
1.7.10.4