Merge branch 'bpf-next'

Daniel Borkmann says:

====================
BPF updates

[ Set applies on top of current net-next but also on top of
  Alexei's latest patches. Please see individual patches for
  more details. ]

Changelog:
 v1->v2:
  - Removed paragraph in 1st commit message
  - Rest stays the same
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2014-09-09 16:59:03 -07:00
commit 60005c60b1
9 changed files with 108 additions and 91 deletions


@@ -12,7 +12,6 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -174,6 +173,15 @@ static inline bool is_load_to_a(u16 inst)
}
}
static void jit_fill_hole(void *area, unsigned int size)
{
/* Insert illegal UND instructions. */
u32 *ptr, fill_ins = 0xe7ffffff;
/* We are guaranteed to have aligned memory. */
for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
*ptr++ = fill_ins;
}
static void build_prologue(struct jit_ctx *ctx)
{
u16 reg_set = saved_regs(ctx);
@@ -859,9 +867,11 @@ b_epilogue:
void bpf_jit_compile(struct bpf_prog *fp)
{
struct bpf_binary_header *header;
struct jit_ctx ctx;
unsigned tmp_idx;
unsigned alloc_size;
u8 *target_ptr;
if (!bpf_jit_enable)
return;
@@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
/* there's nothing after the epilogue on ARMv7 */
build_epilogue(&ctx);
#endif
alloc_size = 4 * ctx.idx;
ctx.target = module_alloc(alloc_size);
if (unlikely(ctx.target == NULL))
header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
4, jit_fill_hole);
if (header == NULL)
goto out;
ctx.target = (u32 *) target_ptr;
ctx.idx = 0;
build_prologue(&ctx);
build_body(&ctx);
build_epilogue(&ctx);
@@ -919,8 +931,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
/* there are 2 passes here */
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *)ctx.target;
fp->jited = 1;
fp->jited = true;
out:
kfree(ctx.offsets);
return;
@@ -928,8 +941,15 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
module_free(NULL, fp->bpf_func);
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr;
if (!fp->jited)
goto free_filter;
set_memory_rw(addr, header->pages);
bpf_jit_binary_free(header);
free_filter:
bpf_prog_unlock_free(fp);
}


@@ -1417,7 +1417,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
fp->bpf_func = (void *)ctx.target;
fp->jited = 1;
fp->jited = true;
out:
kfree(ctx.offsets);


@@ -686,7 +686,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
fp->bpf_func = (void *)image;
fp->jited = 1;
fp->jited = true;
}
out:
kfree(addrs);


@@ -5,11 +5,9 @@
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <linux/random.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -148,6 +146,12 @@ struct bpf_jit {
ret; \
})
static void bpf_jit_fill_hole(void *area, unsigned int size)
{
/* Fill whole space with illegal instructions */
memset(area, 0, size);
}
static void bpf_jit_prologue(struct bpf_jit *jit)
{
/* Save registers and create stack frame if necessary */
@@ -780,38 +784,6 @@ out:
return -1;
}
/*
* Note: for security reasons, bpf code will follow a randomly
* sized amount of illegal instructions.
*/
struct bpf_binary_header {
unsigned int pages;
u8 image[];
};
static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
u8 **image_ptr)
{
struct bpf_binary_header *header;
unsigned int sz, hole;
/* Most BPF filters are really small, but if some of them fill a page,
* allow at least 128 extra bytes for illegal instructions.
*/
sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
header = module_alloc(sz);
if (!header)
return NULL;
memset(header, 0, sz);
header->pages = sz / PAGE_SIZE;
hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
/* Insert random number of illegal instructions before BPF code
* and make sure the first instruction starts at an even address.
*/
*image_ptr = &header->image[(prandom_u32() % hole) & -2];
return header;
}
void bpf_jit_compile(struct bpf_prog *fp)
{
struct bpf_binary_header *header = NULL;
@@ -850,7 +822,8 @@ void bpf_jit_compile(struct bpf_prog *fp)
size = prg_len + lit_len;
if (size >= BPF_SIZE_MAX)
goto out;
header = bpf_alloc_binary(size, &jit.start);
header = bpf_jit_binary_alloc(size, &jit.start,
2, bpf_jit_fill_hole);
if (!header)
goto out;
jit.prg = jit.mid = jit.start + prg_len;
@@ -869,7 +842,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
if (jit.start) {
set_memory_ro((unsigned long)header, header->pages);
fp->bpf_func = (void *) jit.start;
fp->jited = 1;
fp->jited = true;
}
out:
kfree(addrs);
@@ -884,7 +857,7 @@ void bpf_jit_free(struct bpf_prog *fp)
goto free_filter;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
bpf_jit_binary_free(header);
free_filter:
bpf_prog_unlock_free(fp);


@@ -801,7 +801,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (image) {
bpf_flush_icache(image, image + proglen);
fp->bpf_func = (void *)image;
fp->jited = 1;
fp->jited = true;
}
out:
kfree(addrs);


@@ -8,12 +8,10 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <asm/cacheflush.h>
int bpf_jit_enable __read_mostly;
@@ -109,39 +107,6 @@ static inline void bpf_flush_icache(void *start, void *end)
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
struct bpf_binary_header {
unsigned int pages;
/* Note : for security reasons, bpf code will follow a randomly
* sized amount of int3 instructions
*/
u8 image[];
};
static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
u8 **image_ptr)
{
unsigned int sz, hole;
struct bpf_binary_header *header;
/* Most of BPF filters are really small,
* but if some of them fill a page, allow at least
* 128 extra bytes to insert a random section of int3
*/
sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
header = module_alloc(sz);
if (!header)
return NULL;
memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
header->pages = sz / PAGE_SIZE;
hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
/* insert a random number of int3 instructions before BPF code */
*image_ptr = &header->image[prandom_u32() % hole];
return header;
}
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)
@@ -206,6 +171,12 @@ static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
static void jit_fill_hole(void *area, unsigned int size)
{
/* fill whole space with int3 instructions */
memset(area, 0xcc, size);
}
struct jit_context {
unsigned int cleanup_addr; /* epilogue code offset */
bool seen_ld_abs;
@@ -959,7 +930,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (proglen <= 0) {
image = NULL;
if (header)
module_free(NULL, header);
bpf_jit_binary_free(header);
goto out;
}
if (image) {
@@ -969,7 +940,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
break;
}
if (proglen == oldproglen) {
header = bpf_alloc_binary(proglen, &image);
header = bpf_jit_binary_alloc(proglen, &image,
1, jit_fill_hole);
if (!header)
goto out;
}
@@ -983,7 +955,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
bpf_flush_icache(header, image + proglen);
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)image;
prog->jited = 1;
prog->jited = true;
}
out:
kfree(addrs);
@@ -998,7 +970,7 @@ void bpf_jit_free(struct bpf_prog *fp)
goto free_filter;
set_memory_rw(addr, header->pages);
module_free(NULL, header);
bpf_jit_binary_free(header);
free_filter:
bpf_prog_unlock_free(fp);


@@ -289,15 +289,20 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
struct bpf_binary_header {
unsigned int pages;
u8 image[];
};
struct bpf_work_struct {
struct bpf_prog *prog;
struct work_struct work;
};
struct bpf_prog {
u32 pages; /* Number of allocated pages */
u32 jited:1, /* Is our filter JIT'ed? */
len:31; /* Number of filter blocks */
u16 pages; /* Number of allocated pages */
bool jited; /* Is our filter JIT'ed? */
u32 len; /* Number of filter blocks */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
struct bpf_work_struct *work; /* Deferred free work struct */
unsigned int (*bpf_func)(const struct sk_buff *skb,
@@ -358,6 +363,14 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
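
For reference, the new allocator is meant to be driven from each arch JIT roughly as sketched below. This is an illustrative distillation of the arm, s390 and x86 hunks in this series, not code from the patch; the example_* names, the 0xcc fill byte and the alignment of 1 are placeholders an arch would replace with its own values.

#include <linux/filter.h>       /* bpf_jit_binary_alloc(), bpf_jit_binary_free() */
#include <linux/string.h>       /* memset() */
#include <asm/cacheflush.h>     /* set_memory_ro(), set_memory_rw() */

/* Arch-specific callback: overwrite unused space with trapping instructions. */
static void example_jit_fill_hole(void *area, unsigned int size)
{
	memset(area, 0xcc, size);  /* e.g. int3 on x86; use the arch's illegal opcode */
}

static void example_jit_compile(struct bpf_prog *fp, unsigned int proglen)
{
	struct bpf_binary_header *header;
	u8 *image;

	header = bpf_jit_binary_alloc(proglen, &image, 1, example_jit_fill_hole);
	if (header == NULL)
		return;  /* fall back to the interpreter */

	/* ... emit native instructions into image[0..proglen) ... */

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)image;
	fp->jited = true;
}

static void example_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (fp->jited) {
		set_memory_rw(addr, header->pages);
		bpf_jit_binary_free(header);
	}
	bpf_prog_unlock_free(fp);
}
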
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
bpf_prog_unlock_ro(fp);


@@ -20,9 +20,12 @@
* Andi Kleen - Fix a few bad bugs and races.
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
*/
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <asm/unaligned.h>
/* Registers */
@@ -125,6 +128,42 @@ void __bpf_prog_free(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_binary_header *hdr;
unsigned int size, hole, start;
/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
*/
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
hdr = module_alloc(size);
if (hdr == NULL)
return NULL;
/* Fill space with illegal/arch-dep instructions. */
bpf_fill_ill_insns(hdr, size);
hdr->pages = size / PAGE_SIZE;
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
start = (prandom_u32() % hole) & ~(alignment - 1);
/* Leave a random number of instructions before BPF code. */
*image_ptr = &hdr->image[start];
return hdr;
}
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
module_free(NULL, hdr);
}
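
As a worked example of the sizing above (illustrative numbers only, assuming 4 KiB pages and a 4-byte struct bpf_binary_header): for a 300-byte image, size = round_up(300 + 4 + 128, 4096) = 4096, so hdr->pages = 1; hole = min(4096 - 304, 4096 - 4) = 3792, and the image starts at a random, alignment-masked offset inside those 3792 bytes, with every byte the JIT does not emit left as the fill pattern written by bpf_fill_ill_insns().
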
/* Base function for offset calculation. Needs to go into .text section,
* therefore keeping it non-static as well; will also be used by JITs
* anyway later on, so do not let the compiler omit it.


@@ -972,7 +972,7 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
int err;
fp->bpf_func = NULL;
fp->jited = 0;
fp->jited = false;
err = bpf_check_classic(fp->insns, fp->len);
if (err) {