nfp: bpf: move translation prepare to offload.c

struct nfp_prog is currently only used internally by the translator.
This means there is a lot of parameter passing going on, between
the translator and different stages of offload.  Simplify things
by allocating nfp_prog in offload.c already.

We will now use kmalloc() to allocate the program area and only
DMA map it for the time of loading (instead of allocating DMA
coherent memory upfront).

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Jakub Kicinski 2017-11-03 13:56:28 -07:00 committed by David S. Miller
parent c1c88eae8a
commit 9314c442d7
3 changed files with 94 additions and 91 deletions

View File

@@ -2245,58 +2245,27 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
/**
* nfp_bpf_jit() - translate BPF code into NFP assembly
* @nfp_prog: nfp_prog prepared based on @filter
* @filter: kernel BPF filter struct
* @prog_mem: memory to store assembler instructions
* @prog_start: offset of the first instruction when loaded
* @prog_done: where to jump on exit
* @prog_sz: size of @prog_mem in instructions
* @res: achieved parameters of translation results
*/
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res)
int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter)
{
struct nfp_prog *nfp_prog;
int ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return -ENOMEM;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = filter->type;
nfp_prog->start_off = prog_start;
nfp_prog->tgt_done = prog_done;
ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
if (ret)
goto out;
ret = nfp_prog_verify(nfp_prog, filter);
if (ret)
goto out;
return ret;
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
goto out;
nfp_prog->prog = prog_mem;
nfp_prog->__prog_alloc_len = prog_sz;
return ret;
ret = nfp_translate(nfp_prog);
if (ret) {
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
ret = -EINVAL;
goto out;
return -EINVAL;
}
ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);
res->n_instr = nfp_prog->prog_len;
out:
nfp_prog_free(nfp_prog);
return ret;
return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
}

View File

@@ -169,19 +169,7 @@ struct nfp_prog {
struct list_head insns;
};
struct nfp_bpf_result {
unsigned int n_instr;
};
int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt);
void nfp_prog_free(struct nfp_prog *nfp_prog);
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res);
int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter);
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);

View File

@@ -51,7 +51,7 @@
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
int
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt)
{
@@ -73,7 +73,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
return 0;
}
void nfp_prog_free(struct nfp_prog *nfp_prog)
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *tmp;
@@ -84,25 +84,36 @@ void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
struct nfp_bpf_result *res,
void **code, dma_addr_t *dma_addr, u16 max_instr)
static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
unsigned int code_sz = max_instr * sizeof(u64);
unsigned int stack_size;
u16 start_off, done_off;
unsigned int max_mtu;
struct nfp_prog *nfp_prog;
int ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return -EOPNOTSUPP;
}
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return NULL;
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
if (ret)
goto err_free;
return nfp_prog;
err_free:
nfp_prog_free(nfp_prog);
return NULL;
}
static int
nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
unsigned int stack_size;
unsigned int max_instr;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -111,28 +122,68 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
return -EOPNOTSUPP;
}
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
nfp_prog->stack_depth = prog->aux->stack_depth;
nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM;
ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res);
if (ret)
goto out;
return nfp_bpf_jit(nfp_prog, prog);
}
static void nfp_bpf_destroy(struct nfp_prog *nfp_prog)
{
kfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
}
static struct nfp_prog *
nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
dma_addr_t *dma_addr)
{
struct nfp_prog *nfp_prog;
unsigned int max_mtu;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return NULL;
}
nfp_prog = nfp_bpf_verifier_prep(prog);
if (!nfp_prog)
return NULL;
err = nfp_bpf_translate(nn, nfp_prog, prog);
if (err)
goto err_destroy_prog;
*dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
if (dma_mapping_error(nn->dp.dev, *dma_addr))
goto err_destroy_prog;
return 0;
out:
dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
err_destroy_prog:
nfp_bpf_destroy(nfp_prog);
return NULL;
}
static void
nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr,
unsigned int code_sz, unsigned int n_instr)
nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog,
dma_addr_t dma_addr)
{
int err;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
/* Load up the JITed code */
@@ -140,7 +191,9 @@ nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr,
if (err)
nn_err(nn, "FW command error while loading BPF: %d\n", err);
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
nfp_bpf_destroy(nfp_prog);
}
static void nfp_net_bpf_start(struct nfp_net *nn)
@@ -169,11 +222,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog)
{
struct nfp_bpf_result res;
struct nfp_prog *nfp_prog;
dma_addr_t dma_addr;
u16 max_instr;
void *code;
int err;
if (prog && old_prog) {
u8 cap;
@@ -192,15 +242,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr);
if (!nfp_prog)
return -EINVAL;
err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, &dma_addr,
max_instr);
if (err)
return err;
nfp_net_bpf_load(nn, code, dma_addr, max_instr * sizeof(u64),
res.n_instr);
nfp_net_bpf_load(nn, nfp_prog, dma_addr);
if (!old_prog)
nfp_net_bpf_start(nn);