bpf: pre-allocate hash map elements

If a kprobe is placed on spin_unlock, then calling kmalloc/kfree from
bpf programs is not safe, since the following deadlock is possible:
kfree->spin_lock(kmem_cache_node->lock)...spin_unlock->kprobe->
bpf_prog->map_update->kmalloc->spin_lock(of the same kmem_cache_node->lock).

The following solutions were considered, and some were implemented, but
eventually discarded:
- kmem_cache_create for every map
- add a recursion check to the slow path of slub
- use reserved memory in bpf_map_update when in_irq or preemption is disabled
- kmalloc via irq_work

In the end, pre-allocation of all map elements turned out to be the simplest
solution, and since the user is charged upfront for all the memory, such
pre-allocation doesn't affect user-space-visible behavior.

Since it's impossible to tell whether a kprobe has been triggered in a
location that is safe from kmalloc's point of view, use pre-allocation by
default and introduce a new BPF_F_NO_PREALLOC flag to opt out of it.
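
For illustration only (not part of this patch), here is a minimal user-space
sketch of passing the new flag at map creation time; the create_hash_map()
helper and its parameters are made up for this example:

#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* create a hash map, optionally opting out of element pre-allocation */
static int create_hash_map(unsigned int max_entries, bool no_prealloc)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u64);
	attr.max_entries = max_entries;
	/* map_flags == 0 (the default) means all elements are pre-allocated */
	attr.map_flags = no_prealloc ? BPF_F_NO_PREALLOC : 0;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}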

While testing per-cpu hash maps, it was discovered that
alloc_percpu(GFP_ATOMIC) has odd corner cases and often fails to
allocate memory even when 90% of it is free.
Pre-allocation of per-cpu hash elements solves this problem as well.

It turned out that bpf_map_update() quickly followed by
bpf_map_lookup() + bpf_map_delete() is a very common pattern used
in many of the iovisor/bcc tools, so there is an additional benefit to
pre-allocation, since such use cases become much faster.
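
As an illustration of that pattern (not part of this patch; the traced
function, map name and program names below are made up, and this is a
samples/bpf-style C sketch rather than an actual bcc tool), a program that
stores a timestamp in the entry kprobe and looks it up and deletes it in the
return kprobe looks roughly like this:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") start_ts = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64),
	.max_entries = 10000,
};

SEC("kprobe/blk_start_request")
int trace_entry(struct pt_regs *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid();
	__u64 ts = bpf_ktime_get_ns();

	/* map_update -> alloc_htab_elem(): with pre-allocation this pops an
	 * element from the per-cpu freelist instead of calling kmalloc()
	 */
	bpf_map_update_elem(&start_ts, &pid, &ts, BPF_ANY);
	return 0;
}

SEC("kretprobe/blk_start_request")
int trace_return(struct pt_regs *ctx)
{
	__u32 pid = bpf_get_current_pid_tgid();
	__u64 *tsp = bpf_map_lookup_elem(&start_ts, &pid);

	if (tsp) {
		__u64 delta = bpf_ktime_get_ns() - *tsp;

		/* delete returns the element to the freelist */
		bpf_map_delete_elem(&start_ts, &pid);
		/* ... aggregate delta into a histogram map ... */
	}
	return 0;
}

char _license[] SEC("license") = "GPL";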

Since all hash map elements are now pre-allocated, we can remove the
atomic increment of htab->count and save a few more cycles.

Also add bpf_map_precharge_memlock() to check RLIMIT_MEMLOCK early, to avoid
a large malloc/free by users who don't have sufficient limits.

Pre-allocation is done with vmalloc, and element alloc/free is done via
percpu_freelist (a simplified sketch of the freelist idea follows the
numbers below). Here are performance numbers for the different
pre-allocation algorithms that were implemented but discarded
in favor of percpu_freelist:

1 cpu:
pcpu_ida	2.1M
pcpu_ida nolock	2.3M
bt		2.4M
kmalloc		1.8M
hlist+spinlock	2.3M
pcpu_freelist	2.6M

4 cpu:
pcpu_ida	1.5M
pcpu_ida nolock	1.8M
bt w/smp_align	1.7M
bt no/smp_align	1.1M
kmalloc		0.7M
hlist+spinlock	0.2M
pcpu_freelist	2.0M

8 cpu:
pcpu_ida	0.7M
bt w/smp_align	0.8M
kmalloc		0.4M
pcpu_freelist	1.5M

32 cpu:
kmalloc		0.13M
pcpu_freelist	0.49M

pcpu_ida nolock is a modified percpu_ida algorithm without
percpu_ida_cpu locks and without cross-cpu tag stealing.
It's faster than the existing percpu_ida, but not as fast as pcpu_freelist.

bt is a variant of block/blk-mq-tag.c simplified and customized
for the bpf use case. bt w/smp_align uses a cache line for every 'long'
(similar to blk-mq-tag). bt no/smp_align allocates 'long'
bitmasks contiguously to save memory. It's comparable to percpu_ida
and in some cases faster, but slower than percpu_freelist.

hlist+spinlock is the simplest free list with a single spinlock.
As expected, it scales very badly on SMP.

kmalloc is the existing implementation, which is still available via the
BPF_F_NO_PREALLOC flag. It's significantly slower on a single cpu and,
in the 8 cpu setup, 3 times slower than pre-allocation with pcpu_freelist,
but it saves memory, so in cases where map->max_entries can be large
and the number of map update/delete operations per second is low, it may
make sense to use it.
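
Below is the simplified, illustration-only sketch of the percpu_freelist
idea mentioned above. It is not the actual implementation (the real code
lives in kernel/bpf/percpu_freelist.{c,h} and differs in detail, e.g. pop
starts from the local cpu and the locking is structured differently): one
lock-protected LIFO list per cpu, push to the local cpu's list, pop falls
back to other cpus' lists when the local one is empty.

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

struct pcpu_freelist_node {
	struct pcpu_freelist_node *next;
};

struct pcpu_freelist_head {
	struct pcpu_freelist_node *first;
	raw_spinlock_t lock;
};

struct pcpu_freelist {
	struct pcpu_freelist_head __percpu *freelist;
};

static int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}

/* push the freed element onto this cpu's list; callers hold the bucket
 * lock with irqs disabled, so this_cpu_ptr() is stable here
 */
static void pcpu_freelist_push(struct pcpu_freelist *s,
			       struct pcpu_freelist_node *node)
{
	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
	unsigned long flags;

	raw_spin_lock_irqsave(&head->lock, flags);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock_irqrestore(&head->lock, flags);
}

/* pop an element, scanning other cpus' lists when a list is empty */
static struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *node;
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock_irqsave(&head->lock, flags);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock_irqrestore(&head->lock, flags);
			return node;
		}
		raw_spin_unlock_irqrestore(&head->lock, flags);
	}
	return NULL;
}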

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -37,6 +37,7 @@ struct bpf_map {
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
+	u32 map_flags;
 	u32 pages;
 	struct user_struct *user;
 	const struct bpf_map_ops *ops;
@@ -178,6 +179,7 @@ struct bpf_map *__bpf_map_get(struct fd f);
 void bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
+int bpf_map_precharge_memlock(u32 pages);
 
 extern int sysctl_unprivileged_bpf_disabled;

--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -101,12 +101,15 @@ enum bpf_prog_type {
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
 #define BPF_EXIST	2 /* update existing element */
 
+#define BPF_F_NO_PREALLOC	(1U << 0)
+
 union bpf_attr {
 	struct { /* anonymous struct used by BPF_MAP_CREATE command */
 		__u32	map_type;	/* one of enum bpf_map_type */
 		__u32	key_size;	/* size of key in bytes */
 		__u32	value_size;	/* size of value in bytes */
 		__u32	max_entries;	/* max number of entries in a map */
+		__u32	map_flags;	/* prealloc or not */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */

--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1,4 +1,5 @@
 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2016 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -13,6 +14,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/vmalloc.h>
+#include "percpu_freelist.h"
 
 struct bucket {
 	struct hlist_head head;
@@ -22,6 +24,8 @@ struct bucket {
 struct bpf_htab {
 	struct bpf_map map;
 	struct bucket *buckets;
+	void *elems;
+	struct pcpu_freelist freelist;
 	atomic_t count;	/* number of elements in this hashtable */
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
@@ -29,15 +33,86 @@ struct bpf_htab {
 /* each htab element is struct htab_elem + key + value */
 struct htab_elem {
-	struct hlist_node hash_node;
-	struct rcu_head rcu;
 	union {
-		u32 hash;
-		u32 key_size;
+		struct hlist_node hash_node;
+		struct bpf_htab *htab;
+		struct pcpu_freelist_node fnode;
 	};
+	struct rcu_head rcu;
+	u32 hash;
 	char key[0] __aligned(8);
 };
 
+static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
+				     void __percpu *pptr)
+{
+	*(void __percpu **)(l->key + key_size) = pptr;
+}
+
+static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
+{
+	return *(void __percpu **)(l->key + key_size);
+}
+
+static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
+{
+	return (struct htab_elem *) (htab->elems + i * htab->elem_size);
+}
+
+static void htab_free_elems(struct bpf_htab *htab)
+{
+	int i;
+
+	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
+		goto free_elems;
+
+	for (i = 0; i < htab->map.max_entries; i++) {
+		void __percpu *pptr;
+
+		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
+					 htab->map.key_size);
+		free_percpu(pptr);
+	}
+free_elems:
+	vfree(htab->elems);
+}
+
+static int prealloc_elems_and_freelist(struct bpf_htab *htab)
+{
+	int err = -ENOMEM, i;
+
+	htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+	if (!htab->elems)
+		return -ENOMEM;
+
+	if (htab->map.map_type != BPF_MAP_TYPE_PERCPU_HASH)
+		goto skip_percpu_elems;
+
+	for (i = 0; i < htab->map.max_entries; i++) {
+		u32 size = round_up(htab->map.value_size, 8);
+		void __percpu *pptr;
+
+		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
+		if (!pptr)
+			goto free_elems;
+		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
+				  pptr);
+	}
+
+skip_percpu_elems:
+	err = pcpu_freelist_init(&htab->freelist);
+	if (err)
+		goto free_elems;
+
+	pcpu_freelist_populate(&htab->freelist, htab->elems, htab->elem_size,
+			       htab->map.max_entries);
+	return 0;
+
+free_elems:
+	htab_free_elems(htab);
+	return err;
+}
+
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
@@ -46,6 +121,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	int err, i;
 	u64 cost;
 
+	if (attr->map_flags & ~BPF_F_NO_PREALLOC)
+		/* reserved bits should not be used */
+		return ERR_PTR(-EINVAL);
+
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
@@ -55,6 +134,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	htab->map.key_size = attr->key_size;
 	htab->map.value_size = attr->value_size;
 	htab->map.max_entries = attr->max_entries;
+	htab->map.map_flags = attr->map_flags;
 
 	/* check sanity of attributes.
 	 * value_size == 0 may be allowed in the future to use map as a set
@@ -92,7 +172,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (percpu)
 		htab->elem_size += sizeof(void *);
 	else
-		htab->elem_size += htab->map.value_size;
+		htab->elem_size += round_up(htab->map.value_size, 8);
 
 	/* prevent zero size kmalloc and check for u32 overflow */
 	if (htab->n_buckets == 0 ||
@@ -112,6 +192,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
+	/* if map size is larger than memlock limit, reject it early */
+	err = bpf_map_precharge_memlock(htab->map.pages);
+	if (err)
+		goto free_htab;
+
 	err = -ENOMEM;
 	htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
 				      GFP_USER | __GFP_NOWARN);
@@ -127,10 +212,16 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		raw_spin_lock_init(&htab->buckets[i].lock);
 	}
 
-	atomic_set(&htab->count, 0);
+	if (!(attr->map_flags & BPF_F_NO_PREALLOC)) {
+		err = prealloc_elems_and_freelist(htab);
+		if (err)
+			goto free_buckets;
+	}
 
 	return &htab->map;
 
+free_buckets:
+	kvfree(htab->buckets);
+
 free_htab:
 	kfree(htab);
 	return ERR_PTR(err);
@@ -249,42 +340,42 @@ find_first_elem:
 		}
 	}
 
-	/* itereated over all buckets and all elements */
+	/* iterated over all buckets and all elements */
 	return -ENOENT;
 }
 
-static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
-				     void __percpu *pptr)
+static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 {
-	*(void __percpu **)(l->key + key_size) = pptr;
-}
-
-static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
-{
-	return *(void __percpu **)(l->key + key_size);
-}
-
-static void htab_percpu_elem_free(struct htab_elem *l)
-{
-	free_percpu(htab_elem_get_ptr(l, l->key_size));
+	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
+		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 	kfree(l);
 }
 
-static void htab_percpu_elem_free_rcu(struct rcu_head *head)
+static void htab_elem_free_rcu(struct rcu_head *head)
 {
 	struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+	struct bpf_htab *htab = l->htab;
 
-	htab_percpu_elem_free(l);
+	/* must increment bpf_prog_active to avoid kprobe+bpf triggering while
+	 * we're calling kfree, otherwise deadlock is possible if kprobes
+	 * are placed somewhere inside of slub
	 */
+	preempt_disable();
+	__this_cpu_inc(bpf_prog_active);
+	htab_elem_free(htab, l);
+	__this_cpu_dec(bpf_prog_active);
+	preempt_enable();
 }
 
-static void free_htab_elem(struct htab_elem *l, bool percpu, u32 key_size)
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
-	if (percpu) {
-		l->key_size = key_size;
-		call_rcu(&l->rcu, htab_percpu_elem_free_rcu);
+	if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) {
+		pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
-		kfree_rcu(l, rcu);
+		atomic_dec(&htab->count);
+		l->htab = htab;
+		call_rcu(&l->rcu, htab_elem_free_rcu);
 	}
 }
 
@@ -293,23 +384,39 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 					 bool percpu, bool onallcpus)
 {
 	u32 size = htab->map.value_size;
+	bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC);
 	struct htab_elem *l_new;
 	void __percpu *pptr;
 
-	l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-	if (!l_new)
-		return NULL;
+	if (prealloc) {
+		l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist);
+		if (!l_new)
+			return ERR_PTR(-E2BIG);
+	} else {
+		if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
+			atomic_dec(&htab->count);
+			return ERR_PTR(-E2BIG);
+		}
+		l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+		if (!l_new)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	memcpy(l_new->key, key, key_size);
 	if (percpu) {
 		/* round up value_size to 8 bytes */
 		size = round_up(size, 8);
 
-		/* alloc_percpu zero-fills */
-		pptr = __alloc_percpu_gfp(size, 8, GFP_ATOMIC | __GFP_NOWARN);
-		if (!pptr) {
-			kfree(l_new);
-			return NULL;
+		if (prealloc) {
+			pptr = htab_elem_get_ptr(l_new, key_size);
+		} else {
+			/* alloc_percpu zero-fills */
+			pptr = __alloc_percpu_gfp(size, 8,
						  GFP_ATOMIC | __GFP_NOWARN);
+			if (!pptr) {
+				kfree(l_new);
+				return ERR_PTR(-ENOMEM);
+			}
 		}
 
 		if (!onallcpus) {
@@ -324,7 +431,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 				off += size;
 			}
 		}
-		htab_elem_set_ptr(l_new, key_size, pptr);
+		if (!prealloc)
+			htab_elem_set_ptr(l_new, key_size, pptr);
 	} else {
 		memcpy(l_new->key + round_up(key_size, 8), value, size);
 	}
@@ -336,12 +444,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
 		       u64 map_flags)
 {
-	if (!l_old && unlikely(atomic_read(&htab->count) >= htab->map.max_entries))
-		/* if elem with this 'key' doesn't exist and we've reached
-		 * max_entries limit, fail insertion of new elem
-		 */
-		return -E2BIG;
-
 	if (l_old && map_flags == BPF_NOEXIST)
 		/* elem already exists */
 		return -EEXIST;
@@ -375,13 +477,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	hash = htab_map_hash(key, key_size);
 
-	/* allocate new element outside of the lock, since
-	 * we're most likley going to insert it
-	 */
-	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
-	if (!l_new)
-		return -ENOMEM;
-
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
@@ -394,21 +489,24 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (ret)
 		goto err;
 
+	l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+	if (IS_ERR(l_new)) {
+		/* all pre-allocated elements are in use or memory exhausted */
+		ret = PTR_ERR(l_new);
+		goto err;
+	}
+
 	/* add new element to the head of the list, so that
 	 * concurrent search will find it before old elem
 	 */
 	hlist_add_head_rcu(&l_new->hash_node, head);
 	if (l_old) {
 		hlist_del_rcu(&l_old->hash_node);
-		kfree_rcu(l_old, rcu);
-	} else {
-		atomic_inc(&htab->count);
+		free_htab_elem(htab, l_old);
 	}
-	raw_spin_unlock_irqrestore(&b->lock, flags);
-	return 0;
+	ret = 0;
 err:
 	raw_spin_unlock_irqrestore(&b->lock, flags);
-	kfree(l_new);
 	return ret;
 }
 
@@ -466,12 +564,11 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	} else {
 		l_new = alloc_htab_elem(htab, key, value, key_size,
 					hash, true, onallcpus);
-		if (!l_new) {
-			ret = -ENOMEM;
+		if (IS_ERR(l_new)) {
+			ret = PTR_ERR(l_new);
 			goto err;
 		}
 		hlist_add_head_rcu(&l_new->hash_node, head);
-		atomic_inc(&htab->count);
 	}
 	ret = 0;
 err:
@@ -489,7 +586,6 @@ static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_HASH;
 	struct hlist_head *head;
 	struct bucket *b;
 	struct htab_elem *l;
@@ -511,8 +607,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 	if (l) {
 		hlist_del_rcu(&l->hash_node);
-		atomic_dec(&htab->count);
-		free_htab_elem(l, percpu, key_size);
+		free_htab_elem(htab, l);
 		ret = 0;
 	}
 
@@ -531,17 +626,10 @@ static void delete_all_elements(struct bpf_htab *htab)
 		hlist_for_each_entry_safe(l, n, head, hash_node) {
 			hlist_del_rcu(&l->hash_node);
-			atomic_dec(&htab->count);
-			if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) {
-				l->key_size = htab->map.key_size;
-				htab_percpu_elem_free(l);
-			} else {
-				kfree(l);
-			}
+			htab_elem_free(htab, l);
 		}
 	}
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
 static void htab_map_free(struct bpf_map *map)
 {
@@ -554,10 +642,16 @@ static void htab_map_free(struct bpf_map *map)
 	 */
 	synchronize_rcu();
 
-	/* some of kfree_rcu() callbacks for elements of this map may not have
-	 * executed. It's ok. Proceed to free residual elements and map itself
+	/* some of free_htab_elem() callbacks for elements of this map may
+	 * not have executed. Wait for them.
 	 */
-	delete_all_elements(htab);
+	rcu_barrier();
+	if (htab->map.map_flags & BPF_F_NO_PREALLOC) {
+		delete_all_elements(htab);
+	} else {
+		htab_free_elems(htab);
+		pcpu_freelist_destroy(&htab->freelist);
+	}
 	kvfree(htab->buckets);
 	kfree(htab);
 }

--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -48,6 +48,19 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+int bpf_map_precharge_memlock(u32 pages)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit, cur;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	cur = atomic_long_read(&user->locked_vm);
+	free_uid(user);
+	if (cur + pages > memlock_limit)
+		return -EPERM;
+	return 0;
+}
+
 static int bpf_map_charge_memlock(struct bpf_map *map)
 {
 	struct user_struct *user = get_current_user();
@@ -153,7 +166,7 @@ int bpf_map_new_fd(struct bpf_map *map)
 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD max_entries
+#define BPF_MAP_CREATE_LAST_FIELD map_flags
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {