bpf: Change size to u64 for bpf_map_{area_alloc,charge_init}()

Prior to this commit, the functions bpf_map_area_alloc() and
bpf_map_charge_init() took their size parameter as size_t. This commit
changes it to u64.

All users of these functions avoid size_t overflows on 32-bit systems
by explicitly using u64 when calculating the allocation size and memory
charge cost. However, since the result was narrowed to size_t when the
size and cost were passed to the functions, that overflow handling was
in vain.
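
To make the narrowing concrete, here is a minimal user-space sketch (not
kernel code; area_alloc_sized() is an invented stand-in for the old
size_t-based prototype):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t u64;

  /* Invented helper: stands in for a size_t-based allocator. */
  static void *area_alloc_sized(size_t size)
  {
          printf("asked to allocate %zu bytes\n", size);
          return NULL;            /* actual allocation elided */
  }

  int main(void)
  {
          u64 entries = 1ULL << 20;        /* 1 Mi entries ...   */
          u64 value_size = 1ULL << 13;     /* ... of 8 KiB each  */
          u64 cost = entries * value_size; /* 8 GiB, fits in u64 */

          /* On a 32-bit target size_t is 32 bits wide, so this implicit
           * conversion truncates cost modulo 2^32 -- here to 0 -- and
           * the caller's careful u64 arithmetic is lost.
           */
          area_alloc_sized(cost);
          return 0;
  }

On a 64-bit build the full 8 GiB request reaches the callee; on a 32-bit
build it silently wraps to 0.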

Instead of adapting every call site and handling the overflow there, the
parameter is changed to u64 and the check is done inside the two
functions themselves.
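
A rough user-space analogue of that approach, with malloc() standing in
for the kernel allocators and area_alloc_checked() invented for
illustration:

  #include <stdint.h>
  #include <stdlib.h>

  typedef uint64_t u64;

  /* Accept u64, refuse anything that cannot be represented as size_t,
   * and only then narrow and allocate.
   */
  static void *area_alloc_checked(u64 size)
  {
          if (size >= SIZE_MAX)
                  return NULL;

          return malloc((size_t)size);
  }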

Fixes: d407bd25a2 ("bpf: don't trigger OOM killer under pressure with map alloc")
Fixes: c85d69135a ("bpf: move memory size checks to bpf_map_charge_init()")
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Link: https://lore.kernel.org/bpf/20191029154307.23053-1-bjorn.topel@gmail.com
Björn Töpel, 2019-10-29 16:43:07 +01:00, committed by Daniel Borkmann
parent 04ec044b7d
commit ff1c08e1f7
2 changed files with 7 additions and 4 deletions

@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
                          struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
         return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
         /* We really just want to fail instead of triggering OOM killer
          * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
         const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
         void *area;
 
+        if (size >= SIZE_MAX)
+                return NULL;
+
         if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
                                     numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
         atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
         u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
         struct user_struct *user;
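
For reference, the pages value in the last context lines above is simply
the charge rounded up to whole pages. A small user-space sketch of that
arithmetic, assuming 4 KiB pages and reimplementing round_up() locally
(the kernel macro is not available outside the tree):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint64_t u64;
  typedef uint32_t u32;

  #define PAGE_SHIFT 12                   /* assume 4 KiB pages */
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  /* Round x up to the next multiple of y; y must be a power of two. */
  #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

  int main(void)
  {
          u64 size = 10000; /* example charge in bytes */
          u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;

          printf("%llu bytes -> %u pages\n",
                 (unsigned long long)size, pages); /* 10000 bytes -> 3 pages */
          return 0;
  }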