mm/slub.c: switch to bitmap_zalloc()

Switch to bitmap_zalloc() to show clearly what we are allocating.  Besides
that, it returns a pointer of bitmap type instead of an opaque void *.

Link: http://lkml.kernel.org/r/20180830104301.61649-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Andy Shevchenko 2018-10-26 15:03:06 -07:00 committed by Linus Torvalds
parent 253cc22fc6
commit 0684e6526e
1 changed file with 7 additions and 13 deletions

View File

@ -3621,9 +3621,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page); void *addr = page_address(page);
void *p; void *p;
unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
sizeof(long),
GFP_ATOMIC);
if (!map) if (!map)
return; return;
slab_err(s, page, text, s->name); slab_err(s, page, text, s->name);
@ -3638,7 +3636,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
} }
} }
slab_unlock(page); slab_unlock(page);
kfree(map); bitmap_free(map);
#endif #endif
} }
@ -4411,10 +4409,8 @@ static long validate_slab_cache(struct kmem_cache *s)
{ {
int node; int node;
unsigned long count = 0; unsigned long count = 0;
unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
sizeof(unsigned long),
GFP_KERNEL);
struct kmem_cache_node *n; struct kmem_cache_node *n;
unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
if (!map) if (!map)
return -ENOMEM; return -ENOMEM;
@ -4422,7 +4418,7 @@ static long validate_slab_cache(struct kmem_cache *s)
flush_all(s); flush_all(s);
for_each_kmem_cache_node(s, node, n) for_each_kmem_cache_node(s, node, n)
count += validate_slab_node(s, n, map); count += validate_slab_node(s, n, map);
kfree(map); bitmap_free(map);
return count; return count;
} }
/* /*
@ -4573,14 +4569,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
unsigned long i; unsigned long i;
struct loc_track t = { 0, 0, NULL }; struct loc_track t = { 0, 0, NULL };
int node; int node;
unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
sizeof(unsigned long),
GFP_KERNEL);
struct kmem_cache_node *n; struct kmem_cache_node *n;
unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
GFP_KERNEL)) { GFP_KERNEL)) {
kfree(map); bitmap_free(map);
return sprintf(buf, "Out of memory\n"); return sprintf(buf, "Out of memory\n");
} }
/* Push back cpu slabs */ /* Push back cpu slabs */
@ -4646,7 +4640,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
} }
free_loc_track(&t); free_loc_track(&t);
kfree(map); bitmap_free(map);
if (!t.count) if (!t.count)
len += sprintf(buf, "No data\n"); len += sprintf(buf, "No data\n");
return len; return len;