SLUB: Introduce and use SLUB_MAX_SIZE and SLUB_PAGE_SHIFT constants

As a preparatory patch for bumping up the page allocator pass-through
threshold, introduce two new constants, SLUB_MAX_SIZE and SLUB_PAGE_SHIFT,
and convert mm/slub.c to use them.

Reported-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Tested-by: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Author:    Christoph Lameter, 2009-02-17 12:05:07 -05:00
Committer: Pekka Enberg
Parent:    b578f3fcca
Commit:    ffadd4d0fe

2 changed files with 24 additions and 11 deletions
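For context, the two constants gate the same decision in every kmalloc path touched below: requests up to SLUB_MAX_SIZE stay on the SLUB fastpath, while anything larger is handed straight to the page allocator. The following user-space sketch is illustrative only, not kernel code; the PAGE_SIZE value is an assumption (4 KiB pages), and kmalloc_route() merely mirrors the "if (size > SLUB_MAX_SIZE) return kmalloc_large(...)" checks in the patch.

/* Illustrative sketch of the pass-through rule; PAGE_SIZE is assumed. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL			/* assumption: 4 KiB pages */
#define SLUB_MAX_SIZE	(PAGE_SIZE)		/* threshold introduced by this patch */

static const char *kmalloc_route(size_t size)
{
	/* Mirrors the size > SLUB_MAX_SIZE checks added in the diff below. */
	return size > SLUB_MAX_SIZE ? "page allocator" : "SLUB kmalloc cache";
}

int main(void)
{
	size_t sizes[] = { 512, 4096, 8192 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5zu bytes -> %s\n", sizes[i], kmalloc_route(sizes[i]));
	return 0;
}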

--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h

@@ -120,11 +120,24 @@ struct kmem_cache {
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow so we need this value sufficiently high so that
+ * performance critical objects are allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -212,7 +225,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE)
+		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
 		if (!(flags & SLUB_DMA)) {
@@ -234,7 +247,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);
 		if (!s)
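The two constants are not independent: kmalloc_caches[] needs one slot per power of two up to SLUB_MAX_SIZE, so the largest general cache is 1 << (SLUB_PAGE_SHIFT - 1) and the array length is SLUB_PAGE_SHIFT. A minimal compile-time check of that relationship could look like the sketch below; it is not part of the patch, and the PAGE_SHIFT value is an assumption for illustration.

/* Hypothetical compile-time check, not part of the patch: the largest
 * general cache, 1 << (SLUB_PAGE_SHIFT - 1), must cover SLUB_MAX_SIZE. */
#define PAGE_SHIFT	12			/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	(PAGE_SIZE)
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 1)

/* A negative array size forces a compile error if the constants drift apart. */
typedef char slub_constants_in_sync
	[(1UL << (SLUB_PAGE_SHIFT - 1)) >= SLUB_MAX_SIZE ? 1 : -1];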

--- a/mm/slub.c
+++ b/mm/slub.c

@@ -2475,7 +2475,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2537,7 @@ panic:
 }
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2658,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 	s = get_slab(size, flags);
@@ -2686,7 +2686,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 	s = get_slab(size, flags);
@@ -2985,7 +2985,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3022,7 +3022,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i].name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
@@ -3222,7 +3222,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 	s = get_slab(size, gfpflags);
@@ -3238,7 +3238,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 	s = get_slab(size, gfpflags);
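Because SLUB_PAGE_SHIFT is defined as PAGE_SHIFT + 1, the new loop bound i < SLUB_PAGE_SHIFT in kmem_cache_init() covers exactly the same cache sizes as the old i <= PAGE_SHIFT, so behaviour is unchanged until the pass-through threshold is actually raised. The sketch below is user-space illustration only: the PAGE_SHIFT and KMALLOC_SHIFT_LOW values are assumptions, and the real kmalloc_index()'s special 96- and 192-byte caches are ignored. It shows which kmalloc_caches[] slot a request would land in versus when it passes through to the page allocator.

/* Illustrative sketch; constants are assumptions, special caches ignored. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	  12
#define PAGE_SIZE	  (1UL << PAGE_SHIFT)
#define SLUB_MAX_SIZE	  (PAGE_SIZE)
#define SLUB_PAGE_SHIFT	  (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_LOW 3			/* assumed smallest cache: 8 bytes */

/* Returns the kmalloc_caches[] slot for a request, or -1 for pass-through. */
static int kmalloc_slot(size_t size)
{
	int i = KMALLOC_SHIFT_LOW;

	if (size == 0 || size > SLUB_MAX_SIZE)
		return -1;
	while ((1UL << i) < size)
		i++;
	return i;				/* always < SLUB_PAGE_SHIFT here */
}

int main(void)
{
	printf("100 bytes  -> slot %d\n", kmalloc_slot(100));	/* 7: kmalloc-128 */
	printf("4096 bytes -> slot %d\n", kmalloc_slot(4096));	/* 12: kmalloc-4096 */
	printf("8192 bytes -> slot %d\n", kmalloc_slot(8192));	/* -1: page allocator */
	return 0;
}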