mm: memcg/slab: deprecate slab_root_caches
Currently there are two lists of kmem_caches:
1) slab_caches, which contains all kmem_caches, and
2) slab_root_caches, which contains only root kmem_caches,
plus some preprocessor magic to fall back to a single list when
CONFIG_MEMCG_KMEM isn't enabled.

Two lists were required earlier because the number of non-root
kmem_caches was proportional to the number of memory cgroups and could
reach really big values. Now that it cannot exceed the number of root
kmem_caches, there is no reason to maintain both.

We never iterate over the slab_root_caches list on any hot paths, so
it's perfectly fine to iterate over slab_caches and filter out non-root
kmem_caches. This allows us to remove a lot of config-dependent code
and two pointers from the kmem_cache structure.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-16-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
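To illustrate the iterate-and-filter pattern the message describes (the
one the slab_show() and find_mergeable() hunks below switch to), here is
a minimal userspace sketch. It is an illustration, not kernel code: the
list helpers are pared-down copies of the <linux/list.h> ones, and
struct kmem_cache is reduced to the fields the example needs (the real
is_root_cache() checks s->memcg_params.root_cache, as the mm/slab.h
hunk below shows).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

/* Insert 'entry' right after 'head', as the kernel's list_add() does. */
static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

/* Walk a list of structures embedded via 'member' (container_of-style). */
#define list_for_each_entry(pos, head, member)                          \
        for (pos = (typeof(pos))((char *)(head)->next -                 \
                                 offsetof(typeof(*pos), member));       \
             &pos->member != (head);                                    \
             pos = (typeof(pos))((char *)pos->member.next -             \
                                 offsetof(typeof(*pos), member)))

struct kmem_cache {
        const char *name;
        struct kmem_cache *root_cache;  /* NULL for a root cache */
        struct list_head list;          /* node on the single slab_caches list */
};

static bool is_root_cache(const struct kmem_cache *s)
{
        return !s->root_cache;
}

int main(void)
{
        LIST_HEAD(slab_caches);
        struct kmem_cache root = { .name = "task_struct" };
        struct kmem_cache memcg = { .name = "task_struct(memcg)",
                                    .root_cache = &root };
        struct kmem_cache *s;

        list_add(&root.list, &slab_caches);
        list_add(&memcg.list, &slab_caches);

        /* One list for everything; non-root caches are simply skipped
         * while walking it, instead of being kept off a second,
         * root-only list. */
        list_for_each_entry(s, &slab_caches, list) {
                if (!is_root_cache(s))
                        continue;
                printf("%s\n", s->name);
        }
        return 0;
}

Built with gcc (the macros rely on the typeof extension, as the kernel's
own list helpers do), this prints only "task_struct": the memcg clone is
filtered out during the walk, which is cheap since these walks never
happen on hot paths.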
commit c7094406fc
parent 272911a4ad
mm/slab.c

@@ -1249,7 +1249,6 @@ void __init kmem_cache_init(void)
                           nr_node_ids * sizeof(struct kmem_cache_node *),
                           SLAB_HWCACHE_ALIGN, 0, 0);
         list_add(&kmem_cache->list, &slab_caches);
-        memcg_link_cache(kmem_cache);
         slab_state = PARTIAL;
 
         /*
mm/slab.h
@@ -44,14 +44,12 @@ struct kmem_cache {
  *
  * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
  *              cgroups.
- * @root_caches_node: list node for slab_root_caches list.
  * @work: work struct used to create the non-root cache.
  */
 struct memcg_cache_params {
         struct kmem_cache *root_cache;
 
         struct kmem_cache *memcg_cache;
-        struct list_head __root_caches_node;
         struct work_struct work;
 };
 #endif /* CONFIG_SLOB */
@@ -265,11 +263,6 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-
-/* List of all root caches. */
-extern struct list_head slab_root_caches;
-#define root_caches_node        memcg_params.__root_caches_node
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
         return !s->memcg_params.root_cache;
@@ -447,14 +440,8 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
-extern void memcg_link_cache(struct kmem_cache *s);
-
 #else /* CONFIG_MEMCG_KMEM */
 
-/* If !memcg, all caches are root. */
-#define slab_root_caches        slab_caches
-#define root_caches_node        list
-
 static inline bool is_root_cache(struct kmem_cache *s)
 {
         return true;
@@ -523,10 +510,6 @@ static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_link_cache(struct kmem_cache *s)
-{
-}
-
 #endif /* CONFIG_MEMCG_KMEM */
 
 static inline struct kmem_cache *virt_to_cache(const void *obj)

mm/slab_common.c
@@ -131,9 +131,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
-
-LIST_HEAD(slab_root_caches);
-
 static void memcg_kmem_cache_create_func(struct work_struct *work)
 {
         struct kmem_cache *cachep = container_of(work, struct kmem_cache,
@@ -156,27 +153,11 @@ static void init_memcg_params(struct kmem_cache *s,
         else
                 slab_init_memcg_params(s);
 }
-
-void memcg_link_cache(struct kmem_cache *s)
-{
-        if (is_root_cache(s))
-                list_add(&s->root_caches_node, &slab_root_caches);
-}
-
-static void memcg_unlink_cache(struct kmem_cache *s)
-{
-        if (is_root_cache(s))
-                list_del(&s->root_caches_node);
-}
 #else
 static inline void init_memcg_params(struct kmem_cache *s,
                                      struct kmem_cache *root_cache)
 {
 }
-
-static inline void memcg_unlink_cache(struct kmem_cache *s)
-{
-}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
@@ -253,7 +234,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
         if (flags & SLAB_NEVER_MERGE)
                 return NULL;
 
-        list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
+        list_for_each_entry_reverse(s, &slab_caches, list) {
                 if (slab_unmergeable(s))
                         continue;
 
@@ -312,7 +293,6 @@ static struct kmem_cache *create_cache(const char *name,
 
         s->refcount = 1;
         list_add(&s->list, &slab_caches);
-        memcg_link_cache(s);
 out:
         if (err)
                 return ERR_PTR(err);
@@ -507,7 +487,6 @@ static int shutdown_cache(struct kmem_cache *s)
         if (__kmem_cache_shutdown(s) != 0)
                 return -EBUSY;
 
-        memcg_unlink_cache(s);
         list_del(&s->list);
 
         if (s->flags & SLAB_TYPESAFE_BY_RCU) {
@@ -751,7 +730,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
         create_boot_cache(s, name, size, flags, useroffset, usersize);
         list_add(&s->list, &slab_caches);
-        memcg_link_cache(s);
         s->refcount = 1;
         return s;
 }
@@ -1107,12 +1085,12 @@ static void print_slabinfo_header(struct seq_file *m)
 void *slab_start(struct seq_file *m, loff_t *pos)
 {
         mutex_lock(&slab_mutex);
-        return seq_list_start(&slab_root_caches, *pos);
+        return seq_list_start(&slab_caches, *pos);
 }
 
 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
-        return seq_list_next(p, &slab_root_caches, pos);
+        return seq_list_next(p, &slab_caches, pos);
 }
 
 void slab_stop(struct seq_file *m, void *p)
@@ -1165,11 +1143,12 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
 
 static int slab_show(struct seq_file *m, void *p)
 {
-        struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
+        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 
-        if (p == slab_root_caches.next)
+        if (p == slab_caches.next)
                 print_slabinfo_header(m);
-        cache_show(s, m);
+        if (is_root_cache(s))
+                cache_show(s, m);
         return 0;
 }
 
@@ -1271,7 +1250,7 @@ static int memcg_slabinfo_show(struct seq_file *m, void *unused)
         mutex_lock(&slab_mutex);
         seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
         seq_puts(m, " <active_slabs> <num_slabs>\n");
-        list_for_each_entry(s, &slab_root_caches, root_caches_node) {
+        list_for_each_entry(s, &slab_caches, list) {
                 /*
                  * Skip kmem caches that don't have the memcg cache.
                  */