diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index dcde82a4434c..f2247fed8ec4 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -107,7 +107,7 @@ struct kmem_cache {
 	unsigned int red_left_pad;	/* Left redzone padding size */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
@@ -146,7 +146,7 @@ struct kmem_cache {
 #define slub_set_cpu_partial(s, n)
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 #define SLAB_SUPPORTS_SYSFS
 void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
diff --git a/init/Kconfig b/init/Kconfig
index acef1d89d356..0ada8784ce94 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1882,6 +1882,11 @@ config VM_EVENT_COUNTERS
 	  on EXPERT systems.  /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
+config SLUB_SYSFS
+	bool "Enable SLUB sysfs interface"
+	depends on SLUB && SYSFS
+	default y
+
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EXPERT
diff --git a/mm/slub.c b/mm/slub.c
index 6cdd8f0b3da7..c9e639e9c3e4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -203,7 +203,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /* Use cmpxchg_double */
 #define __CMPXCHG_DOUBLE	((slab_flags_t __force)0x40000000U)
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 #else
@@ -2552,7 +2552,7 @@ static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
 }
 #endif /* CONFIG_SLUB_DEBUG */
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLUB_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2566,7 +2566,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
-#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
 
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
@@ -4564,7 +4564,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -4848,12 +4848,12 @@ static void __init resiliency_test(void)
 	validate_slab_cache(kmalloc_caches[type][9]);
 }
 #else
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static void resiliency_test(void) {};
 #endif
 #endif	/* SLUB_RESILIENCY_TEST */
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 enum slab_stat_type {
 	SL_ALL,			/* All slabs */
 	SL_PARTIAL,		/* Only partially allocated slabs */
@@ -5696,7 +5696,7 @@ static int __init slab_sysfs_init(void)
 }
 
 __initcall(slab_sysfs_init);
-#endif /* CONFIG_SYSFS */
+#endif /* CONFIG_SLUB_SYSFS */
 
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)