mm: slub: Add SLUB_SYSFS

Make the SLUB sysfs interface selectable. Leaving it unselected saves
about 4.8ms of boot time on the RK1808 EVB.

Change-Id: I2587cc259c3c31a209604d99640d8e84a4ba78f4
Signed-off-by: Tao Huang <huangtao@rock-chips.com>
Tao Huang 2019-09-17 18:20:52 +08:00
parent 70d2349c7c
commit ff4142e509
3 changed files with 14 additions and 9 deletions
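
For orientation, here is a minimal sketch (illustrative only, not code taken from this patch) of the compile-out pattern the new symbol enables: when CONFIG_SLUB_SYSFS is disabled, the sysfs registration hooks that the allocator calls collapse to no-op stubs, so no per-cache kobjects are created during boot, which is where the time saving comes from.

/*
 * Illustrative sketch, assuming the usual SLUB stub pattern;
 * the stub bodies here are not part of this patch.
 */
#ifdef CONFIG_SLUB_SYSFS
static int sysfs_slab_add(struct kmem_cache *s);
static int sysfs_slab_alias(struct kmem_cache *s, const char *name);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	return 0;
}
#endif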


@@ -107,7 +107,7 @@ struct kmem_cache {
 	unsigned int red_left_pad;	/* Left redzone padding size */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
@@ -146,7 +146,7 @@ struct kmem_cache {
 #define slub_set_cpu_partial(s, n)
 #endif /* CONFIG_SLUB_CPU_PARTIAL */
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 #define SLAB_SUPPORTS_SYSFS
 void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);


@@ -1882,6 +1882,11 @@ config VM_EVENT_COUNTERS
 	  on EXPERT systems. /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
+config SLUB_SYSFS
+	bool "Enable SLUB sysfs interface"
+	depends on SLUB && SYSFS
+	default y
+
 config SLUB_DEBUG
 	default y
 	bool "Enable SLUB debugging support" if EXPERT


@@ -203,7 +203,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 /* Use cmpxchg_double */
 #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static int sysfs_slab_add(struct kmem_cache *);
 static int sysfs_slab_alias(struct kmem_cache *, const char *);
 #else
@@ -2552,7 +2552,7 @@ static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
 }
 #endif /* CONFIG_SLUB_DEBUG */
 
-#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLUB_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2566,7 +2566,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
-#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
 
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
@@ -4564,7 +4564,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -4848,12 +4848,12 @@ static void __init resiliency_test(void)
 	validate_slab_cache(kmalloc_caches[type][9]);
 }
 #else
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 static void resiliency_test(void) {};
 #endif
 #endif /* SLUB_RESILIENCY_TEST */
 
-#ifdef CONFIG_SYSFS
+#ifdef CONFIG_SLUB_SYSFS
 enum slab_stat_type {
 	SL_ALL,		/* All slabs */
 	SL_PARTIAL,	/* Only partially allocated slabs */
@@ -5696,7 +5696,7 @@ static int __init slab_sysfs_init(void)
 }
 
 __initcall(slab_sysfs_init);
-#endif /* CONFIG_SYSFS */
+#endif /* CONFIG_SLUB_SYSFS */
 
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)