mm/sl[au]b: generalize kmalloc subsystem

Now everything in the kmalloc subsystem can be generalized.
Let's do it!

Generalize __do_kmalloc_node(), __kmalloc_node_track_caller(),
kfree(), __ksize(), __kmalloc(), __kmalloc_node() and move them
to slab_common.c.
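
For reference, a sketch of how the exported entry points read once they
live in mm/slab_common.c (the bodies are the ones removed from mm/slub.c
below; only their home changes):

	/* Common kmalloc entry points, shared by SLAB and SLUB. */
	void *__kmalloc(size_t size, gfp_t flags)
	{
		return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
	}
	EXPORT_SYMBOL(__kmalloc);

	void *__kmalloc_node(size_t size, gfp_t flags, int node)
	{
		return __do_kmalloc_node(size, flags, node, _RET_IP_);
	}
	EXPORT_SYMBOL(__kmalloc_node);

	void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
					  int node, unsigned long caller)
	{
		return __do_kmalloc_node(size, gfpflags, node, caller);
	}
	EXPORT_SYMBOL(__kmalloc_node_track_caller);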

While at it, rename kmalloc_large_node_notrace() to
__kmalloc_large_node() and make it static, as it is now only called
from slab_common.c.
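
A sketch of the generalized allocation path as it would read in
mm/slab_common.c, based on the __do_kmalloc_node() removed from mm/slub.c
below. The large-size branch now calls the renamed, file-local
__kmalloc_large_node(); the per-cache allocation call
(__kmem_cache_alloc_node() here) is an assumption, standing in for the
allocator-neutral hook introduced earlier in the series:

	/* Now static: the only callers are in slab_common.c itself. */
	static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);

	static __always_inline
	void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
				unsigned long caller)
	{
		struct kmem_cache *s;
		void *ret;

		if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
			/* was kmalloc_large_node_notrace() */
			ret = __kmalloc_large_node(size, flags, node);
			trace_kmalloc_node(caller, ret, NULL,
					   size, PAGE_SIZE << get_order(size),
					   flags, node);
			return ret;
		}

		s = kmalloc_slab(size, flags);
		if (unlikely(ZERO_OR_NULL_PTR(s)))
			return s;

		/* Assumed allocator-neutral helper (SLAB/SLUB common). */
		ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
		trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
		ret = kasan_kmalloc(s, ret, size, flags);
		return ret;
	}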

[ feng.tang@intel.com: adjust kfence skip list to include
  __kmem_cache_free so that kfence kunit tests do not fail ]
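
The KFENCE tweak is not part of the hunks below. Assuming the "skip list"
is the function-name prefix match in get_stack_skipnr() in
mm/kfence/report.c (an assumption, not shown by this diff), the adjustment
is roughly of this shape:

	/* Sketch of the assumed change: allocator entry points are skipped
	 * by name when trimming report stack traces, so the common
	 * __kmem_cache_free() has to be recognized as well.
	 */
	if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
	    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
	    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||  /* new */
	    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
	    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
		goto found;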

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit b140513524 (parent ed4cd17eb2)
Author:    Hyeonggon Yoo, 2022-08-17 19:18:21 +09:00
Committer: Vlastimil Babka
5 files changed, 107 insertions(+), 200 deletions(-)

mm/slub.c

@@ -4388,49 +4388,6 @@ static int __init setup_slub_min_objects(char *str)
__setup("slub_min_objects=", setup_slub_min_objects);
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *s;
void *ret;
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node_notrace(size, flags, node);
trace_kmalloc_node(caller, ret, NULL,
size, PAGE_SIZE << get_order(size),
flags, node);
return ret;
}
s = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(s)))
return s;
ret = slab_alloc_node(s, NULL, flags, node, caller, size);
trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_HARDENED_USERCOPY
/*
* Rejects incorrectly sized objects and objects that are to be copied
@@ -4481,43 +4438,6 @@ void __check_heap_object(const void *ptr, unsigned long n,
}
#endif /* CONFIG_HARDENED_USERCOPY */
size_t __ksize(const void *object)
{
struct folio *folio;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
folio = virt_to_folio(object);
if (unlikely(!folio_test_slab(folio)))
return folio_size(folio);
return slab_ksize(folio_slab(folio)->slab_cache);
}
EXPORT_SYMBOL(__ksize);
void kfree(const void *x)
{
struct folio *folio;
struct slab *slab;
void *object = (void *)x;
trace_kfree(_RET_IP_, x);
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
folio = virt_to_folio(x);
if (unlikely(!folio_test_slab(folio))) {
free_large_kmalloc(folio, object);
return;
}
slab = folio_slab(folio);
slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
}
EXPORT_SYMBOL(kfree);
#define SHRINK_PROMOTE_MAX 32
/*
@@ -4863,13 +4783,6 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
return 0;
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, unsigned long caller)
{
return __do_kmalloc_node(size, gfpflags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#ifdef CONFIG_SYSFS
static int count_inuse(struct slab *slab)
{