aadb4bc4a1
This gets rid of all kmalloc caches larger than page size.  A kmalloc
request larger than PAGE_SIZE / 2 is passed straight through to the page
allocator.  This works both inline, where we call __get_free_pages instead
of kmem_cache_alloc, and in __kmalloc.  kfree is modified to check whether
the object is in a slab page; if not, the page is freed via the page
allocator instead.  Roughly similar to what SLOB does.

Advantages:

- Reduces memory overhead for the kmalloc array.

- Large kmalloc operations are faster since they do not need to pass
  through the slab allocator to get to the page allocator.

- Performance increase of 10%-20% on alloc and 50% on free for
  PAGE_SIZE-sized allocations.  SLUB must call the page allocator for each
  alloc anyway, since the higher order pages that allowed avoiding the
  page allocator calls are no longer reliably available.  So we are
  basically removing useless slab allocator overhead.

- Large kmallocs yield page-aligned objects, which is what SLAB did.  Bad
  things like using page sized kmalloc allocations to stand in for page
  allocator allocations can be transparently handled and are not
  distinguishable from page allocator uses.

- Checking for too large objects can be removed since it is done by the
  page allocator.

Drawbacks:

- No accounting for large kmalloc slab allocations anymore.

- No debugging of large kmalloc slab allocations.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
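As a rough sketch of the free-path dispatch the message describes (an
illustration only, not the patch's actual mm/slub.c code; kfree_sketch()
and slub_free_object() are hypothetical names standing in for kfree() and
SLUB's internal free path):

#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Sketch of the kfree() dispatch described above -- illustration only.
 * slub_free_object() is a hypothetical stand-in for SLUB's internal
 * slab free path.
 */
void kfree_sketch(const void *x)
{
        struct page *page;

        if (!x || x == ZERO_SIZE_PTR)
                return;

        page = virt_to_head_page(x);

        /* Not a slab page: the object came straight from the page allocator. */
        if (unlikely(!PageSlab(page))) {
                put_page(page);
                return;
        }

        /* Slab object: hand it back to the slab allocator. */
        slub_free_object(page, (void *)x);
}

Because page-allocator-backed objects are detected from the page itself,
callers cannot tell (and do not need to know) which backend served their
allocation.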
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
        spinlock_t list_lock;   /* Protect partial list and nr_partial */
        unsigned long nr_partial;
        atomic_long_t nr_slabs;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        struct list_head full;
#endif
};

/*
 * Slab cache management.
 */
struct kmem_cache {
        /* Used for retrieving partial slabs etc */
        unsigned long flags;
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
        int order;

        /*
         * Avoid an extra cache line for UP, SMP and for the node local to
         * struct kmem_cache.
         */
        struct kmem_cache_node local_node;

        /* Allocation and freeing of slabs */
        int objects;            /* Number of objects in slab */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *, struct kmem_cache *, unsigned long);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
        struct kobject kobj;    /* For sysfs */
#endif

#ifdef CONFIG_NUMA
        int defrag_ratio;
        struct kmem_cache_node *node[MAX_NUMNODES];
#endif
        struct page *cpu_slab[NR_CPUS];
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
        if (!size)
                return 0;

        if (size <= KMALLOC_MIN_SIZE)
                return KMALLOC_SHIFT_LOW;

        if (size > 64 && size <= 96)
                return 1;
        if (size > 128 && size <= 192)
                return 2;
        if (size <=          8) return 3;
        if (size <=         16) return 4;
        if (size <=         32) return 5;
        if (size <=         64) return 6;
        if (size <=        128) return 7;
        if (size <=        256) return 8;
        if (size <=        512) return 9;
        if (size <=       1024) return 10;
        if (size <=   2 * 1024) return 11;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
        if (size <=   4 * 1024) return 12;
        if (size <=   8 * 1024) return 13;
        if (size <=  16 * 1024) return 14;
        if (size <=  32 * 1024) return 15;
        if (size <=  64 * 1024) return 16;
        if (size <= 128 * 1024) return 17;
        if (size <= 256 * 1024) return 18;
        if (size <= 512 * 1024) return 19;
        if (size <= 1024 * 1024) return 20;
        if (size <= 2 * 1024 * 1024) return 21;
        return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *      int i;
 *      for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *              if (size <= (1 << i))
 *                      return i;
 */
}

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
        int index = kmalloc_index(size);

        if (index == 0)
                return NULL;

        return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        if (__builtin_constant_p(size)) {
                if (size > PAGE_SIZE / 2)
                        return (void *)__get_free_pages(flags | __GFP_COMP,
                                                        get_order(size));

                if (!(flags & SLUB_DMA)) {
                        struct kmem_cache *s = kmalloc_slab(size);

                        if (!s)
                                return ZERO_SIZE_PTR;

                        return kmem_cache_alloc(s, flags);
                }
        }
        return __kmalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        if (__builtin_constant_p(size) &&
                size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
                struct kmem_cache *s = kmalloc_slab(size);

                if (!s)
                        return ZERO_SIZE_PTR;

                return kmem_cache_alloc_node(s, flags, node);
        }
        return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */
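The inline kmalloc() above constant-folds the dispatch at compile time.  As
a rough usage illustration (not part of the header; the struct, sizes and
function name are made up for this example):

#include <linux/slab.h>
#include <linux/gfp.h>

struct small_thing { char buf[100]; };  /* hypothetical example struct */

static void kmalloc_dispatch_example(void)
{
        /*
         * sizeof(*t) == 100 is a compile-time constant <= PAGE_SIZE / 2,
         * and kmalloc_index(100) == 7, so this resolves to
         * kmem_cache_alloc(&kmalloc_caches[7], GFP_KERNEL) (the 128-byte cache).
         */
        struct small_thing *t = kmalloc(sizeof(*t), GFP_KERNEL);

        /*
         * 2 * PAGE_SIZE is larger than PAGE_SIZE / 2, so this bypasses the
         * slab allocator and becomes
         * __get_free_pages(GFP_KERNEL | __GFP_COMP, get_order(2 * PAGE_SIZE)).
         */
        void *big = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);

        kfree(t);       /* slab page: freed through the slab allocator */
        kfree(big);     /* non-slab page: freed via the page allocator, per the patch */
}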