* SLUB: A slab allocator that limits cache line use instead of queuing
* objects in per cpu and per node lists.
*
- * The allocator synchronizes using per slab locks and only
- * uses a centralized lock to manage a pool of partial slabs.
+ * The allocator synchronizes using per slab locks or atomic operations
+ * and only uses a centralized lock to manage a pool of partial slabs.
*
* (C) 2007 SGI, Christoph Lameter
+ * (C) 2011 Linux Foundation, Christoph Lameter
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/kmemtrace.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
+#include <linux/stacktrace.h>
+
+#include <trace/events/kmem.h>
/*
* Lock order:
- * 1. slab_lock(page)
- * 2. slab->list_lock
+ * 1. slub_lock (Global Semaphore)
+ * 2. node->list_lock
+ * 3. slab_lock(page) (Only on some arches and for debugging)
+ *
+ * slub_lock
+ *
+ * The role of the slub_lock is to protect the list of all the slabs
+ * and to synchronize major metadata changes to slab cache structures.
+ *
+ * The slab_lock is only used for debugging and on arches that do not
+ * have the ability to do a cmpxchg_double. It only protects the second
+ * double word in the page struct, meaning:
+ * A. page->freelist -> List of free objects in a page
+ * B. page->counters -> Counters of objects
+ * C. page->frozen -> frozen state
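+ *
+ * Since page->freelist and page->counters share one aligned double
+ * word, a cmpxchg_double on &page->freelist replaces both atomically.
+ * The counters word in turn overlays the inuse/objects/frozen bitfields,
+ * which is why code below copies it into a private struct page, adjusts
+ * the fields there and writes the whole word back.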
*
- * The slab_lock protects operations on the object of a particular
- * slab and its metadata in the page struct. If the slab lock
- * has been taken then no allocations nor frees can be performed
- * on the objects in the slab nor can the slab be added or removed
- * from the partial or full lists since this would mean modifying
- * the page_struct of the slab.
+ * If a slab is frozen then it is exempt from list management. It is not
+ * on any list. The processor that froze the slab is the one who can
+ * perform list operations on the page. Other processors may put objects
+ * onto the freelist but the processor that froze the slab is the only
+ * one that can retrieve the objects from the page's freelist.
*
* The list_lock protects the partial and full list on each node and
* the partial slab counter. If taken then no new slabs may be added or
* slabs, operations can continue without any centralized lock. F.e.
* allocating a long series of objects that fill up slabs does not require
* the list lock.
- *
- * The lock order is sometimes inverted when we are trying to get a slab
- * off a list. We take the list_lock and then look for a page on the list
- * to use. While we do that objects in the slabs may be freed. We can
- * only operate on the slab if we have also taken the slab_lock. So we use
- * a slab_trylock() on the slab. If trylock was successful then no frees
- * can occur anymore and we can use the slab for allocations etc. If the
- * slab_trylock() does not succeed then frees are in progress in the slab and
- * we must stay away from it for a while since we may cause a bouncing
- * cacheline if we try to acquire the lock. So go onto the next slab.
- * If all pages are busy then we may allocate a new slab instead of reusing
- * a partial slab. A new slab has noone operating on it and thus there is
- * no danger of cacheline contention.
- *
* Interrupts are disabled during allocation and deallocation in order to
* make the slab allocator safe to use in the context of an irq. In addition
* interrupts are disabled to ensure that the processor does not change
* the fast path and disables lockless freelists.
*/
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+ SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
#ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+ return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
-#define SLABDEBUG 0
+ return 0;
#endif
+}
/*
* Issues still to be resolved:
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST
+/* Enable to log cmpxchg failures */
+#undef SLUB_DEBUG_CMPXCHG
+
/*
 * Minimum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
+#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000UL /* Poison object */
-#define __SYSFS_ADD_DEFERRED 0x40000000UL /* Not yet visible via sysfs */
+#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
static int kmem_size = sizeof(struct kmem_cache);
static enum {
DOWN, /* No slab functionality available */
- PARTIAL, /* kmem_cache_open() works but kmalloc does not */
+ PARTIAL, /* Kmem_cache_node works */
UP, /* Everything works but does not show up in sysfs */
SYSFS /* Sysfs up */
} slab_state = DOWN;
/*
* Tracking user of a slab.
*/
+#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
+#ifdef CONFIG_STACKTRACE
+ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
+#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
unsigned long when; /* When did the operation occur */
enum track_item { TRACK_ALLOC, TRACK_FREE };
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
+ kfree(s->name);
kfree(s);
}
#endif
-static inline void stat(struct kmem_cache *s, enum stat_item si)
+static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
__this_cpu_inc(s->cpu_slab->stat[si]);
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
-#ifdef CONFIG_NUMA
return s->node[node];
-#else
- return &s->local_node;
-#endif
}
/* Verify that a pointer has an address that is valid within a slab page */
return *(void **)(object + s->offset);
}
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+ void *p;
+
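+	/*
+	 * The lockless allocation fastpath may speculatively read the free
+	 * pointer of an object that was concurrently freed. With
+	 * CONFIG_DEBUG_PAGEALLOC the underlying page may already be
+	 * unmapped, so use a non-faulting read; the subsequent cmpxchg
+	 * rejects any stale value.
+	 */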
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+ p = get_freepointer(s, object);
+#endif
+ return p;
+}
+
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
*(void **)(object + s->offset) = fp;
for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
__p += (__s)->size)
-/* Scan freelist */
-#define for_each_free_object(__p, __s, __free) \
- for (__p = (__free); __p; __p = get_freepointer((__s), __p))
-
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
return (p - addr) / s->size;
}
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+ /*
+ * Debugging requires use of the padding between object
+ * and whatever may come after it.
+ */
+ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+ return s->objsize;
+
+#endif
+ /*
+ * If we have the need to store the freelist pointer
+ * back there or track user information then we can
+ * only use the space before that information.
+ */
+ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+ return s->inuse;
+ /*
+ * Else we can use all the padding etc for the allocation
+ */
+ return s->size;
+}
+
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+ return ((PAGE_SIZE << order) - reserved) / size;
+}
+
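+/*
+ * For example, with order = 1, size = 512 and reserved = 16 (e.g. an
+ * rcu_head stashed at the end of the slab), this yields
+ * (8192 - 16) / 512 = 15 objects per slab on a 4K page architecture.
+ */
+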
static inline struct kmem_cache_order_objects oo_make(int order,
- unsigned long size)
+ unsigned long size, int reserved)
{
struct kmem_cache_order_objects x = {
- (order << OO_SHIFT) + (PAGE_SIZE << order) / size
+ (order << OO_SHIFT) + order_objects(order, size, reserved)
};
return x;
return x.x & OO_MASK;
}
+/*
+ * Per slab locking using the pagelock
+ */
+static __always_inline void slab_lock(struct page *page)
+{
+ bit_spin_lock(PG_locked, &page->flags);
+}
+
+static __always_inline void slab_unlock(struct page *page)
+{
+ __bit_spin_unlock(PG_locked, &page->flags);
+}
+
+/* Interrupts must be disabled (for the fallback code to work right) */
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+ void *freelist_old, unsigned long counters_old,
+ void *freelist_new, unsigned long counters_new,
+ const char *n)
+{
+ VM_BUG_ON(!irqs_disabled());
+#ifdef CONFIG_CMPXCHG_DOUBLE
+ if (s->flags & __CMPXCHG_DOUBLE) {
+ if (cmpxchg_double(&page->freelist,
+ freelist_old, counters_old,
+ freelist_new, counters_new))
+ return 1;
+ } else
+#endif
+ {
+ slab_lock(page);
+ if (page->freelist == freelist_old && page->counters == counters_old) {
+ page->freelist = freelist_new;
+ page->counters = counters_new;
+ slab_unlock(page);
+ return 1;
+ }
+ slab_unlock(page);
+ }
+
+ cpu_relax();
+ stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+ printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+ return 0;
+}
+
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+ void *freelist_old, unsigned long counters_old,
+ void *freelist_new, unsigned long counters_new,
+ const char *n)
+{
+#ifdef CONFIG_CMPXCHG_DOUBLE
+ if (s->flags & __CMPXCHG_DOUBLE) {
+ if (cmpxchg_double(&page->freelist,
+ freelist_old, counters_old,
+ freelist_new, counters_new))
+ return 1;
+ } else
+#endif
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ slab_lock(page);
+ if (page->freelist == freelist_old && page->counters == counters_old) {
+ page->freelist = freelist_new;
+ page->counters = counters_new;
+ slab_unlock(page);
+ local_irq_restore(flags);
+ return 1;
+ }
+ slab_unlock(page);
+ local_irq_restore(flags);
+ }
+
+ cpu_relax();
+ stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+ printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+ return 0;
+}
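+
+/*
+ * Both helpers are meant to be used in a retry loop, e.g. (sketch):
+ *
+ *	do {
+ *		prior = page->freelist;
+ *		counters = page->counters;
+ *		... compute the new freelist and counters ...
+ *	} while (!cmpxchg_double_slab(s, page,
+ *					prior, counters,
+ *					new_freelist, new_counters,
+ *					"caller"));
+ */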
+
#ifdef CONFIG_SLUB_DEBUG
+/*
+ * Determine a map of objects in use on a page.
+ *
+ * The node's list_lock must be held to guarantee that the page does
+ * not vanish from under us.
+ */
+static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
+{
+ void *p;
+ void *addr = page_address(page);
+
+ for (p = page->freelist; p; p = get_freepointer(s, p))
+ set_bit(slab_index(p, s, addr), map);
+}
+
/*
* Debug settings:
*/
struct track *p = get_track(s, object, alloc);
if (addr) {
+#ifdef CONFIG_STACKTRACE
+ struct stack_trace trace;
+ int i;
+
+ trace.nr_entries = 0;
+ trace.max_entries = TRACK_ADDRS_COUNT;
+ trace.entries = p->addrs;
+ trace.skip = 3;
+ save_stack_trace(&trace);
+
+ /* See rant in lockdep.c */
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
+ p->addrs[i] = 0;
+#endif
p->addr = addr;
p->cpu = smp_processor_id();
p->pid = current->pid;
printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+#ifdef CONFIG_STACKTRACE
+ {
+ int i;
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+ if (t->addrs[i])
+ printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
+ else
+ break;
+ }
+#endif
}
static void print_tracking(struct kmem_cache *s, void *object)
dump_stack();
}
-static void init_object(struct kmem_cache *s, void *object, int active)
+static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = object;
}
if (s->flags & SLAB_RED_ZONE)
- memset(p + s->objsize,
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
- s->inuse - s->objsize);
+ memset(p + s->objsize, val, s->inuse - s->objsize);
}
-static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes8(u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
- if (*start != (u8)value)
+ if (*start != value)
return start;
start++;
bytes--;
return NULL;
}
+static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
+{
+ u64 value64;
+ unsigned int words, prefix;
+
+ if (bytes <= 16)
+ return check_bytes8(start, value, bytes);
+
+ value64 = value | value << 8 | value << 16 | value << 24;
+ value64 = (value64 & 0xffffffff) | value64 << 32;
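+
+	/* Check any unaligned prefix byte-wise so the word loop is aligned */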
+ prefix = 8 - ((unsigned long)start) % 8;
+
+ if (prefix) {
+ u8 *r = check_bytes8(start, value, prefix);
+ if (r)
+ return r;
+ start += prefix;
+ bytes -= prefix;
+ }
+
+ words = bytes / 8;
+
+ while (words) {
+ if (*(u64 *)start != value64)
+ return check_bytes8(start, value, 8);
+ start += 8;
+ words--;
+ }
+
+ return check_bytes8(start, value, bytes % 8);
+}
+
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
return 1;
start = page_address(page);
- length = (PAGE_SIZE << compound_order(page));
+ length = (PAGE_SIZE << compound_order(page)) - s->reserved;
end = start + length;
remainder = length % s->size;
if (!remainder)
}
static int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active)
+ void *object, u8 val)
{
u8 *p = object;
u8 *endobject = object + s->objsize;
if (s->flags & SLAB_RED_ZONE) {
- unsigned int red =
- active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
-
if (!check_bytes_and_report(s, page, object, "Redzone",
- endobject, red, s->inuse - s->objsize))
+ endobject, val, s->inuse - s->objsize))
return 0;
} else {
if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
}
if (s->flags & SLAB_POISON) {
- if (!active && (s->flags & __OBJECT_POISON) &&
+ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
POISON_FREE, s->objsize - 1) ||
!check_bytes_and_report(s, page, p, "Poison",
check_pad_bytes(s, page, p);
}
- if (!s->offset && active)
+ if (!s->offset && val == SLUB_RED_ACTIVE)
/*
* Object and freepointer overlap. Cannot check
* freepointer while object is allocated.
return 0;
}
- maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+ maxobj = order_objects(compound_order(page), s->size, s->reserved);
if (page->objects > maxobj) {
	slab_err(s, page, "objects %u > max %u",
		page->objects, maxobj);
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
int nr = 0;
- void *fp = page->freelist;
+ void *fp;
void *object = NULL;
unsigned long max_objects;
+ fp = page->freelist;
while (fp && nr <= page->objects) {
if (fp == search)
return 1;
nr++;
}
- max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+ max_objects = order_objects(compound_order(page), s->size, s->reserved);
if (max_objects > MAX_OBJS_PER_PAGE)
max_objects = MAX_OBJS_PER_PAGE;
}
}
+/*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks should all produce no code at all.
+ */
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+{
+ flags &= gfp_allowed_mask;
+ lockdep_trace_alloc(flags);
+ might_sleep_if(flags & __GFP_WAIT);
+
+ return should_failslab(s->objsize, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+{
+ flags &= gfp_allowed_mask;
+ kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+ kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+ kmemleak_free_recursive(x, s->flags);
+
+ /*
+	 * Trouble is that we may no longer disable interrupts in the fast
+	 * path. So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+ */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kmemcheck_slab_free(s, x, s->objsize);
+ debug_check_no_locks_freed(x, s->objsize);
+ local_irq_restore(flags);
+ }
+#endif
+ if (!(s->flags & SLAB_DEBUG_OBJECTS))
+ debug_check_no_obj_freed(x, s->objsize);
+}
+
/*
* Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
*/
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
- spin_lock(&n->list_lock);
+ if (!(s->flags & SLAB_STORE_USER))
+ return;
+
list_add(&page->lru, &n->full);
- spin_unlock(&n->list_lock);
}
+/*
+ * list_lock must be held.
+ */
static void remove_full(struct kmem_cache *s, struct page *page)
{
- struct kmem_cache_node *n;
-
if (!(s->flags & SLAB_STORE_USER))
return;
- n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
list_del(&page->lru);
- spin_unlock(&n->list_lock);
}
/* Tracking of the number of slabs for debugging purposes */
* dilemma by deferring the increment of the count during
* bootstrap (see early_kmem_cache_node_alloc).
*/
- if (!NUMA_BUILD || n) {
+ if (n) {
atomic_long_inc(&n->nr_slabs);
atomic_long_add(objects, &n->total_objects);
}
if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
return;
- init_object(s, object, 0);
+ init_object(s, object, SLUB_RED_INACTIVE);
init_tracking(s, object);
}
-static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
void *object, unsigned long addr)
{
if (!check_slab(s, page))
goto bad;
- if (!on_freelist(s, page, object)) {
- object_err(s, page, object, "Object already allocated");
- goto bad;
- }
-
if (!check_valid_pointer(s, page, object)) {
object_err(s, page, object, "Freelist Pointer check fails");
goto bad;
}
- if (!check_object(s, page, object, 0))
+ if (!check_object(s, page, object, SLUB_RED_INACTIVE))
goto bad;
/* Success perform special debug activities for allocs */
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_ALLOC, addr);
trace(s, page, object, 1);
- init_object(s, object, 1);
+ init_object(s, object, SLUB_RED_ACTIVE);
return 1;
bad:
return 0;
}
-static int free_debug_processing(struct kmem_cache *s, struct page *page,
- void *object, unsigned long addr)
+static noinline int free_debug_processing(struct kmem_cache *s,
+ struct page *page, void *object, unsigned long addr)
{
+ unsigned long flags;
+ int rc = 0;
+
+ local_irq_save(flags);
+ slab_lock(page);
+
if (!check_slab(s, page))
goto fail;
goto fail;
}
- if (!check_object(s, page, object, 1))
- return 0;
+ if (!check_object(s, page, object, SLUB_RED_ACTIVE))
+ goto out;
if (unlikely(s != page->slab)) {
if (!PageSlab(page)) {
goto fail;
}
- /* Special debug activities for freeing objects */
- if (!PageSlubFrozen(page) && !page->freelist)
- remove_full(s, page);
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
trace(s, page, object, 0);
- init_object(s, object, 0);
- return 1;
+ init_object(s, object, SLUB_RED_INACTIVE);
+ rc = 1;
+out:
+ slab_unlock(page);
+ local_irq_restore(flags);
+ return rc;
fail:
slab_fix(s, "Object at 0x%p not freed", object);
- return 0;
+ goto out;
}
static int __init setup_slub_debug(char *str)
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
- void *object, int active) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+ void *object, u8 val) { return 1; }
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
unsigned long flags, const char *name,
void (*ctor)(void *))
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
-#endif
+
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+ { return 0; }
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
+ void *object) {}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x) {}
+
+#endif /* CONFIG_SLUB_DEBUG */
/*
* Slab allocation and freeing
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
+ flags &= gfp_allowed_mask;
+
+ if (flags & __GFP_WAIT)
+ local_irq_enable();
+
flags |= s->allocflags;
/*
* Try a lower order alloc if possible
*/
page = alloc_slab_page(flags, node, oo);
- if (!page)
- return NULL;
- stat(s, ORDER_FALLBACK);
+ if (page)
+ stat(s, ORDER_FALLBACK);
}
+ if (flags & __GFP_WAIT)
+ local_irq_disable();
+
+ if (!page)
+ return NULL;
+
if (kmemcheck_enabled
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
page->flags |= 1 << PG_slab;
- if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
- SLAB_STORE_USER | SLAB_TRACE))
- __SetPageSlubDebug(page);
start = page_address(page);
page->freelist = start;
page->inuse = 0;
+ page->frozen = 1;
out:
return page;
}
int order = compound_order(page);
int pages = 1 << order;
- if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+ if (kmem_cache_debug(s)) {
void *p;
slab_pad_check(s, page);
for_each_object(p, s, page_address(page),
page->objects)
- check_object(s, page, p, 0);
- __ClearPageSlubDebug(page);
+ check_object(s, page, p, SLUB_RED_INACTIVE);
}
kmemcheck_free_shadow(page, compound_order(page));
__free_pages(page, order);
}
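+
+/*
+ * If struct rcu_head does not fit into the space normally occupied by
+ * page->lru, free_slab() places the rcu_head into reserved space at the
+ * end of the slab instead (see the s->reserved setup in
+ * kmem_cache_open()).
+ */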
+#define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
static void rcu_free_slab(struct rcu_head *h)
{
struct page *page;
- page = container_of((struct list_head *)h, struct page, lru);
+ if (need_reserve_slab_rcu)
+ page = virt_to_head_page(h);
+ else
+ page = container_of((struct list_head *)h, struct page, lru);
+
__free_slab(page->slab, page);
}
static void free_slab(struct kmem_cache *s, struct page *page)
{
if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
- /*
- * RCU free overloads the RCU head over the LRU
- */
- struct rcu_head *head = (void *)&page->lru;
+ struct rcu_head *head;
+
+ if (need_reserve_slab_rcu) {
+ int order = compound_order(page);
+ int offset = (PAGE_SIZE << order) - s->reserved;
+
+ VM_BUG_ON(s->reserved != sizeof(*head));
+ head = page_address(page) + offset;
+ } else {
+ /*
+ * RCU free overloads the RCU head over the LRU
+ */
+ head = (void *)&page->lru;
+ }
call_rcu(head, rcu_free_slab);
} else
}
/*
- * Per slab locking using the pagelock
- */
-static __always_inline void slab_lock(struct page *page)
-{
- bit_spin_lock(PG_locked, &page->flags);
-}
-
-static __always_inline void slab_unlock(struct page *page)
-{
- __bit_spin_unlock(PG_locked, &page->flags);
-}
-
-static __always_inline int slab_trylock(struct page *page)
-{
- int rc = 1;
-
- rc = bit_spin_trylock(PG_locked, &page->flags);
- return rc;
-}
-
-/*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
*/
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
- spin_lock(&n->list_lock);
n->nr_partial++;
if (tail)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
- spin_unlock(&n->list_lock);
}
-static void remove_partial(struct kmem_cache *s, struct page *page)
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
+ struct page *page)
{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
- spin_lock(&n->list_lock);
list_del(&page->lru);
n->nr_partial--;
- spin_unlock(&n->list_lock);
}
/*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
*
* Must hold list_lock.
*/
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
- struct page *page)
+static inline int acquire_slab(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
- if (slab_trylock(page)) {
- list_del(&page->lru);
- n->nr_partial--;
- __SetPageSlubFrozen(page);
+ void *freelist;
+ unsigned long counters;
+ struct page new;
+
+ /*
+ * Zap the freelist and set the frozen bit.
+ * The old freelist is the list of objects for the
+ * per cpu allocation list.
+ */
+ do {
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ new.inuse = page->objects;
+
+ VM_BUG_ON(new.frozen);
+ new.frozen = 1;
+
+ } while (!__cmpxchg_double_slab(s, page,
+ freelist, counters,
+ NULL, new.counters,
+ "lock and freeze"));
+
+ remove_partial(n, page);
+
+ if (freelist) {
+ /* Populate the per cpu freelist */
+ this_cpu_write(s->cpu_slab->freelist, freelist);
+ this_cpu_write(s->cpu_slab->page, page);
+ this_cpu_write(s->cpu_slab->node, page_to_nid(page));
return 1;
+ } else {
+ /*
+ * Slab page came from the wrong list. No object to allocate
+ * from. Put it onto the correct list and continue partial
+ * scan.
+ */
+ printk(KERN_ERR "SLUB: %s : Page without available objects on"
+ " partial list\n", s->name);
+ return 0;
}
- return 0;
}
/*
* Try to allocate a partial slab from a specific node.
*/
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+ struct kmem_cache_node *n)
{
struct page *page;
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (lock_and_freeze_slab(n, page))
+ if (acquire_slab(s, n, page))
goto out;
page = NULL;
out:
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(n);
+ page = get_partial_node(s, n);
if (page) {
put_mems_allowed();
return page;
struct page *page;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- page = get_partial_node(get_node(s, searchnode));
- if (page || (flags & __GFP_THISNODE))
+ page = get_partial_node(s, get_node(s, searchnode));
+ if (page || node != NUMA_NO_NODE)
return page;
return get_any_partial(s, flags);
}
+#ifdef CONFIG_PREEMPT
/*
- * Move a page back to the lists.
- *
- * Must be called with the slab lock held.
- *
- * On exit the slab lock will have been dropped.
+ * Calculate the next globally unique transaction id for disambiguation
+ * during cmpxchg. The transaction ids start with the cpu number and are
+ * then incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of
+ * two).
+ */
+#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
+#else
+/*
+ * No preemption supported therefore also no need to check for
+ * different cpus.
*/
-static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
+#define TID_STEP 1
+#endif
+
+static inline unsigned long next_tid(unsigned long tid)
{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ return tid + TID_STEP;
+}
- __ClearPageSlubFrozen(page);
- if (page->inuse) {
+static inline unsigned int tid_to_cpu(unsigned long tid)
+{
+ return tid % TID_STEP;
+}
- if (page->freelist) {
- add_partial(n, page, tail);
- stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
- } else {
- stat(s, DEACTIVATE_FULL);
- if (SLABDEBUG && PageSlubDebug(page) &&
- (s->flags & SLAB_STORE_USER))
- add_full(n, page);
- }
- slab_unlock(page);
- } else {
- stat(s, DEACTIVATE_EMPTY);
- if (n->nr_partial < s->min_partial) {
- /*
- * Adding an empty slab to the partial slabs in order
- * to avoid page allocator overhead. This slab needs
- * to come after the other slabs with objects in
- * so that the others get filled first. That way the
- * size of the partial list stays small.
- *
- * kmem_cache_shrink can reclaim any empty slabs from
- * the partial list.
- */
- add_partial(n, page, 1);
- slab_unlock(page);
- } else {
- slab_unlock(page);
- stat(s, FREE_SLAB);
- discard_slab(s, page);
- }
- }
+static inline unsigned long tid_to_event(unsigned long tid)
+{
+ return tid / TID_STEP;
+}
+
+static inline unsigned int init_tid(int cpu)
+{
+ return cpu;
+}
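+
+/*
+ * Example (CONFIG_PREEMPT, CONFIG_NR_CPUS = 4, so TID_STEP = 4):
+ * cpu 1 uses the tid sequence 1, 5, 9, ... Thus tid_to_cpu(tid) = tid % 4
+ * recovers the cpu and tid_to_event(tid) = tid / 4 counts the operations
+ * performed. A cmpxchg against a stale tid therefore fails both after a
+ * cpu migration and after an intervening operation on the same cpu.
+ */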
+
+static inline void note_cmpxchg_failure(const char *n,
+ const struct kmem_cache *s, unsigned long tid)
+{
+#ifdef SLUB_DEBUG_CMPXCHG
+ unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
+
+ printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
+
+#ifdef CONFIG_PREEMPT
+ if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
+ printk("due to cpu change %d -> %d\n",
+ tid_to_cpu(tid), tid_to_cpu(actual_tid));
+ else
+#endif
+ if (tid_to_event(tid) != tid_to_event(actual_tid))
+ printk("due to cpu running other code. Event %ld->%ld\n",
+ tid_to_event(tid), tid_to_event(actual_tid));
+ else
+ printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
+ actual_tid, tid, next_tid(tid));
+#endif
+ stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
+}
+
+void init_kmem_cache_cpus(struct kmem_cache *s)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
}
/*
* Remove the cpu slab
*/
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
+ enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct page *page = c->page;
- int tail = 1;
-
- if (page->freelist)
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ int lock = 0;
+ enum slab_modes l = M_NONE, m = M_NONE;
+ void *freelist;
+ void *nextfree;
+ int tail = 0;
+ struct page new;
+ struct page old;
+
+ if (page->freelist) {
stat(s, DEACTIVATE_REMOTE_FREES);
+ tail = 1;
+ }
+
+ c->tid = next_tid(c->tid);
+ c->page = NULL;
+ freelist = c->freelist;
+ c->freelist = NULL;
+
/*
- * Merge cpu freelist into slab freelist. Typically we get here
- * because both freelists are empty. So this is unlikely
- * to occur.
+ * Stage one: Free all available per cpu objects back
+ * to the page freelist while it is still frozen. Leave the
+ * last one.
+ *
+	 * There is no need to take the list_lock because the page
+ * is still frozen.
*/
- while (unlikely(c->freelist)) {
- void **object;
+ while (freelist && (nextfree = get_freepointer(s, freelist))) {
+ void *prior;
+ unsigned long counters;
+
+ do {
+ prior = page->freelist;
+ counters = page->counters;
+ set_freepointer(s, freelist, prior);
+ new.counters = counters;
+ new.inuse--;
+ VM_BUG_ON(!new.frozen);
+
+ } while (!__cmpxchg_double_slab(s, page,
+ prior, counters,
+ freelist, new.counters,
+ "drain percpu freelist"));
+
+ freelist = nextfree;
+ }
- tail = 0; /* Hot objects. Put the slab first */
+ /*
+ * Stage two: Ensure that the page is unfrozen while the
+ * list presence reflects the actual number of objects
+ * during unfreeze.
+ *
+	 * We set up the list membership and then perform a cmpxchg
+ * with the count. If there is a mismatch then the page
+ * is not unfrozen but the page is on the wrong list.
+ *
+ * Then we restart the process which may have to remove
+ * the page from the list that we just put it on again
+ * because the number of objects in the slab may have
+ * changed.
+ */
+redo:
+
+ old.freelist = page->freelist;
+ old.counters = page->counters;
+ VM_BUG_ON(!old.frozen);
+
+ /* Determine target state of the slab */
+ new.counters = old.counters;
+ if (freelist) {
+ new.inuse--;
+ set_freepointer(s, freelist, old.freelist);
+ new.freelist = freelist;
+ } else
+ new.freelist = old.freelist;
- /* Retrieve object from cpu_freelist */
- object = c->freelist;
- c->freelist = get_freepointer(s, c->freelist);
+ new.frozen = 0;
- /* And put onto the regular freelist */
- set_freepointer(s, object, page->freelist);
- page->freelist = object;
- page->inuse--;
+ if (!new.inuse && n->nr_partial > s->min_partial)
+ m = M_FREE;
+ else if (new.freelist) {
+ m = M_PARTIAL;
+ if (!lock) {
+ lock = 1;
+ /*
+			 * Taking the spinlock removes the possibility
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+ spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+ if (kmem_cache_debug(s) && !lock) {
+ lock = 1;
+ /*
+ * This also ensures that the scanning of full
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+ spin_lock(&n->list_lock);
+ }
+ }
+
+	if (l != m) {
+		if (l == M_PARTIAL)
+			remove_partial(n, page);
+		else if (l == M_FULL)
+			remove_full(s, page);
+
+		if (m == M_PARTIAL) {
+			add_partial(n, page, tail);
+			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+		} else if (m == M_FULL) {
+			stat(s, DEACTIVATE_FULL);
+			add_full(s, n, page);
+		}
+	}
+
+ l = m;
+ if (!__cmpxchg_double_slab(s, page,
+ old.freelist, old.counters,
+ new.freelist, new.counters,
+ "unfreezing slab"))
+ goto redo;
+
+ if (lock)
+ spin_unlock(&n->list_lock);
+
+ if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+ discard_slab(s, page);
+ stat(s, FREE_SLAB);
}
- c->page = NULL;
- unfreeze_slab(s, page, tail);
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
- slab_lock(c->page);
deactivate_slab(s, c);
}
unsigned long addr, struct kmem_cache_cpu *c)
{
void **object;
- struct page *new;
+ struct page *page;
+ unsigned long flags;
+ struct page new;
+ unsigned long counters;
+
+ local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+ /*
+ * We may have been preempted and rescheduled on a different
+ * cpu before disabling interrupts. Need to reload cpu area
+ * pointer.
+ */
+ c = this_cpu_ptr(s->cpu_slab);
+#endif
/* We handle __GFP_ZERO in the caller */
gfpflags &= ~__GFP_ZERO;
- if (!c->page)
+ page = c->page;
+ if (!page)
+ goto new_slab;
+
+ if (unlikely(!node_match(c, node))) {
+ stat(s, ALLOC_NODE_MISMATCH);
+ deactivate_slab(s, c);
goto new_slab;
+ }
+
+ stat(s, ALLOC_SLOWPATH);
+
+ do {
+ object = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ VM_BUG_ON(!new.frozen);
+
+ /*
+ * If there is no object left then we use this loop to
+ * deactivate the slab which is simple since no objects
+ * are left in the slab and therefore we do not need to
+ * put the page back onto the partial list.
+ *
+ * If there are objects left then we retrieve them
+ * and use them to refill the per cpu queue.
+ */
+
+ new.inuse = page->objects;
+ new.frozen = object != NULL;
- slab_lock(c->page);
- if (unlikely(!node_match(c, node)))
- goto another_slab;
+ } while (!__cmpxchg_double_slab(s, page,
+ object, counters,
+ NULL, new.counters,
+ "__slab_alloc"));
+
+ if (unlikely(!object)) {
+ c->page = NULL;
+ stat(s, DEACTIVATE_BYPASS);
+ goto new_slab;
+ }
stat(s, ALLOC_REFILL);
load_freelist:
- object = c->page->freelist;
- if (unlikely(!object))
- goto another_slab;
- if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
- goto debug;
-
+ VM_BUG_ON(!page->frozen);
c->freelist = get_freepointer(s, object);
- c->page->inuse = c->page->objects;
- c->page->freelist = NULL;
- c->node = page_to_nid(c->page);
-unlock_out:
- slab_unlock(c->page);
- stat(s, ALLOC_SLOWPATH);
+ c->tid = next_tid(c->tid);
+ local_irq_restore(flags);
return object;
-another_slab:
- deactivate_slab(s, c);
-
new_slab:
- new = get_partial(s, gfpflags, node);
- if (new) {
- c->page = new;
+ page = get_partial(s, gfpflags, node);
+ if (page) {
stat(s, ALLOC_FROM_PARTIAL);
+ object = c->freelist;
+
+ if (kmem_cache_debug(s))
+ goto debug;
goto load_freelist;
}
- if (gfpflags & __GFP_WAIT)
- local_irq_enable();
-
- new = new_slab(s, gfpflags, node);
-
- if (gfpflags & __GFP_WAIT)
- local_irq_disable();
+ page = new_slab(s, gfpflags, node);
- if (new) {
+ if (page) {
c = __this_cpu_ptr(s->cpu_slab);
- stat(s, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
- slab_lock(new);
- __SetPageSlubFrozen(new);
- c->page = new;
+
+ /*
+ * No other reference to the page yet so we can
+ * muck around with it freely without cmpxchg
+ */
+ object = page->freelist;
+ page->freelist = NULL;
+ page->inuse = page->objects;
+
+ stat(s, ALLOC_SLAB);
+ c->node = page_to_nid(page);
+ c->page = page;
+
+ if (kmem_cache_debug(s))
+ goto debug;
goto load_freelist;
}
if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
slab_out_of_memory(s, gfpflags, node);
+ local_irq_restore(flags);
return NULL;
+
debug:
- if (!alloc_debug_processing(s, c->page, object, addr))
- goto another_slab;
+ if (!object || !alloc_debug_processing(s, page, object, addr))
+ goto new_slab;
- c->page->inuse++;
- c->page->freelist = get_freepointer(s, object);
- c->node = -1;
- goto unlock_out;
+ c->freelist = get_freepointer(s, object);
+ deactivate_slab(s, c);
+ c->page = NULL;
+ c->node = NUMA_NO_NODE;
+ local_irq_restore(flags);
+ return object;
}
/*
{
void **object;
struct kmem_cache_cpu *c;
- unsigned long flags;
-
- gfpflags &= gfp_allowed_mask;
-
- lockdep_trace_alloc(gfpflags);
- might_sleep_if(gfpflags & __GFP_WAIT);
+ unsigned long tid;
- if (should_failslab(s->objsize, gfpflags, s->flags))
+ if (slab_pre_alloc_hook(s, gfpflags))
return NULL;
- local_irq_save(flags);
+redo:
+
+ /*
+ * Must read kmem_cache cpu data via this cpu ptr. Preemption is
+ * enabled. We may switch back and forth between cpus while
+ * reading from one cpu area. That does not matter as long
+ * as we end up on the original cpu again when doing the cmpxchg.
+ */
c = __this_cpu_ptr(s->cpu_slab);
+
+ /*
+ * The transaction ids are globally unique per cpu and per operation on
+	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
+ * occurs on the right processor and that there was no operation on the
+ * linked list in between.
+ */
+ tid = c->tid;
+ barrier();
+
object = c->freelist;
if (unlikely(!object || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
- c->freelist = get_freepointer(s, object);
+ /*
+ * The cmpxchg will only match if there was no additional
+ * operation and if we are on the right processor.
+ *
+ * The cmpxchg does the following atomically (without lock semantics!)
+ * 1. Relocate first pointer to the current per cpu area.
+ * 2. Verify that tid and freelist have not been changed
+ * 3. If they were not changed replace tid and freelist
+ *
+	 * Since this is without lock semantics the protection is only against
+	 * code executing on this cpu, *not* against access by other cpus.
+ */
+ if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ s->cpu_slab->freelist, s->cpu_slab->tid,
+ object, tid,
+ get_freepointer_safe(s, object), next_tid(tid)))) {
+
+ note_cmpxchg_failure("slab_alloc", s, tid);
+ goto redo;
+ }
stat(s, ALLOC_FASTPATH);
}
- local_irq_restore(flags);
if (unlikely(gfpflags & __GFP_ZERO) && object)
memset(object, 0, s->objsize);
- kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
- kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
+ slab_post_alloc_hook(s, gfpflags, object);
return object;
}
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+ void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+ return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
- return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+ void *ret = kmalloc_order(size, flags, order);
+ trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
#endif
#ifdef CONFIG_NUMA
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
-#endif
#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
gfp_t gfpflags,
- int node)
+ int node, size_t size)
{
- return slab_alloc(s, gfpflags, node, _RET_IP_);
+ void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, s->size, gfpflags, node);
+ return ret;
}
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
+#endif
#endif
/*
{
void *prior;
void **object = (void *)x;
+ int was_frozen;
+ int inuse;
+ struct page new;
+ unsigned long counters;
+ struct kmem_cache_node *n = NULL;
+ unsigned long uninitialized_var(flags);
stat(s, FREE_SLOWPATH);
- slab_lock(page);
- if (unlikely(SLABDEBUG && PageSlubDebug(page)))
- goto debug;
+ if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+ return;
-checks_ok:
- prior = page->freelist;
- set_freepointer(s, object, prior);
- page->freelist = object;
- page->inuse--;
+ do {
+ prior = page->freelist;
+ counters = page->counters;
+ set_freepointer(s, object, prior);
+ new.counters = counters;
+ was_frozen = new.frozen;
+ new.inuse--;
+ if ((!new.inuse || !prior) && !was_frozen && !n) {
+ n = get_node(s, page_to_nid(page));
+ /*
+ * Speculatively acquire the list_lock.
+ * If the cmpxchg does not succeed then we may
+ * drop the list_lock without any processing.
+ *
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+ spin_lock_irqsave(&n->list_lock, flags);
+ }
+ inuse = new.inuse;
- if (unlikely(PageSlubFrozen(page))) {
- stat(s, FREE_FROZEN);
- goto out_unlock;
- }
+ } while (!cmpxchg_double_slab(s, page,
+ prior, counters,
+ object, new.counters,
+ "__slab_free"));
- if (unlikely(!page->inuse))
- goto slab_empty;
+ if (likely(!n)) {
+ /*
+ * The list lock was not taken therefore no list
+ * activity can be necessary.
+ */
+ if (was_frozen)
+ stat(s, FREE_FROZEN);
+ return;
+ }
/*
- * Objects left in the slab. If it was not on the partial list before
- * then add it.
+ * was_frozen may have been set after we acquired the list_lock in
+ * an earlier loop. So we need to check it here again.
*/
- if (unlikely(!prior)) {
- add_partial(get_node(s, page_to_nid(page)), page, 1);
- stat(s, FREE_ADD_PARTIAL);
- }
+ if (was_frozen)
+ stat(s, FREE_FROZEN);
+ else {
+ if (unlikely(!inuse && n->nr_partial > s->min_partial))
+ goto slab_empty;
-out_unlock:
- slab_unlock(page);
+ /*
+ * Objects left in the slab. If it was not on the partial list before
+ * then add it.
+ */
+ if (unlikely(!prior)) {
+ remove_full(s, page);
+ add_partial(n, page, 0);
+ stat(s, FREE_ADD_PARTIAL);
+ }
+ }
+ spin_unlock_irqrestore(&n->list_lock, flags);
return;
slab_empty:
if (prior) {
/*
- * Slab still on the partial list.
+ * Slab on the partial list.
*/
- remove_partial(s, page);
+ remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- }
- slab_unlock(page);
+ } else
+ /* Slab must be on the full list */
+ remove_full(s, page);
+
+ spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
discard_slab(s, page);
- return;
-
-debug:
- if (!free_debug_processing(s, page, x, addr))
- goto out_unlock;
- goto checks_ok;
}
/*
{
void **object = (void *)x;
struct kmem_cache_cpu *c;
- unsigned long flags;
+ unsigned long tid;
- kmemleak_free_recursive(x, s->flags);
- local_irq_save(flags);
+ slab_free_hook(s, x);
+
+redo:
+
+ /*
+	 * Determine the current cpu's per cpu slab.
+	 * The cpu may change afterward. However that does not matter since
+	 * data is retrieved via this pointer. If we are on the same cpu
+	 * during the cmpxchg then the free will succeed.
+ */
c = __this_cpu_ptr(s->cpu_slab);
- kmemcheck_slab_free(s, object, s->objsize);
- debug_check_no_locks_freed(object, s->objsize);
- if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
- if (likely(page == c->page && c->node >= 0)) {
+
+ tid = c->tid;
+ barrier();
+
+ if (likely(page == c->page)) {
set_freepointer(s, object, c->freelist);
- c->freelist = object;
+
+ if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ s->cpu_slab->freelist, s->cpu_slab->tid,
+ c->freelist, tid,
+ object, next_tid(tid)))) {
+
+ note_cmpxchg_failure("slab_free", s, tid);
+ goto redo;
+ }
stat(s, FREE_FASTPATH);
} else
__slab_free(s, page, x, addr);
- local_irq_restore(flags);
}
void kmem_cache_free(struct kmem_cache *s, void *x)
}
EXPORT_SYMBOL(kmem_cache_free);
-/* Figure out on which slab page the object resides */
-static struct page *get_object_page(const void *x)
-{
- struct page *page = virt_to_head_page(x);
-
- if (!PageSlab(page))
- return NULL;
-
- return page;
-}
-
/*
* Object placement in a slab is made very easy because we always start at
* offset 0. If we tune the size of the object to the alignment then we can
* the smallest order which will fit the object.
*/
static inline int slab_order(int size, int min_objects,
- int max_order, int fract_leftover)
+ int max_order, int fract_leftover, int reserved)
{
int order;
int rem;
int min_order = slub_min_order;
- if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+ if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
return get_order(size * MAX_OBJS_PER_PAGE) - 1;
for (order = max(min_order,
unsigned long slab_size = PAGE_SIZE << order;
- if (slab_size < min_objects * size)
+ if (slab_size < min_objects * size + reserved)
continue;
- rem = slab_size % size;
+ rem = (slab_size - reserved) % size;
if (rem <= slab_size / fract_leftover)
break;
return order;
}
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
{
int order;
int min_objects;
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
- max_objects = (PAGE_SIZE << slub_max_order)/size;
+ max_objects = order_objects(slub_max_order, size, reserved);
min_objects = min(min_objects, max_objects);
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
order = slab_order(size, min_objects,
- slub_max_order, fraction);
+ slub_max_order, fraction, reserved);
if (order <= slub_max_order)
return order;
fraction /= 2;
* We were unable to place multiple objects in a slab. Now
* lets see if we can place a single object there.
*/
- order = slab_order(size, 1, slub_max_order, 1);
+ order = slab_order(size, 1, slub_max_order, 1, reserved);
if (order <= slub_max_order)
return order;
/*
* Doh this slab cannot be placed using slub_max_order.
*/
- order = slab_order(size, 1, MAX_ORDER, 1);
+ order = slab_order(size, 1, MAX_ORDER, 1, reserved);
if (order < MAX_ORDER)
return order;
return -ENOSYS;
#endif
}
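+
+/*
+ * Worked example (4K pages, reserved = 0, assuming slub_min_objects
+ * permits these orders): for size = 256, order 0 already holds 16
+ * objects with no remainder, so the search stops there. For size = 700,
+ * order 0 would waste 596 of 4096 bytes, more than the 1/16 allowed at
+ * the first fraction, so order 1 is tried next: it fits 11 objects and
+ * wastes only 492 of 8192 bytes.
+ */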
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
-
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
{
- if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
- /*
- * Boot time creation of the kmalloc array. Use static per cpu data
- * since the per cpu allocator is not available yet.
- */
- s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
- else
- s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+ BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
+ SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+
+ /*
+ * Must align to double word boundary for the double cmpxchg
+ * instructions to work; see __pcpu_double_call_return_bool().
+ */
+ s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+ 2 * sizeof(void *));
if (!s->cpu_slab)
return 0;
+ init_kmem_cache_cpus(s);
+
return 1;
}
-#ifdef CONFIG_NUMA
+static struct kmem_cache *kmem_cache_node;
+
/*
* No kmalloc_node yet so do it by hand. We know that this is the first
* slab on the node for this slabcache. There are no concurrent accesses
* when allocating for the kmalloc_node_cache. This is used for bootstrapping
* memory on a fresh node that has no slab structures yet.
*/
-static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
+static void early_kmem_cache_node_alloc(int node)
{
struct page *page;
struct kmem_cache_node *n;
- unsigned long flags;
- BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
+ BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
- page = new_slab(kmalloc_caches, gfpflags, node);
+ page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
BUG_ON(!page);
if (page_to_nid(page) != node) {
n = page->freelist;
BUG_ON(!n);
- page->freelist = get_freepointer(kmalloc_caches, n);
+ page->freelist = get_freepointer(kmem_cache_node, n);
page->inuse++;
- kmalloc_caches->node[node] = n;
+ page->frozen = 0;
+ kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
- init_object(kmalloc_caches, n, 1);
- init_tracking(kmalloc_caches, n);
+ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
+ init_tracking(kmem_cache_node, n);
#endif
- init_kmem_cache_node(n, kmalloc_caches);
- inc_slabs_node(kmalloc_caches, node, page->objects);
+ init_kmem_cache_node(n, kmem_cache_node);
+ inc_slabs_node(kmem_cache_node, node, page->objects);
- /*
- * lockdep requires consistent irq usage for each lock
- * so even though there cannot be a race this early in
- * the boot sequence, we still disable irqs.
- */
- local_irq_save(flags);
add_partial(n, page, 0);
- local_irq_restore(flags);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = s->node[node];
+
if (n)
- kmem_cache_free(kmalloc_caches, n);
+ kmem_cache_free(kmem_cache_node, n);
+
s->node[node] = NULL;
}
}
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static int init_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
struct kmem_cache_node *n;
if (slab_state == DOWN) {
- early_kmem_cache_node_alloc(gfpflags, node);
+ early_kmem_cache_node_alloc(node);
continue;
}
- n = kmem_cache_alloc_node(kmalloc_caches,
- gfpflags, node);
+ n = kmem_cache_alloc_node(kmem_cache_node,
+ GFP_KERNEL, node);
if (!n) {
free_kmem_cache_nodes(s);
}
return 1;
}
-#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
-{
-}
-
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
-{
- init_kmem_cache_node(&s->local_node, s);
- return 1;
-}
-#endif
static void set_min_partial(struct kmem_cache *s, unsigned long min)
{
if (forced_order >= 0)
order = forced_order;
else
- order = calculate_order(size);
+ order = calculate_order(size, s->reserved);
if (order < 0)
return 0;
/*
* Determine the number of objects per slab
*/
- s->oo = oo_make(order, size);
- s->min = oo_make(get_order(size), size);
+ s->oo = oo_make(order, size, s->reserved);
+ s->min = oo_make(get_order(size), size, s->reserved);
if (oo_objects(s->oo) > oo_objects(s->max))
s->max = s->oo;
}
-static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
+static int kmem_cache_open(struct kmem_cache *s,
const char *name, size_t size,
size_t align, unsigned long flags,
void (*ctor)(void *))
s->objsize = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
+ s->reserved = 0;
+
+ if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+ s->reserved = sizeof(struct rcu_head);
if (!calculate_sizes(s, -1))
goto error;
}
}
+#ifdef CONFIG_CMPXCHG_DOUBLE
+ if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
+ /* Enable fast mode */
+ s->flags |= __CMPXCHG_DOUBLE;
+#endif
+
/*
* The larger the object size is, the more pages we want on the partial
* list to avoid pounding the page allocator excessively.
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
#endif
- if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
+ if (!init_kmem_cache_nodes(s))
goto error;
- if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
+ if (alloc_kmem_cache_cpus(s))
return 1;
free_kmem_cache_nodes(s);
return 0;
}
-/*
- * Check if a given pointer is valid
- */
-int kmem_ptr_validate(struct kmem_cache *s, const void *object)
-{
- struct page *page;
-
- if (!kern_ptr_validate(object, s->size))
- return 0;
-
- page = get_object_page(object);
-
- if (!page || s != page->slab)
- /* No slab or wrong slab */
- return 0;
-
- if (!check_valid_pointer(s, page, object))
- return 0;
-
- /*
- * We could also check if the object is on the slabs freelist.
- * But this would be too expensive and it seems that the main
- * purpose of kmem_ptr_valid() is to check if the object belongs
- * to a certain slab.
- */
- return 1;
-}
-EXPORT_SYMBOL(kmem_ptr_validate);
-
/*
* Determine the size of a slab object
*/
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *s)
-{
- return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long),
- GFP_ATOMIC);
-
+ unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
+ sizeof(long), GFP_ATOMIC);
if (!map)
return;
slab_err(s, page, "%s", text);
slab_lock(page);
- for_each_free_object(p, s, page->freelist)
- set_bit(slab_index(p, s, addr), map);
+ get_map(s, page, map);
for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) {
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- list_del(&page->lru);
+ remove_partial(n, page);
discard_slab(s, page);
- n->nr_partial--;
} else {
list_slab_objects(s, page,
"Objects remaining on kmem_cache_close()");
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
- up_write(&slub_lock);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier();
sysfs_slab_remove(s);
- } else
- up_write(&slub_lock);
+ }
+ up_write(&slub_lock);
}
EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
+struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
EXPORT_SYMBOL(kmalloc_caches);
+static struct kmem_cache *kmem_cache;
+
+#ifdef CONFIG_ZONE_DMA
+static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
+#endif
+
static int __init setup_slub_min_order(char *str)
{
get_option(&str, &slub_min_order);
__setup("slub_nomerge", setup_slub_nomerge);
-static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
- const char *name, int size, gfp_t gfp_flags)
+static struct kmem_cache *__init create_kmalloc_cache(const char *name,
+ int size, unsigned int flags)
{
- unsigned int flags = 0;
+ struct kmem_cache *s;
- if (gfp_flags & SLUB_DMA)
- flags = SLAB_CACHE_DMA;
+ s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
/*
* This function is called with IRQs disabled during early-boot on
* single CPU so there's no need to take slub_lock here.
*/
- if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
+ if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
goto panic;
list_add(&s->list, &slab_caches);
-
- if (sysfs_slab_add(s))
- goto panic;
return s;
panic:
panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
+ return NULL;
}
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
-
-static void sysfs_add_func(struct work_struct *w)
-{
- struct kmem_cache *s;
-
- down_write(&slub_lock);
- list_for_each_entry(s, &slab_caches, list) {
- if (s->flags & __SYSFS_ADD_DEFERRED) {
- s->flags &= ~__SYSFS_ADD_DEFERRED;
- sysfs_slab_add(s);
- }
- }
- up_write(&slub_lock);
-}
-
-static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
-
-static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
-{
- struct kmem_cache *s;
- char *text;
- size_t realsize;
- unsigned long slabflags;
- int i;
-
- s = kmalloc_caches_dma[index];
- if (s)
- return s;
-
- /* Dynamically create dma cache */
- if (flags & __GFP_WAIT)
- down_write(&slub_lock);
- else {
- if (!down_write_trylock(&slub_lock))
- goto out;
- }
-
- if (kmalloc_caches_dma[index])
- goto unlock_out;
-
- realsize = kmalloc_caches[index].objsize;
- text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
- (unsigned int)realsize);
-
- s = NULL;
- for (i = 0; i < KMALLOC_CACHES; i++)
- if (!kmalloc_caches[i].size)
- break;
-
- BUG_ON(i >= KMALLOC_CACHES);
- s = kmalloc_caches + i;
-
- /*
- * Must defer sysfs creation to a workqueue because we don't know
- * what context we are called from. Before sysfs comes up, we don't
- * need to do anything because our sysfs initcall will start by
- * adding all existing slabs to sysfs.
- */
- slabflags = SLAB_CACHE_DMA|SLAB_NOTRACK;
- if (slab_state >= SYSFS)
- slabflags |= __SYSFS_ADD_DEFERRED;
-
- if (!text || !kmem_cache_open(s, flags, text,
- realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
- s->size = 0;
- kfree(text);
- goto unlock_out;
- }
-
- list_add(&s->list, &slab_caches);
- kmalloc_caches_dma[index] = s;
-
- if (slab_state >= SYSFS)
- schedule_work(&sysfs_add_work);
-
-unlock_out:
- up_write(&slub_lock);
-out:
- return kmalloc_caches_dma[index];
-}
-#endif
-
/*
 * Conversion table for small slab sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have non power
#ifdef CONFIG_ZONE_DMA
if (unlikely((flags & SLUB_DMA)))
- return dma_kmalloc_cache(index, flags);
+ return kmalloc_dma_caches[index];
#endif
- return &kmalloc_caches[index];
+ return kmalloc_caches[index];
}
void *__kmalloc(size_t size, gfp_t flags)
}
EXPORT_SYMBOL(__kmalloc);
+#ifdef CONFIG_NUMA
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
struct page *page;
return ptr;
}
-#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *s;
size_t ksize(const void *object)
{
struct page *page;
- struct kmem_cache *s;
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
WARN_ON(!PageCompound(page));
return PAGE_SIZE << compound_order(page);
}
- s = page->slab;
+
+ return slab_ksize(page->slab);
+}
+EXPORT_SYMBOL(ksize);
#ifdef CONFIG_SLUB_DEBUG
- /*
- * Debugging requires use of the padding between object
- * and whatever may come after it.
- */
- if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->objsize;
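+/*
+ * verify_mem_not_deleted() - best-effort check that an object has not
+ * been returned to its slab. It reports false only when the object is
+ * found on its slab page's freelist; pointers that do not belong to a
+ * slab page at all (stack, vmalloc, ...) are treated as valid.
+ *
+ * A hypothetical caller could do
+ *	WARN_ON(!verify_mem_not_deleted(obj));
+ * before dereferencing a long-lived cached pointer.
+ */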
+bool verify_mem_not_deleted(const void *x)
+{
+ struct page *page;
+ void *object = (void *)x;
+ unsigned long flags;
+ bool rv;
-#endif
- /*
- * If we have the need to store the freelist pointer
- * back there or track user information then we can
- * only use the space before that information.
- */
- if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
- return s->inuse;
- /*
- * Else we can use all the padding etc for the allocation
- */
- return s->size;
+ if (unlikely(ZERO_OR_NULL_PTR(x)))
+ return false;
+
+ local_irq_save(flags);
+
+ page = virt_to_head_page(x);
+ if (unlikely(!PageSlab(page))) {
+ /* Maybe it came from the stack or another non-slab allocation */
+ rv = true;
+ goto out_unlock;
+ }
+
+ slab_lock(page);
+ if (on_freelist(page->slab, page, object)) {
+ object_err(page->slab, page, object, "Object is on free-list");
+ rv = false;
+ } else {
+ rv = true;
+ }
+ slab_unlock(page);
+
+out_unlock:
+ local_irq_restore(flags);
+ return rv;
}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(verify_mem_not_deleted);
+#endif
void kfree(const void *x)
{
* list_lock. page->inuse here is the upper limit.
*/
list_for_each_entry_safe(page, t, &n->partial, lru) {
- if (!page->inuse && slab_trylock(page)) {
- /*
- * Must hold slab lock here because slab_free
- * may have freed the last object and be
- * waiting to release the slab.
- */
- list_del(&page->lru);
- n->nr_partial--;
- slab_unlock(page);
+ if (!page->inuse) {
+ remove_partial(n, page);
discard_slab(s, page);
} else {
list_move(&page->lru,
}
EXPORT_SYMBOL(kmem_cache_shrink);
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback(void *arg)
{
struct kmem_cache *s;
BUG_ON(slabs_node(s, offline_node));
s->node[offline_node] = NULL;
- kmem_cache_free(kmalloc_caches, n);
+ kmem_cache_free(kmem_cache_node, n);
}
}
up_read(&slub_lock);
* since memory is not yet available from the node that
* is brought up.
*/
- n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
+ n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
if (!n) {
ret = -ENOMEM;
goto out;
* Basic setup of slabs
*******************************************************************/
+/*
+ * Used for early kmem_cache structures that were allocated from the
+ * page allocator. Fixes up the page->slab back pointers of all slabs
+ * on the node lists so that they point at the final structure.
+ */
+
+static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
+{
+ int node;
+
+ list_add(&s->list, &slab_caches);
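+ /* refcount == -1 marks a bootstrap cache that may never be removed */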
+ s->refcount = -1;
+
+ for_each_node_state(node, N_NORMAL_MEMORY) {
+ struct kmem_cache_node *n = get_node(s, node);
+ struct page *p;
+
+ if (n) {
+ list_for_each_entry(p, &n->partial, lru)
+ p->slab = s;
+
+#ifdef CONFIG_SLUB_DEBUG
+ list_for_each_entry(p, &n->full, lru)
+ p->slab = s;
+#endif
+ }
+ }
+}
+
void __init kmem_cache_init(void)
{
int i;
int caches = 0;
+ struct kmem_cache *temp_kmem_cache;
+ int order;
+ struct kmem_cache *temp_kmem_cache_node;
+ unsigned long kmalloc_size;
+
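+ /* struct kmem_cache ends in an array of nr_node_ids node pointers */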
+ kmem_size = offsetof(struct kmem_cache, node) +
+ nr_node_ids * sizeof(struct kmem_cache_node *);
+
+ /* Allocate two kmem_caches from the page allocator */
+ kmalloc_size = ALIGN(kmem_size, cache_line_size());
+ order = get_order(2 * kmalloc_size);
+ kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
-#ifdef CONFIG_NUMA
/*
* Must first have the slab cache available for the allocations of the
* struct kmem_cache_node's. There is special bootstrap code in
* kmem_cache_open for slab_state == DOWN.
*/
- create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
- sizeof(struct kmem_cache_node), GFP_NOWAIT);
- kmalloc_caches[0].refcount = -1;
- caches++;
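+ /* kmem_cache_node occupies the second half of the boot allocation */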
+ kmem_cache_node = (void *)kmem_cache + kmalloc_size;
+
+ kmem_cache_open(kmem_cache_node, "kmem_cache_node",
+ sizeof(struct kmem_cache_node),
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#endif
/* Able to allocate the per node structures */
slab_state = PARTIAL;
- /* Caches that are not of the two-to-the-power-of size */
- if (KMALLOC_MIN_SIZE <= 32) {
- create_kmalloc_cache(&kmalloc_caches[1],
- "kmalloc-96", 96, GFP_NOWAIT);
- caches++;
- }
- if (KMALLOC_MIN_SIZE <= 64) {
- create_kmalloc_cache(&kmalloc_caches[2],
- "kmalloc-192", 192, GFP_NOWAIT);
- caches++;
- }
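+ /*
+ * Bootstrap kmem_cache itself: open it inside the temporary page
+ * allocator copy, then allocate the final structure from that very
+ * cache and switch over to it.
+ */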
+ temp_kmem_cache = kmem_cache;
+ kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+ kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+ memcpy(kmem_cache, temp_kmem_cache, kmem_size);
- for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
- create_kmalloc_cache(&kmalloc_caches[i],
- "kmalloc", 1 << i, GFP_NOWAIT);
- caches++;
- }
+ /*
+ * Allocate kmem_cache_node properly from the kmem_cache slab.
+ * kmem_cache_node is separately allocated so no need to
+ * update any list pointers.
+ */
+ temp_kmem_cache_node = kmem_cache_node;
+
+ kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
+ memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);
+
+ kmem_cache_bootstrap_fixup(kmem_cache_node);
+
+ caches++;
+ kmem_cache_bootstrap_fixup(kmem_cache);
+ caches++;
+ /* Free temporary boot structure */
+ free_pages((unsigned long)temp_kmem_cache, order);
+ /* Now we can use the kmem_cache to allocate kmalloc slabs */
/*
* Patch up the size_index table if we have strange large alignment
size_index[size_index_elem(i)] = 8;
}
+ /* Caches with sizes that are not a power of two */
+ if (KMALLOC_MIN_SIZE <= 32) {
+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
+ caches++;
+ }
+
+ if (KMALLOC_MIN_SIZE <= 64) {
+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
+ caches++;
+ }
+
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
+ caches++;
+ }
+
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
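+ /*
+ * The boot-time names were string literals; duplicate them so that
+ * the sysfs release callback can kfree(s->name) unconditionally.
+ */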
+ if (KMALLOC_MIN_SIZE <= 32) {
+ kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
+ BUG_ON(!kmalloc_caches[1]->name);
+ }
+
+ if (KMALLOC_MIN_SIZE <= 64) {
+ kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
+ BUG_ON(!kmalloc_caches[2]->name);
+ }
+
for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
BUG_ON(!s);
- kmalloc_caches[i].name = s;
+ kmalloc_caches[i]->name = s;
}
#ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier);
#endif
-#ifdef CONFIG_NUMA
- kmem_size = offsetof(struct kmem_cache, node) +
- nr_node_ids * sizeof(struct kmem_cache_node *);
-#else
- kmem_size = sizeof(struct kmem_cache);
-#endif
+#ifdef CONFIG_ZONE_DMA
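+ /*
+ * All DMA kmalloc caches are now created up front here instead of on
+ * demand at allocation time.
+ */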
+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
+ struct kmem_cache *s = kmalloc_caches[i];
+
+ if (s && s->size) {
+ char *name = kasprintf(GFP_NOWAIT,
+ "dma-kmalloc-%d", s->objsize);
+
+ BUG_ON(!name);
+ kmalloc_dma_caches[i] = create_kmalloc_cache(name,
+ s->objsize, SLAB_CACHE_DMA);
+ }
+ }
+#endif
printk(KERN_INFO
"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
" CPUs=%d, Nodes=%d\n",
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
+ char *n;
if (WARN_ON(!name))
return NULL;
*/
s->objsize = max(s->objsize, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
- up_write(&slub_lock);
if (sysfs_slab_alias(s, name)) {
- down_write(&slub_lock);
s->refcount--;
- up_write(&slub_lock);
goto err;
}
+ up_write(&slub_lock);
return s;
}
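+ /*
+ * The allocator now owns the cache name and frees it on release, so
+ * work on a private duplicate of the caller's string.
+ */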
+ n = kstrdup(name, GFP_KERNEL);
+ if (!n)
+ goto err;
+
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
- if (kmem_cache_open(s, GFP_KERNEL, name,
+ if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
list_add(&s->list, &slab_caches);
- up_write(&slub_lock);
if (sysfs_slab_add(s)) {
- down_write(&slub_lock);
list_del(&s->list);
- up_write(&slub_lock);
+ kfree(n);
kfree(s);
goto err;
}
+ up_write(&slub_lock);
return s;
}
+ kfree(n);
kfree(s);
}
+err:
up_write(&slub_lock);
-err:
if (flags & SLAB_PANIC)
panic("Cannot create slabcache %s\n", name);
else
ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
- /* Honor the call site pointer we recieved. */
+ /* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
return ret;
}
+#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
int node, unsigned long caller)
{
ret = slab_alloc(s, gfpflags, node, caller);
- /* Honor the call site pointer we recieved. */
+ /* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
return ret;
}
+#endif
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
static int count_inuse(struct page *page)
{
return page->inuse;
{
return page->objects;
}
+#endif
+#ifdef CONFIG_SLUB_DEBUG
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
/* Now we know that a valid freelist exists */
bitmap_zero(map, page->objects);
- for_each_free_object(p, s, page->freelist) {
- set_bit(slab_index(p, s, addr), map);
- if (!check_object(s, page, p, 0))
- return 0;
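+ /*
+ * get_map() marks every object on the freelist in the bitmap: free
+ * objects must show the inactive red zone state, allocated objects
+ * the active one.
+ */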
+ get_map(s, page, map);
+ for_each_object(p, s, addr, page->objects) {
+ if (test_bit(slab_index(p, s, addr), map))
+ if (!check_object(s, page, p, SLUB_RED_INACTIVE))
+ return 0;
}
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
- if (!check_object(s, page, p, 1))
+ if (!check_object(s, page, p, SLUB_RED_ACTIVE))
return 0;
return 1;
}
static void validate_slab_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
- if (slab_trylock(page)) {
- validate_slab(s, page, map);
- slab_unlock(page);
- } else
- printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
- s->name, page);
-
- if (s->flags & DEBUG_DEFAULT_FLAGS) {
- if (!PageSlubDebug(page))
- printk(KERN_ERR "SLUB %s: SlubDebug not set "
- "on slab 0x%p\n", s->name, page);
- } else {
- if (PageSlubDebug(page))
- printk(KERN_ERR "SLUB %s: SlubDebug set on "
- "slab 0x%p\n", s->name, page);
- }
+ slab_lock(page);
+ validate_slab(s, page, map);
+ slab_unlock(page);
}
static int validate_slab_node(struct kmem_cache *s,
flush_all(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
- struct kmem_cache_node *n = get_node(s, node);
-
- count += validate_slab_node(s, n, map);
- }
- kfree(map);
- return count;
-}
-
-#ifdef SLUB_RESILIENCY_TEST
-static void resiliency_test(void)
-{
- u8 *p;
-
- printk(KERN_ERR "SLUB resiliency testing\n");
- printk(KERN_ERR "-----------------------\n");
- printk(KERN_ERR "A. Corruption after allocation\n");
-
- p = kzalloc(16, GFP_KERNEL);
- p[16] = 0x12;
- printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
- " 0x12->0x%p\n\n", p + 16);
-
- validate_slab_cache(kmalloc_caches + 4);
-
- /* Hmmm... The next two are dangerous */
- p = kzalloc(32, GFP_KERNEL);
- p[32 + sizeof(void *)] = 0x34;
- printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
- " 0x34 -> -0x%p\n", p);
- printk(KERN_ERR
- "If allocated object is overwritten then not detectable\n\n");
-
- validate_slab_cache(kmalloc_caches + 5);
- p = kzalloc(64, GFP_KERNEL);
- p += 64 + (get_cycles() & 0xff) * sizeof(void *);
- *p = 0x56;
- printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
- p);
- printk(KERN_ERR
- "If allocated object is overwritten then not detectable\n\n");
- validate_slab_cache(kmalloc_caches + 6);
-
- printk(KERN_ERR "\nB. Corruption after free\n");
- p = kzalloc(128, GFP_KERNEL);
- kfree(p);
- *p = 0x78;
- printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
- validate_slab_cache(kmalloc_caches + 7);
-
- p = kzalloc(256, GFP_KERNEL);
- kfree(p);
- p[50] = 0x9a;
- printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
- p);
- validate_slab_cache(kmalloc_caches + 8);
-
- p = kzalloc(512, GFP_KERNEL);
- kfree(p);
- p[512] = 0xab;
- printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
- validate_slab_cache(kmalloc_caches + 9);
-}
-#else
-static void resiliency_test(void) {};
-#endif
+ struct kmem_cache_node *n = get_node(s, node);
+ count += validate_slab_node(s, n, map);
+ }
+ kfree(map);
+ return count;
+}
/*
* Generate lists of code addresses where slabcache objects are allocated
* and freed.
static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc,
- long *map)
+ unsigned long *map)
{
void *addr = page_address(page);
void *p;
bitmap_zero(map, page->objects);
- for_each_free_object(p, s, page->freelist)
- set_bit(slab_index(p, s, addr), map);
+ get_map(s, page, map);
for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map))
len += sprintf(buf + len, "%7ld ", l->count);
if (l->addr)
- len += sprint_symbol(buf + len, (unsigned long)l->addr);
+ len += sprintf(buf + len, "%pS", (void *)l->addr);
else
len += sprintf(buf + len, "<not-available>");
len += sprintf(buf, "No data\n");
return len;
}
+#endif
+
+#ifdef SLUB_RESILIENCY_TEST
+static void resiliency_test(void)
+{
+ u8 *p;
+
+ BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);
+
+ printk(KERN_ERR "SLUB resiliency testing\n");
+ printk(KERN_ERR "-----------------------\n");
+ printk(KERN_ERR "A. Corruption after allocation\n");
+
+ p = kzalloc(16, GFP_KERNEL);
+ p[16] = 0x12;
+ printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
+ " 0x12->0x%p\n\n", p + 16);
+
+ validate_slab_cache(kmalloc_caches[4]);
+
+ /* Hmmm... The next two are dangerous */
+ p = kzalloc(32, GFP_KERNEL);
+ p[32 + sizeof(void *)] = 0x34;
+ printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
+ " 0x34 -> -0x%p\n", p);
+ printk(KERN_ERR
+ "If allocated object is overwritten then not detectable\n\n");
+
+ validate_slab_cache(kmalloc_caches[5]);
+ p = kzalloc(64, GFP_KERNEL);
+ p += 64 + (get_cycles() & 0xff) * sizeof(void *);
+ *p = 0x56;
+ printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
+ p);
+ printk(KERN_ERR
+ "If allocated object is overwritten then not detectable\n\n");
+ validate_slab_cache(kmalloc_caches[6]);
+
+ printk(KERN_ERR "\nB. Corruption after free\n");
+ p = kzalloc(128, GFP_KERNEL);
+ kfree(p);
+ *p = 0x78;
+ printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
+ validate_slab_cache(kmalloc_caches[7]);
+
+ p = kzalloc(256, GFP_KERNEL);
+ kfree(p);
+ p[50] = 0x9a;
+ printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
+ p);
+ validate_slab_cache(kmalloc_caches[8]);
+
+ p = kzalloc(512, GFP_KERNEL);
+ kfree(p);
+ p[512] = 0xab;
+ printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
+ validate_slab_cache(kmalloc_caches[9]);
+}
+#else
+#ifdef CONFIG_SYSFS
+static void resiliency_test(void) {}
+#endif
+#endif
+#ifdef CONFIG_SYSFS
enum slab_stat_type {
SL_ALL, /* All slabs */
SL_PARTIAL, /* Only partially allocated slabs */
}
}
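+ /*
+ * Prevent nodes from coming or going while the per node lists are
+ * walked below.
+ */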
+ lock_memory_hotplug();
+#ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) {
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
nodes[node] += x;
}
- } else if (flags & SO_PARTIAL) {
+ } else
+#endif
+ if (flags & SO_PARTIAL) {
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]);
#endif
+ unlock_memory_hotplug();
kfree(nodes);
return x + sprintf(buf + x, "\n");
}
+#ifdef CONFIG_SLUB_DEBUG
static int any_slab_objects(struct kmem_cache *s)
{
int node;
}
return 0;
}
+#endif
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
-#define to_slab(n) container_of(n, struct kmem_cache, kobj);
+#define to_slab(n) container_of(n, struct kmem_cache, kobj)
struct slab_attribute {
struct attribute attr;
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
- if (s->ctor) {
- int n = sprint_symbol(buf, (unsigned long)s->ctor);
-
- return n + sprintf(buf + n, "\n");
- }
- return 0;
+ if (!s->ctor)
+ return 0;
+ return sprintf(buf, "%pS\n", s->ctor);
}
SLAB_ATTR_RO(ctor);
}
SLAB_ATTR_RO(aliases);
-static ssize_t slabs_show(struct kmem_cache *s, char *buf)
-{
- return show_slab_objects(s, buf, SO_ALL);
-}
-SLAB_ATTR_RO(slabs);
-
static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(objects_partial);
-static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
-{
- return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
-}
-SLAB_ATTR_RO(total_objects);
-
-static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
+static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}
-static ssize_t sanity_checks_store(struct kmem_cache *s,
+static ssize_t reclaim_account_store(struct kmem_cache *s,
const char *buf, size_t length)
{
- s->flags &= ~SLAB_DEBUG_FREE;
+ s->flags &= ~SLAB_RECLAIM_ACCOUNT;
if (buf[0] == '1')
- s->flags |= SLAB_DEBUG_FREE;
+ s->flags |= SLAB_RECLAIM_ACCOUNT;
return length;
}
-SLAB_ATTR(sanity_checks);
+SLAB_ATTR(reclaim_account);
-static ssize_t trace_show(struct kmem_cache *s, char *buf)
+static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
+SLAB_ATTR_RO(hwcache_align);
-static ssize_t trace_store(struct kmem_cache *s, const char *buf,
- size_t length)
+#ifdef CONFIG_ZONE_DMA
+static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
- s->flags &= ~SLAB_TRACE;
- if (buf[0] == '1')
- s->flags |= SLAB_TRACE;
- return length;
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
-SLAB_ATTR(trace);
+SLAB_ATTR_RO(cache_dma);
+#endif
-#ifdef CONFIG_FAILSLAB
-static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
+SLAB_ATTR_RO(destroy_by_rcu);
-static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
- size_t length)
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
{
- s->flags &= ~SLAB_FAILSLAB;
- if (buf[0] == '1')
- s->flags |= SLAB_FAILSLAB;
- return length;
+ return sprintf(buf, "%d\n", s->reserved);
}
-SLAB_ATTR(failslab);
-#endif
+SLAB_ATTR_RO(reserved);
-static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
+#ifdef CONFIG_SLUB_DEBUG
+static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
+ return show_slab_objects(s, buf, SO_ALL);
}
+SLAB_ATTR_RO(slabs);
-static ssize_t reclaim_account_store(struct kmem_cache *s,
- const char *buf, size_t length)
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
- s->flags &= ~SLAB_RECLAIM_ACCOUNT;
- if (buf[0] == '1')
- s->flags |= SLAB_RECLAIM_ACCOUNT;
- return length;
+ return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
-SLAB_ATTR(reclaim_account);
+SLAB_ATTR_RO(total_objects);
-static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
+static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}
-SLAB_ATTR_RO(hwcache_align);
-#ifdef CONFIG_ZONE_DMA
-static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
+static ssize_t sanity_checks_store(struct kmem_cache *s,
+ const char *buf, size_t length)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
+ s->flags &= ~SLAB_DEBUG_FREE;
+ if (buf[0] == '1') {
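+ /*
+ * Consistency checks force every alloc/free through the debug
+ * slow path, which cannot coexist with the lockless
+ * cmpxchg_double fastpath, so clear that capability flag.
+ */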
+ s->flags &= ~__CMPXCHG_DOUBLE;
+ s->flags |= SLAB_DEBUG_FREE;
+ }
+ return length;
}
-SLAB_ATTR_RO(cache_dma);
-#endif
+SLAB_ATTR(sanity_checks);
-static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
+static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}
-SLAB_ATTR_RO(destroy_by_rcu);
+
+static ssize_t trace_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ s->flags &= ~SLAB_TRACE;
+ if (buf[0] == '1') {
+ s->flags &= ~__CMPXCHG_DOUBLE;
+ s->flags |= SLAB_TRACE;
+ }
+ return length;
+}
+SLAB_ATTR(trace);
static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
return -EBUSY;
s->flags &= ~SLAB_RED_ZONE;
- if (buf[0] == '1')
+ if (buf[0] == '1') {
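+ /* Debug layouts are incompatible with the cmpxchg_double fastpath */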
+ s->flags &= ~__CMPXCHG_DOUBLE;
s->flags |= SLAB_RED_ZONE;
+ }
calculate_sizes(s, -1);
return length;
}
return -EBUSY;
s->flags &= ~SLAB_POISON;
- if (buf[0] == '1')
+ if (buf[0] == '1') {
+ s->flags &= ~__CMPXCHG_DOUBLE;
s->flags |= SLAB_POISON;
+ }
calculate_sizes(s, -1);
return length;
}
return -EBUSY;
s->flags &= ~SLAB_STORE_USER;
- if (buf[0] == '1')
+ if (buf[0] == '1') {
+ s->flags &= ~__CMPXCHG_DOUBLE;
s->flags |= SLAB_STORE_USER;
+ }
calculate_sizes(s, -1);
return length;
}
}
SLAB_ATTR(validate);
+static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
+{
+ if (!(s->flags & SLAB_STORE_USER))
+ return -ENOSYS;
+ return list_locations(s, buf, TRACK_ALLOC);
+}
+SLAB_ATTR_RO(alloc_calls);
+
+static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
+{
+ if (!(s->flags & SLAB_STORE_USER))
+ return -ENOSYS;
+ return list_locations(s, buf, TRACK_FREE);
+}
+SLAB_ATTR_RO(free_calls);
+#endif /* CONFIG_SLUB_DEBUG */
+
+#ifdef CONFIG_FAILSLAB
+static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+}
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ s->flags &= ~SLAB_FAILSLAB;
+ if (buf[0] == '1')
+ s->flags |= SLAB_FAILSLAB;
+ return length;
+}
+SLAB_ATTR(failslab);
+#endif
+
static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
return 0;
}
SLAB_ATTR(shrink);
-static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return -ENOSYS;
- return list_locations(s, buf, TRACK_ALLOC);
-}
-SLAB_ATTR_RO(alloc_calls);
-
-static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
-{
- if (!(s->flags & SLAB_STORE_USER))
- return -ENOSYS;
- return list_locations(s, buf, TRACK_FREE);
-}
-SLAB_ATTR_RO(free_calls);
-
#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
+STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
+STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
+STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
#endif
static struct attribute *slab_attrs[] = {
&min_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
- &total_objects_attr.attr,
- &slabs_attr.attr,
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
&aliases_attr.attr,
&align_attr.attr,
- &sanity_checks_attr.attr,
- &trace_attr.attr,
&hwcache_align_attr.attr,
&reclaim_account_attr.attr,
&destroy_by_rcu_attr.attr,
+ &shrink_attr.attr,
+ &reserved_attr.attr,
+#ifdef CONFIG_SLUB_DEBUG
+ &total_objects_attr.attr,
+ &slabs_attr.attr,
+ &sanity_checks_attr.attr,
+ &trace_attr.attr,
&red_zone_attr.attr,
&poison_attr.attr,
&store_user_attr.attr,
&validate_attr.attr,
- &shrink_attr.attr,
&alloc_calls_attr.attr,
&free_calls_attr.attr,
+#endif
#ifdef CONFIG_ZONE_DMA
&cache_dma_attr.attr,
#endif
&alloc_from_partial_attr.attr,
&alloc_slab_attr.attr,
&alloc_refill_attr.attr,
+ &alloc_node_mismatch_attr.attr,
&free_slab_attr.attr,
&cpuslab_flush_attr.attr,
&deactivate_full_attr.attr,
&deactivate_to_head_attr.attr,
&deactivate_to_tail_attr.attr,
&deactivate_remote_frees_attr.attr,
+ &deactivate_bypass_attr.attr,
&order_fallback_attr.attr,
+ &cmpxchg_double_fail_attr.attr,
+ &cmpxchg_double_cpu_fail_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
&failslab_attr.attr,
{
struct kmem_cache *s = to_slab(kobj);
+ kfree(s->name);
kfree(s);
}
static void sysfs_slab_remove(struct kmem_cache *s)
{
+ if (slab_state < SYSFS)
+ /*
+ * Sysfs has not been set up yet, so there is no need to remove the
+ * cache from sysfs.
+ */
+ return;
+
kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj);
kobject_put(&s->kobj);
struct kmem_cache *s;
int err;
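+ /*
+ * Hold slub_lock for the whole registration pass so that the list of
+ * caches cannot change while the sysfs entries are created.
+ */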
+ down_write(&slub_lock);
+
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
+ up_write(&slub_lock);
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
kfree(al);
}
+ up_write(&slub_lock);
resiliency_test();
return 0;
}
__initcall(slab_sysfs_init);
-#endif
+#endif /* CONFIG_SYSFS */
/*
* The /proc/slabinfo ABI