| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * SLUB: A slab allocator that limits cache line use instead of queuing |
| * objects in per cpu and per node lists. |
| * |
 * The allocator synchronizes using per slab locks or atomic operations
| * and only uses a centralized lock to manage a pool of partial slabs. |
| * |
| * (C) 2007 SGI, Christoph Lameter |
| * (C) 2011 Linux Foundation, Christoph Lameter |
| */ |
| |
| #include <linux/mm.h> |
| #include <linux/swap.h> /* struct reclaim_state */ |
| #include <linux/module.h> |
| #include <linux/bit_spinlock.h> |
| #include <linux/interrupt.h> |
| #include <linux/swab.h> |
| #include <linux/bitops.h> |
| #include <linux/slab.h> |
| #include "slab.h" |
| #include <linux/proc_fs.h> |
| #include <linux/seq_file.h> |
| #include <linux/kasan.h> |
| #include <linux/cpu.h> |
| #include <linux/cpuset.h> |
| #include <linux/mempolicy.h> |
| #include <linux/ctype.h> |
| #include <linux/debugobjects.h> |
| #include <linux/kallsyms.h> |
| #include <linux/memory.h> |
| #include <linux/math64.h> |
| #include <linux/fault-inject.h> |
| #include <linux/stacktrace.h> |
| #include <linux/prefetch.h> |
| #include <linux/memcontrol.h> |
| #include <linux/random.h> |
| |
| #include <trace/events/kmem.h> |
| |
| #include "internal.h" |
| |
| /* |
| * Lock order: |
| * 1. slab_mutex (Global Mutex) |
| * 2. node->list_lock |
| * 3. slab_lock(page) (Only on some arches and for debugging) |
| * |
| * slab_mutex |
| * |
| * The role of the slab_mutex is to protect the list of all the slabs |
| * and to synchronize major metadata changes to slab cache structures. |
| * |
| * The slab_lock is only used for debugging and on arches that do not |
| * have the ability to do a cmpxchg_double. It only protects: |
| * A. page->freelist -> List of object free in a page |
| * B. page->inuse -> Number of objects in use |
| * C. page->objects -> Number of objects in page |
| * D. page->frozen -> frozen state |
| * |
| * If a slab is frozen then it is exempt from list management. It is not |
| * on any list except per cpu partial list. The processor that froze the |
| * slab is the one who can perform list operations on the page. Other |
| * processors may put objects onto the freelist but the processor that |
| * froze the slab is the only one that can retrieve the objects from the |
| * page's freelist. |
| * |
| * The list_lock protects the partial and full list on each node and |
| * the partial slab counter. If taken then no new slabs may be added or |
 * removed from the lists nor can the number of partial slabs be modified.
| * (Note that the total number of slabs is an atomic value that may be |
| * modified without taking the list lock). |
| * |
| * The list_lock is a centralized lock and thus we avoid taking it as |
| * much as possible. As long as SLUB does not have to handle partial |
| * slabs, operations can continue without any centralized lock. F.e. |
| * allocating a long series of objects that fill up slabs does not require |
| * the list lock. |
| * Interrupts are disabled during allocation and deallocation in order to |
| * make the slab allocator safe to use in the context of an irq. In addition |
| * interrupts are disabled to ensure that the processor does not change |
| * while handling per_cpu slabs, due to kernel preemption. |
| * |
| * SLUB assigns one slab for allocation to each processor. |
| * Allocations only occur from these slabs called cpu slabs. |
| * |
| * Slabs with free elements are kept on a partial list and during regular |
| * operations no list for full slabs is used. If an object in a full slab is |
| * freed then the slab will show up again on the partial lists. |
| * We track full slabs for debugging purposes though because otherwise we |
| * cannot scan all objects. |
| * |
| * Slabs are freed when they become empty. Teardown and setup is |
| * minimal so we rely on the page allocators per cpu caches for |
| * fast frees and allocs. |
| * |
| * page->frozen The slab is frozen and exempt from list processing. |
| * This means that the slab is dedicated to a purpose |
| * such as satisfying allocations for a specific |
| * processor. Objects may be freed in the slab while |
| * it is frozen but slab_free will then skip the usual |
| * list operations. It is up to the processor holding |
| * the slab to integrate the slab into the slab lists |
| * when the slab is no longer needed. |
| * |
| * One use of this flag is to mark slabs that are |
| * used for allocations. Then such a slab becomes a cpu |
| * slab. The cpu slab may be equipped with an additional |
| * freelist that allows lockless access to |
| * free objects in addition to the regular freelist |
| * that requires the slab lock. |
| * |
| * SLAB_DEBUG_FLAGS Slab requires special handling due to debug |
| * options set. This moves slab handling out of |
| * the fast path and disables lockless freelists. |
| */ |
| |
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
/* Static key gating the debug slow paths; default-on with SLUB_DEBUG_ON */
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif
| |
/* True when any SLAB_DEBUG_FLAGS debug option is active for cache @s. */
static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}
| |
| void *fixup_red_left(struct kmem_cache *s, void *p) |
| { |
| if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) |
| p += s->red_left_pad; |
| |
| return p; |
| } |
| |
/*
 * Per-cpu partial lists are only maintained when CONFIG_SLUB_CPU_PARTIAL
 * is built in and the cache is not a debug cache (debug caches avoid the
 * lockless paths).
 */
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}
| |
| /* |
| * Issues still to be resolved: |
| * |
| * - Support PAGE_ALLOC_DEBUG. Should be easy to do. |
| * |
| * - Variable sizing of the per node arrays |
| */ |
| |
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/* Order and object count are packed into one word (see oo_make()) */
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U)
/* Use cmpxchg_double */
#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U)
| |
/*
 * Tracking user of a slab: who allocated/freed an object, from where
 * and when. One record per TRACK_ALLOC/TRACK_FREE, stored in the
 * object's metadata area when SLAB_STORE_USER is set.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };
| |
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
/* Without sysfs the registration hooks collapse to successful no-ops. */
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif
| |
/* Bump the per-cpu statistics counter @si for cache @s (CONFIG_SLUB_STATS). */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
| |
| /******************************************************************** |
| * Core slab cache functions |
| *******************************************************************/ |
| |
/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number. The transform is an involution: applying it twice
 * yields the original pointer, so the same helper en/decodes.
 */
static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
				 unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/*
	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
	 * Normally, this doesn't cause any issues, as both set_freepointer()
	 * and get_freepointer() are called with a pointer with the same tag.
	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
	 * example, when __free_slab() iterates over objects in a cache, it
	 * passes untagged pointers to check_object(). check_object() in turn
	 * calls get_freepointer() with an untagged pointer, which causes the
	 * freepointer to be restored incorrectly.
	 */
	return (void *)((unsigned long)ptr ^ s->random ^
			swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
#else
	return ptr;
#endif
}
| |
/* Load and decode the freelist pointer stored at @ptr_addr. */
static inline void *freelist_dereference(const struct kmem_cache *s,
					 void *ptr_addr)
{
	unsigned long stored = *(unsigned long *)ptr_addr;

	return freelist_ptr(s, (void *)stored, (unsigned long)ptr_addr);
}
| |
| static inline void *get_freepointer(struct kmem_cache *s, void *object) |
| { |
| return freelist_dereference(s, object + s->offset); |
| } |
| |
| static void prefetch_freepointer(const struct kmem_cache *s, void *object) |
| { |
| prefetch(object + s->offset); |
| } |
| |
/*
 * Like get_freepointer() but tolerant of the object's page being unmapped
 * while debug_pagealloc is active: use a non-faulting kernel copy then.
 * NOTE(review): copy_from_kernel_nofault()'s return value is ignored, so
 * on a fault @p may be left uninitialized — confirm callers cope with it.
 */
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	void *p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
	return freelist_ptr(s, p, freepointer_addr);
}
| |
/* Store @fp as @object's freelist link (obfuscated when hardened). */
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
}
| |
/*
 * Loop over all objects in a slab: @__addr is the slab base address,
 * @__objects the object count; fixup_red_left() skips a left red zone.
 */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)
| |
| static inline unsigned int order_objects(unsigned int order, unsigned int size) |
| { |
| return ((unsigned int)PAGE_SIZE << order) / size; |
| } |
| |
| static inline struct kmem_cache_order_objects oo_make(unsigned int order, |
| unsigned int size) |
| { |
| struct kmem_cache_order_objects x = { |
| (order << OO_SHIFT) + order_objects(order, size) |
| }; |
| |
| return x; |
| } |
| |
| static inline unsigned int oo_order(struct kmem_cache_order_objects x) |
| { |
| return x.x >> OO_SHIFT; |
| } |
| |
| static inline unsigned int oo_objects(struct kmem_cache_order_objects x) |
| { |
| return x.x & OO_MASK; |
| } |
| |
| /* |
| * Per slab locking using the pagelock |
| */ |
/* Take the per-page bit spinlock (PG_locked); @page must not be a tail page. */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}
| |
/* Release the per-page bit spinlock taken by slab_lock(). */
static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}
| |
/*
 * Atomically replace @page's (freelist, counters) pair, but only if it
 * still equals the old pair. Returns true on success, false if the caller
 * lost a race and should retry.
 *
 * Interrupts must be disabled (for the fallback code to work right).
 */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		/* Both words swapped in a single cmpxchg_double */
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		/* Fallback: emulate the double-word cmpxchg under slab_lock */
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	/* Lost the race: pause briefly before the caller retries */
	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
| |
/*
 * Same as __cmpxchg_double_slab() but callable with interrupts enabled:
 * the locked fallback path disables irqs itself around the update.
 */
static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		/* Fallback needs irqs off so slab_lock() is irq-safe here */
		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			page->counters = counters_new;
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	/* Lost the race: pause briefly before the caller retries */
	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
| |
| #ifdef CONFIG_SLUB_DEBUG |
/* Shared scratch bitmap for get_map(); serialized by object_map_lock. */
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
| |
/*
 * Determine a map of objects in use on a page: a bit is set for every
 * object currently on the page's freelist (i.e. free), clear for objects
 * in use. Returns the shared object_map with object_map_lock held;
 * release it with put_map().
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
	__acquires(&object_map_lock)
{
	void *p;
	void *addr = page_address(page);

	VM_BUG_ON(!irqs_disabled());

	spin_lock(&object_map_lock);

	bitmap_zero(object_map, page->objects);

	/* Set a bit for each object found on the freelist */
	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), object_map);

	return object_map;
}
| |
/* Release the shared object_map bitmap obtained from get_map(). */
static void put_map(unsigned long *map) __releases(&object_map_lock)
{
	VM_BUG_ON(map != object_map);
	spin_unlock(&object_map_lock);
}
| |
| static inline unsigned int size_from_object(struct kmem_cache *s) |
| { |
| if (s->flags & SLAB_RED_ZONE) |
| return s->size - s->red_left_pad; |
| |
| return s->size; |
| } |
| |
| static inline void *restore_red_left(struct kmem_cache *s, void *p) |
| { |
| if (s->flags & SLAB_RED_ZONE) |
| p -= s->red_left_pad; |
| |
| return p; |
| } |
| |
| /* |
| * Debug settings: |
| */ |
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

/* Cache-name selector string — presumably from slub_debug=; confirm parser */
static char *slub_debug_string;
/* Non-zero drops debug metadata when it would raise a cache's min order */
static int disable_higher_order_debug;
| |
/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}
| |
/* Counterpart of metadata_access_enable(): re-arm kasan checking. */
static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}
| |
| /* |
| * Object debugging |
| */ |
| |
| /* Verify that a pointer has an address that is valid within a slab page */ |
| static inline int check_valid_pointer(struct kmem_cache *s, |
| struct page *page, void *object) |
| { |
| void *base; |
| |
| if (!object) |
| return 1; |
| |
| base = page_address(page); |
| object = kasan_reset_tag(object); |
| object = restore_red_left(s, object); |
| if (object < base || object >= base + page->objects * s->size || |
| (object - base) % s->size) { |
| return 0; |
| } |
| |
| return 1; |
| } |
| |
| static void print_section(char *level, char *text, u8 *addr, |
| unsigned int length) |
| { |
| metadata_access_enable(); |
| print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, |
| length, 1); |
| metadata_access_disable(); |
| } |
| |
/*
 * True when the free pointer is stored beyond the object payload
 * (offset >= inuse). See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}
| |
| /* |
| * Return offset of the end of info block which is inuse + free pointer if |
| * not overlapping with object. |
| */ |
| static inline unsigned int get_info_end(struct kmem_cache *s) |
| { |
| if (freeptr_outside_object(s)) |
| return s->inuse + sizeof(void *); |
| else |
| return s->inuse; |
| } |
| |
| static struct track *get_track(struct kmem_cache *s, void *object, |
| enum track_item alloc) |
| { |
| struct track *p; |
| |
| p = object + get_info_end(s); |
| |
| return p + alloc; |
| } |
| |
/*
 * Record (or clear, when @addr is 0) who touched @object: caller address,
 * cpu, pid, timestamp and, with CONFIG_STACKTRACE, a partial backtrace.
 */
static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		unsigned int nr_entries;

		/* Skip 3 frames so the trace starts at set_track()'s caller */
		metadata_access_enable();
		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
		metadata_access_disable();

		/* Zero-terminate the address list when it is not full */
		if (nr_entries < TRACK_ADDRS_COUNT)
			p->addrs[nr_entries] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else {
		memset(p, 0, sizeof(struct track));
	}
}
| |
/* Clear both tracking records of @object (only with SLAB_STORE_USER). */
static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}
| |
| static void print_track(const char *s, struct track *t, unsigned long pr_time) |
| { |
| if (!t->addr) |
| return; |
| |
| pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n", |
| s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); |
| #ifdef CONFIG_STACKTRACE |
| { |
| int i; |
| for (i = 0; i < TRACK_ADDRS_COUNT; i++) |
| if (t->addrs[i]) |
| pr_err("\t%pS\n", (void *)t->addrs[i]); |
| else |
| break; |
| } |
| #endif |
| } |
| |
/* Print alloc and free tracking for @object when SLAB_STORE_USER is set. */
void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}
| |
| static void print_page_info(struct page *page) |
| { |
| pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", |
| page, page->objects, page->inuse, page->freelist, page->flags); |
| |
| } |
| |
/* Print the banner of a slab corruption report and taint the kernel. */
static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}
| |
/* Log the corrective action taken after a corruption was detected. */
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}
| |
/*
 * Validate @nextfree while a cpu freelist is being walked. On corruption,
 * report it, truncate the list at *@freelist and return true so the
 * caller stops. The @freelist NULL-check lets callers pass no list.
 */
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, page, nextfree) && freelist) {
		object_err(s, page, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}
| |
/* Dump everything known about object @p: tracking, red zones, padding. */
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			      s->inuse - s->object_size);

	/* Compute where the object's metadata (tracking, kasan) ends */
	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}
| |
/* Report a single-object inconsistency: bug banner plus full object dump. */
void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}
| |
/*
 * Report a page-level inconsistency. The formatted message is truncated
 * to the local 100-byte buffer (vsnprintf bounds the write).
 */
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}
| |
| static void init_object(struct kmem_cache *s, void *object, u8 val) |
| { |
| u8 *p = object; |
| |
| if (s->flags & SLAB_RED_ZONE) |
| memset(p - s->red_left_pad, val, s->red_left_pad); |
| |
| if (s->flags & __OBJECT_POISON) { |
| memset(p, POISON_FREE, s->object_size - 1); |
| p[s->object_size - 1] = POISON_END; |
| } |
| |
| if (s->flags & SLAB_RED_ZONE) |
| memset(p + s->object_size, val, s->inuse - s->object_size); |
| } |
| |
| static void restore_bytes(struct kmem_cache *s, char *message, u8 data, |
| void *from, void *to) |
| { |
| slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); |
| memset(from, data, to - from); |
| } |
| |
/*
 * Verify that the @bytes bytes at @start all equal @value. On a mismatch
 * report the damaged range, dump the object, restore the expected bytes
 * and return 0. Returns 1 when everything matches.
 */
static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
		u8 *object, char *what,
		u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;
	u8 *addr = page_address(page);

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	/* Trim trailing intact bytes so the report covers only the damage */
	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault - addr,
					fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}
| |
| /* |
| * Object layout: |
| * |
| * object address |
| * Bytes of the object to be managed. |
| * If the freepointer may overlay the object then the free |
| * pointer is at the middle of the object. |
| * |
| * Poisoning uses 0x6b (POISON_FREE) and the last byte is |
| * 0xa5 (POISON_END) |
| * |
| * object + s->object_size |
| * Padding to reach word boundary. This is also used for Redzoning. |
| * Padding is extended by another word if Redzoning is enabled and |
| * object_size == inuse. |
| * |
| * We fill with 0xbb (RED_INACTIVE) for inactive objects and with |
| * 0xcc (RED_ACTIVE) for objects in use. |
| * |
| * object + s->inuse |
| * Meta data starts here. |
| * |
| * A. Free pointer (if we cannot overwrite object on free) |
| * B. Tracking data for SLAB_STORE_USER |
 *	C. Padding to reach required alignment boundary or at minimum
| * one word if debugging is on to be able to detect writes |
| * before the word boundary. |
| * |
| * Padding is done using 0x5a (POISON_INUSE) |
| * |
| * object + s->size |
| * Nothing is used beyond s->size. |
| * |
| * If slabcaches are merged then the object_size and inuse boundaries are mostly |
| * ignored. And therefore no slab options that rely on these boundaries |
| * may be used with merged slabcaches. |
| */ |
| |
/*
 * Check the POISON_INUSE filler between the end of @p's metadata and the
 * start of the next object slot. Returns 1 if intact or absent.
 */
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;	/* No padding present */

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}
| |
/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = page_size(page);
	end = start + length;
	/* Leftover bytes after the last whole object slot */
	remainder = length % s->size;
	if (!remainder)
		return 1;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(pad, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	/* Trim trailing intact bytes from the reported range */
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p @offset=%tu",
			fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);

	/* Repair the padding so the error is reported only once */
	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
	return 0;
}
| |
/*
 * Validate one object's red zones, poison and free pointer. @val is the
 * expected red zone byte (SLUB_RED_ACTIVE for allocated objects,
 * SLUB_RED_INACTIVE for free ones). Returns 0 on unrecoverable damage.
 */
static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		/* No red zone: any gap up to s->inuse is POISON_INUSE filler */
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		/* Free objects carry POISON_FREE with a POISON_END last byte */
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "End Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}
| |
/*
 * Sanity-check page-level slab state (flags and counters). Must be called
 * with interrupts disabled; returns 0 when the page is unusable.
 */
static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	/* Upper bound on the object count for this page's order */
	maxobj = order_objects(compound_order(page), s->size);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}
| |
/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 * Also repairs broken chains and wrong counters as a side effect.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	/* Walk the chain, stopping at the first invalid link */
	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				/* Truncate the chain at the last good object */
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				/* Head itself is bad: drop the entire list */
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	/* Cross-check the counters against what was actually counted */
	max_objects = order_objects(compound_order(page), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}
| |
/* Emit a SLAB_TRACE log line (plus an object dump on free) for @object. */
static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}
| |
/*
 * Tracking of fully allocated slabs for debugging purposes.
 * The full list is only maintained when user tracking is enabled.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->slab_list, &n->full);
}
| |
/* Counterpart of add_full(): take @page off the node's full list. */
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
}
| |
/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	/* nr_slabs is atomic, so no list_lock is needed for a read */
	return atomic_long_read(&n->nr_slabs);
}
| |
/* Slab count of a node whose kmem_cache_node is already at hand. */
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}
| |
/* Account one new slab with @objects capacity on @node. */
static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
/* Undo inc_slabs_node() when a slab is discarded from @node. */
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}
| |
| /* Object debug checks for alloc/free paths */ |
| static void setup_object_debug(struct kmem_cache *s, struct page *page, |
| void *object) |
| { |
| if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) |
| return; |
| |
| init_object(s, object, SLUB_RED_INACTIVE); |
| init_tracking(s, object); |
| } |
| |
/* Poison an entire fresh slab page before objects are laid out on it. */
static
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	/*
	 * metadata_access_enable() lets us write the whole page (including
	 * areas the sanitizers would otherwise flag) with the POISON_INUSE
	 * pattern so stray writes between objects can later be detected.
	 */
	metadata_access_enable();
	memset(addr, POISON_INUSE, page_size(page));
	metadata_access_disable();
}
| |
/*
 * Consistency checks run on the debug allocation path. Returns 1 if the
 * slab page and the object about to be handed out look sane, 0 otherwise.
 * The page check must come first: the later checks dereference page state.
 */
static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page, void *object)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}
| |
/*
 * Debug processing on the slow allocation path: optional consistency
 * checks, allocation tracking, tracing and re-initialization of the
 * object's debug state to "active". Returns 1 on success. On a failed
 * consistency check the slab page is neutralized so it is not touched
 * again.
 */
static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object))
			goto bad;
	}

	/* Success perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}
| |
/*
 * Consistency checks run when an object is freed on the debug path:
 * pointer validity, double free, red zone / poison state, and that the
 * object really belongs to this cache. Returns 1 if everything is OK.
 */
static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	/* Double free detection: the object must not already be on the freelist */
	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		/* Distinguish the three ways the cache pointer can be wrong */
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}
| |
/*
 * Debug checks on the slow free path. Supports checking bulk free of a
 * constructed freelist: walks the chain from @head to @tail, verifying
 * each object, recording the free track (if enabled) and resetting its
 * debug state to "inactive". Returns 1 if all objects checked out, 0 if
 * a bad object stopped the walk. Holds n->list_lock and the slab lock
 * (in that order) for the whole walk.
 */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;	/* objects actually walked; compared against bulk_cnt */
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}
| |
| /* |
| * Parse a block of slub_debug options. Blocks are delimited by ';' |
| * |
| * @str: start of block |
| * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified |
| * @slabs: return start of list of slabs, or NULL when there's no list |
| * @init: assume this is initial parsing and not per-kmem-create parsing |
| * |
| * returns the start of next block if there's any, or NULL |
| */ |
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			/* '-' switches all debugging off for this block */
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			/* only warn once, during boot-time parsing */
			if (init)
				pr_err("slub_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	/* Return the start of the next block, or NULL at end of string */
	if (*str)
		return str;
	else
		return NULL;
}
| |
/*
 * Process the slub_debug= kernel boot parameter. The value may consist
 * of several ';'-separated blocks, each optionally restricted to a list
 * of slab names (parsed by parse_slub_debug_flags()).
 */
static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		/* blocks without a slab list set the global default flags */
		if (!slab_list) {
			slub_debug = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only enabled for those slabs, so the global
	 * slub_debug should be 0. We can extended that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			slub_debug = 0;
		slub_debug_string = saved_str;
	}
out:
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}
| |
| __setup("slub_debug", setup_slub_debug); |
| |
| /* |
| * kmem_cache_flags - apply debugging options to the cache |
| * @object_size: the size of an object without meta data |
| * @flags: flags to set |
| * @name: name of the cache |
| * |
| * Debug option(s) are applied to @flags. In addition to the debug |
| * option(s), if a slab name (or multiple) is specified i.e. |
| * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ... |
| * then only the select slabs will receive the debug option(s). |
| */ |
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			/* the slab list ends where the next block begins */
			if (next_block && next_block < end)
				end = next_block - 1;

			/* a '*' makes the entry a prefix match */
			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				/* using the longer length forces an exact match */
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	/* no per-slab match: apply the global default debug flags */
	return flags | slub_debug;
}
| #else /* !CONFIG_SLUB_DEBUG */ |
/*
 * No-op stubs so the rest of SLUB compiles unchanged when
 * CONFIG_SLUB_DEBUG is disabled.
 */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}
static inline
void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
/* Without debugging support no per-slab name matching is done */
slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
			       void **freelist, void *nextfree)
{
	return false;
}
| #endif /* CONFIG_SLUB_DEBUG */ |
| |
| /* |
| * Hooks for other subsystems that check memory allocations. In a typical |
| * production configuration these hooks all should produce no code at all. |
| */ |
/* Debug/sanitizer notifications for a large (page-allocator backed) kmalloc. */
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	return ptr;
}
| |
/* Counterpart of kmalloc_large_node_hook() for freeing large objects. */
static __always_inline void kfree_hook(void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x, _RET_IP_);
}
| |
/*
 * Per-object debug/sanitizer processing on free. Returns true when KASAN
 * has taken the object into quarantine, i.e. its reuse must be delayed
 * and it must not go back on the freelist yet.
 */
static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast path
	 * So in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#ifdef CONFIG_LOCKDEP
	{
		unsigned long flags;

		local_irq_save(flags);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	/* Use KCSAN to help debug racy use-after-free. */
	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
		__kcsan_check_access(x, s->object_size,
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);

	/* KASAN might put x into memory quarantine, delaying its reuse */
	return kasan_slab_free(s, x, _RET_IP_);
}
| |
/*
 * Run slab_free_hook() on every object of a constructed freelist
 * (head..tail) that is about to be freed, rebuilding the list without
 * the objects whose reuse KASAN delays. *cnt is adjusted to the length
 * of the rebuilt list. Returns true if anything remains to be freed.
 */
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
					   void **head, void **tail,
					   int *cnt)
{

	void *object;
	void *next = *head;
	void *old_tail = *tail ? *tail : *head;
	int rsize;

	/* Head and tail of the reconstructed freelist */
	*head = NULL;
	*tail = NULL;

	do {
		object = next;
		/* read the link before init_on_free wipes the object */
		next = get_freepointer(s, object);

		if (slab_want_init_on_free(s)) {
			/*
			 * Clear the object and the metadata, but don't touch
			 * the redzone.
			 *
			 * NOTE(review): this also zeroes a free pointer stored
			 * inside the object; set_freepointer() below rewrites
			 * it for objects kept on the list, but confirm objects
			 * handed to the KASAN quarantine do not rely on it.
			 */
			memset(object, 0, s->object_size);
			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
							   : 0;
			memset((char *)object + s->inuse, 0,
			       s->size - s->inuse - rsize);

		}
		/* If object's reuse doesn't have to be delayed */
		if (!slab_free_hook(s, object)) {
			/* Move object to the new freelist */
			set_freepointer(s, object, *head);
			*head = object;
			if (!*tail)
				*tail = object;
		} else {
			/*
			 * Adjust the reconstructed freelist depth
			 * accordingly if object's reuse is delayed.
			 */
			--(*cnt);
		}
	} while (object != old_tail);

	/* single-object list: callers expect tail == NULL in that case */
	if (*head == *tail)
		*tail = NULL;

	return *head != NULL;
}
| |
/*
 * Initialize one object in a new slab: debug state, KASAN metadata and,
 * if the cache has a constructor, run it with the object temporarily
 * unpoisoned. Returns the pointer to link into the freelist (KASAN may
 * return an updated/tagged pointer).
 */
static void *setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	object = kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
	return object;
}
| |
| /* |
| * Slab allocation and freeing |
| */ |
| static inline struct page *alloc_slab_page(struct kmem_cache *s, |
| gfp_t flags, int node, struct kmem_cache_order_objects oo) |
| { |
| struct page *page; |
| unsigned int order = oo_order(oo); |
| |
| if (node == NUMA_NO_NODE) |
| page = alloc_pages(flags, order); |
| else |
| page = __alloc_pages_node(node, flags, order); |
| |
| if (page) |
| account_slab_page(page, order, s); |
| |
| return page; |
| } |
| |
| #ifdef CONFIG_SLAB_FREELIST_RANDOM |
| /* Pre-initialize the random sequence cache */ |
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);
	int err;

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/*
	 * Transform to an offset on the set of pages: each sequence entry
	 * becomes a byte offset (index * object size) usable directly by
	 * next_freelist_entry().
	 */
	if (s->random_seq) {
		unsigned int i;

		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}
| |
| /* Initialize each random sequence freelist per cache */ |
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	/* failures are logged inside init_cache_random_seq() and ignored here */
	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}
| |
| /* Get the next entry on the pre-computed freelist randomized */ |
/*
 * Get the next entry on the pre-computed freelist randomized.
 *
 * @pos cycles through s->random_seq, whose entries are byte offsets
 * (pre-multiplied by s->size in init_cache_random_seq()).
 */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}
| |
| /* Shuffle the single linked freelist based on a random pre-computed sequence */ |
/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	/* Nothing to shuffle without a sequence or with fewer than two objects */
	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	/* random starting point in the sequence decorrelates pages */
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	cur = setup_object(s, page, cur);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		next = setup_object(s, page, next);
		set_freepointer(s, cur, next);
		cur = next;
	}
	set_freepointer(s, cur, NULL);

	return true;
}
| #else |
/* Stubs when CONFIG_SLAB_FREELIST_RANDOM is off: freelists stay in order. */
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
| #endif /* CONFIG_SLAB_FREELIST_RANDOM */ |
| |
/*
 * Allocate a new slab page (falling back to the cache's minimum order
 * under memory pressure) and lay out its initial freelist. The returned
 * page is frozen with inuse == objects, ready to become a cpu slab.
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p, *next;
	int idx;
	bool shuffle;

	flags &= gfp_allowed_mask;

	/* Re-enable interrupts if the allocation is allowed to sleep */
	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	page->objects = oo_objects(oo);

	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	kasan_poison_slab(page);

	start = page_address(page);

	setup_page_debug(s, page, start);

	shuffle = shuffle_freelist(s, page);

	/* No randomization: link objects in address order */
	if (!shuffle) {
		start = fixup_red_left(s, start);
		start = setup_object(s, page, start);
		page->freelist = start;
		for (idx = 0, p = start; idx < page->objects - 1; idx++) {
			next = p + s->size;
			next = setup_object(s, page, next);
			set_freepointer(s, p, next);
			p = next;
		}
		set_freepointer(s, p, NULL);
	}

	/* All objects are considered in use while the page is frozen */
	page->inuse = page->objects;
	page->frozen = 1;

out:
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}
| |
| static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) |
| { |
| if (unlikely(flags & GFP_SLAB_BUG_MASK)) |
| flags = kmalloc_fix_flags(flags); |
| |
| return allocate_slab(s, |
| flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); |
| } |
| |
/* Return a slab page to the page allocator, running debug checks first. */
static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
		void *p;

		/* verify padding and every object's debug state before release */
		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, SLUB_RED_INACTIVE);
	}

	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);

	/* NOTE(review): mapping aliases slab fields; cleared before release */
	page->mapping = NULL;
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	unaccount_slab_page(page, order, s);
	__free_pages(page, order);
}
| |
/* RCU callback: actually free a slab after the grace period has elapsed. */
static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page = container_of(h, struct page, rcu_head);

	__free_slab(page->slab_cache, page);
}
| |
| static void free_slab(struct kmem_cache *s, struct page *page) |
| { |
| if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { |
| call_rcu(&page->rcu_head, rcu_free_slab); |
| } else |
| __free_slab(s, page); |
| } |
| |
/* Drop a slab for good: remove it from the node accounting and free it. */
static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}
| |
| /* |
| * Management of partially allocated slabs. |
| */ |
/*
 * Put @page on @n's partial list (head by default, tail when the caller
 * passes DEACTIVATE_TO_TAIL). Unlike add_partial() there is no lockdep
 * assertion here — presumably for callers where the node is not yet
 * reachable by others; confirm against call sites.
 */
static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->slab_list, &n->partial);
	else
		list_add(&page->slab_list, &n->partial);
}
| |
/* Locked variant of __add_partial(): caller must hold n->list_lock. */
static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, page, tail);
}
| |
/* Take @page off @n's partial list; caller must hold n->list_lock. */
static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	list_del(&page->slab_list);
	n->nr_partial--;
}
| |
| /* |
| * Remove slab from the partial list, freeze it and |
| * return the pointer to the freelist. |
| * |
| * Returns a list of objects or NULL if it fails. |
| */ |
| static inline void *acquire_slab(struct kmem_cache *s, |
| struct kmem_cache_node *n, struct page *page, |
| int mode, int *objects) |
| { |
| void *freelist; |
| unsigned long counters; |
| struct page new; |
| |
| lockdep_assert_held(&n->list_lock); |
| |
| /* |
| * Zap the freelist and set the frozen bit. |
| * The old freelist is the list of objects for the |
| * per cpu allocation list. |
| */ |
| freelist = page->freelist; |
| counters = page->counters; |
| new.counters = counters; |
| *objects = new.objects - new.inuse; |
| if (mode) { |
| new.inuse = page->objects; |
| new.freelist = NULL; |
| } else { |
| new.freelist = freelist; |
| } |
| |
| VM_BUG_ON(new.frozen); |
| new.frozen = 1; |
| |
| if (!__cmpxchg_double_slab(s, page, |
| freelist, counters, |
| new.freelist, new.counters, |
| "acquire_slab")) |
| return NULL; |
| |
| remove_partial(n, page); |
| WARN_ON(!freelist); |
| return freelist; |
| } |
| |
| static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); |
| static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags); |
| |
| /* |
| * Try to allocate a partial slab from a specific node. |
| */ |
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
				struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	unsigned int available = 0;	/* free objects gathered so far */
	int objects;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;

		if (!pfmemalloc_match(page, flags))
			continue;

		/*
		 * First acquired slab (mode != 0) becomes the cpu slab and
		 * its freelist is handed back; later ones are only frozen
		 * and parked on the cpu partial list.
		 */
		t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object) {
			c->page = page;
			stat(s, ALLOC_FROM_PARTIAL);
			object = t;
		} else {
			put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		/* stop once the cpu partial budget is half filled */
		if (!kmem_cache_has_cpu_partial(s)
			|| available > slub_cpu_partial(s) / 2)
			break;

	}
	spin_unlock(&n->list_lock);
	return object;
}
| |
| /* |
| * Get a page from somewhere. Search in increasing NUMA distances. |
| */ |
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type highest_zoneidx = gfp_zone(flags);
	void *object;
	unsigned int cpuset_mems_cookie;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
	 * (which makes defrag_ratio = 1000) then every (well almost)
	 * allocation will first attempt to defrag slab caches on other nodes.
	 * This means scanning over all nodes to look for partial slabs which
	 * may be expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	/* cheap pseudo-random throttle: remote scan only ratio/1024 of the time */
	if (!s->remote_node_defrag_ratio ||
	    get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		zonelist = node_zonelist(mempolicy_slab_node(), flags);
		for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
			struct kmem_cache_node *n;

			n = get_node(s, zone_to_nid(zone));

			/* only raid nodes that have more than min_partial spare slabs */
			if (n && cpuset_zone_allowed(zone, flags) &&
					n->nr_partial > s->min_partial) {
				object = get_partial_node(s, n, c, flags);
				if (object) {
					/*
					 * Don't check read_mems_allowed_retry()
					 * here - if mems_allowed was updated in
					 * parallel, that was a harmless race
					 * between allocation and the cpuset
					 * update
					 */
					return object;
				}
			}
		}
	} while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif	/* CONFIG_NUMA */
	return NULL;
}
| |
| /* |
| * Get a partial page, lock it and return it. |
| */ |
| static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, |
| struct kmem_cache_cpu *c) |
| { |
| void *object; |
| int searchnode = node; |
| |
| if (node == NUMA_NO_NODE) |
| searchnode = numa_mem_id(); |
| |
| object = get_partial_node(s, get_node(s, searchnode), c, flags); |
| if (object || node != NUMA_NO_NODE) |
| return object; |
| |
| return get_any_partial(s, flags, c); |
| } |
| |
#ifdef CONFIG_PREEMPTION
/*
 * Calculate the next globally unique transaction for disambiguation
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented by CONFIG_NR_CPUS. Rounding up to a power of two keeps
 * the cpu/event extraction in tid_to_cpu()/tid_to_event() cheap.
 */
#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/*
 * No preemption supported therefore also no need to check for
 * different cpus.
 */
#define TID_STEP 1
#endif
| |
/* Advance a transaction id to the next value for the same cpu. */
static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}
| |
#ifdef SLUB_DEBUG_CMPXCHG
/* The low bits of a tid encode the cpu it was generated on. */
static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

/* The high bits count the transactions performed on that cpu. */
static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}
#endif
| |
/* Initial transaction id for @cpu; later tids advance by TID_STEP. */
static inline unsigned int init_tid(int cpu)
{
	return cpu;
}
| |
/*
 * Diagnose why a fast path cmpxchg had to be retried (verbose only with
 * SLUB_DEBUG_CMPXCHG); always bumps the CMPXCHG_DOUBLE_CPU_FAIL stat.
 */
static inline void note_cmpxchg_failure(const char *n,
		const struct kmem_cache *s, unsigned long tid)
{
#ifdef SLUB_DEBUG_CMPXCHG
	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

	pr_info("%s %s: cmpxchg redo ", n, s->name);

#ifdef CONFIG_PREEMPTION
	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
		pr_warn("due to cpu change %d -> %d\n",
			tid_to_cpu(tid), tid_to_cpu(actual_tid));
	else
#endif
	if (tid_to_event(tid) != tid_to_event(actual_tid))
		pr_warn("due to cpu running other code. Event %ld->%ld\n",
			tid_to_event(tid), tid_to_event(actual_tid));
	else
		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
			actual_tid, tid, next_tid(tid));
#endif
	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}
| |
| static void init_kmem_cache_cpus(struct kmem_cache *s) |
| { |
| int cpu; |
| |
| for_each_possible_cpu(cpu) |
| per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); |
| } |
| |
| /* |
| * Remove the cpu slab |
| */ |
| static void deactivate_slab(struct kmem_cache *s, struct page *page, |
| void *freelist, struct kmem_cache_cpu *c) |
| { |
| enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; |
| struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
| int lock = 0; |
| enum slab_modes l = M_NONE, m = M_NONE; |
| void *nextfree; |
| int tail = DEACTIVATE_TO_HEAD; |
| struct page new; |
| struct page old; |
| |
| if (page->freelist) { |
| stat(s, DEACTIVATE_REMOTE_FREES); |
| tail = DEACTIVATE_TO_TAIL; |
| } |
| |
| /* |
| * Stage one: Free all available per cpu objects back |
| * to the page freelist while it is still frozen. Leave the |
| * last one. |
| * |
| * There is no need to take the list->lock because the page |
| * is still frozen. |
| */ |
| while (freelist && (nextfree = get_freepointer(s, freelist))) { |
| void *prior; |
| unsigned long counters; |
| |
| /* |
| * If 'nextfree' is invalid, it is possible that the object at |
| * 'freelist' is already corrupted. So isolate all objects |
| * starting at 'freelist'. |
| */ |
| if (freelist_corrupted(s, page, &freelist, nextfree)) |
| break; |
| |
| do { |
| prior = page->freelist; |
| counters = page->counters; |
| set_freepointer(s, freelist, prior); |
| new.counters = counters; |
| new.inuse--; |
| VM_BUG_ON(!new.frozen); |
| |
| } while (!__cmpxchg_double_slab(s, page, |
| prior, counters, |
| freelist, new.counters, |
| "drain percpu freelist")); |
| |
| freelist = nextfree; |
| } |
| |
| /* |
| * Stage two: Ensure that the page is unfrozen while the |
| * list presence reflects the actual number of objects |
| * during unfreeze. |
| * |
| * We setup the list membership and then perform a cmpxchg |
| * with the count. If there is a mismatch then the page |
| * is not unfrozen but the page is on the wrong list. |
| * |
| * Then we restart the process which may have to remove |
| * the page from the list that we just put it on again |
| * because the number of objects in the slab may have |
| * changed. |
| */ |
| redo: |
| |
| old.freelist = page->freelist; |
| old.counters = page->counters; |
| VM_BUG_ON(!old.frozen); |
| |
| /* Determine target state of the slab */ |
| new.counters = old.counters; |
| if (freelist) { |
| new.inuse--; |
| set_freepointer(s, freelist, old.freelist); |
| new.freelist = freelist; |
| } else |
| new.freelist = old.freelist; |
| |
| new.frozen = 0; |
| |
| if (!new.inuse && n->nr_partial >= s->min_partial) |
| m = M_FREE; |
| else if (new.freelist) { |
| m = M_PARTIAL; |
| if (!lock) { |
| lock = 1; |
| /* |
| * Taking the spinlock removes the possibility |
| * that acquire_slab() will see a slab page that |
| * is frozen |
| */ |
| spin_lock(&n->list_lock); |
| } |
| } else { |
| m = M_FULL; |
| #ifdef CONFIG_SLUB_DEBUG |
| if ((s->flags & SLAB_STORE_USER) && !lock) { |
| lock = 1; |
| /* |
| * This also ensures that the scanning of full |
| * slabs from diagnostic functions will not see |
| * any frozen slabs. |
| */ |
| spin_lock(&n->list_lock); |
| } |
| #endif |
| } |
| |
| if (l != m) { |
| if (l == M_PARTIAL) |
| remove_partial(n, page); |
| else if (l == M_FULL) |
| remove_full(s, n, page); |
| |
| if (m == M_PARTIAL) |
| add_partial(n, page, tail); |
| else if (m == M_FULL) |
| add_full(s, n, page); |
| } |
| |
| l = m; |
| if (!__cmpxchg_double_slab(s, page, |
| old.freelist, old.counters, |
| new.freelist, new.counters, |
| "unfreezing slab")) |
| goto redo; |
| |
| if (lock) |
| spin_unlock(&n->list_lock); |
| |
| if (m == M_PARTIAL) |
| stat(s, tail); |
| else if (m == M_FULL) |
| stat(s, DEACTIVATE_FULL); |
| else if (m == M_FREE) { |
| stat(s, DEACTIVATE_EMPTY); |
| discard_slab(s, page); |
| stat(s, FREE_SLAB); |
| } |
| |
| c->page = NULL; |
| c->freelist = NULL; |
| } |
| |
| /* |
| * Unfreeze all the cpu partial slabs. |
| * |
| * This function must be called with interrupts disabled |
| * for the cpu using c (or some other guarantee must be there |
| * to guarantee no concurrent accesses). |
| */ |
| static void unfreeze_partials(struct kmem_cache *s, |
| struct kmem_cache_cpu *c) |
| { |
| #ifdef CONFIG_SLUB_CPU_PARTIAL |
| struct kmem_cache_node *n = NULL, *n2 = NULL; |
| struct page *page, *discard_page = NULL; |
| |
| while ((page = slub_percpu_partial(c))) { |
| struct page new; |
| struct page old; |
| |
| slub_set_percpu_partial(c, page); |
| |
| n2 = get_node(s, page_to_nid(page)); |
| if (n != n2) { |
| if (n) |
| spin_unlock(&n->list_lock); |
| |
| n = n2; |
| spin_lock(&n->list_lock); |
| } |
| |
| do { |
| |
| old.freelist = page->freelist; |
| old.counters = page->counters; |
| VM_BUG_ON(!old.frozen); |
| |
| new.counters = old.counters; |
| new.freelist = old.freelist; |
| |
| new.frozen = 0; |
| |
| } while (!__cmpxchg_double_slab(s, page, |
| old.freelist, old.counters, |
| new.freelist, new.counters, |
| "unfreezing slab")); |
| |
| if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { |
| page->next = discard_page; |
| discard_page = page; |
| } else { |
| add_partial(n, page, DEACTIVATE_TO_TAIL); |
| stat(s, FREE_ADD_PARTIAL); |
| } |
| } |
| |
| if (n) |
| spin_unlock(&n->list_lock); |
| |
| while (discard_page) { |
| page = discard_page; |
| discard_page = discard_page->next; |
| |
| stat(s, DEACTIVATE_EMPTY); |
| discard_slab(s, page); |
| stat(s, FREE_SLAB); |
| } |
| #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| } |
| |
| /* |
| * Put a page that was just frozen (in __slab_free|get_partial_node) into a |
| * partial page slot if available. |
| * |
| * If we did not find a slot then simply move all the partials to the |
| * per node partial list. |
| */ |
| static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) |
| { |
| #ifdef CONFIG_SLUB_CPU_PARTIAL |
| struct page *oldpage; |
| int pages; |
| int pobjects; |
| |
| preempt_disable(); |
| do { |
| pages = 0; |
| pobjects = 0; |
| oldpage = this_cpu_read(s->cpu_slab->partial); |
| |
| if (oldpage) { |
| pobjects = oldpage->pobjects; |
| pages = oldpage->pages; |
| if (drain && pobjects > slub_cpu_partial(s)) { |
| unsigned long flags; |
| /* |
| * partial array is full. Move the existing |
| * set to the per node partial list. |
| */ |
| local_irq_save(flags); |
| unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); |
| local_irq_restore(flags); |
| oldpage = NULL; |
| pobjects = 0; |
| pages = 0; |
| stat(s, CPU_PARTIAL_DRAIN); |
| } |
| } |
| |
| pages++; |
| pobjects += page->objects - page->inuse; |
| |
| page->pages = pages; |
| page->pobjects = pobjects; |
| page->next = oldpage; |
| |
| } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) |
| != oldpage); |
| if (unlikely(!slub_cpu_partial(s))) { |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); |
| local_irq_restore(flags); |
| } |
| preempt_enable(); |
| #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| } |
| |
/*
 * Deactivate the cpu slab of @c, giving its page and remaining freelist
 * back to deactivate_slab().
 */
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	stat(s, CPUSLAB_FLUSH);
	deactivate_slab(s, c->page, c->freelist, c);

	/* New tid invalidates any in-flight fastpath cmpxchg on this cpu. */
	c->tid = next_tid(c->tid);
}
| |
| /* |
| * Flush cpu slab. |
| * |
| * Called from IPI handler with interrupts disabled. |
| */ |
| static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) |
| { |
| struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
| |
| if (c->page) |
| flush_slab(s, c); |
| |
| unfreeze_partials(s, c); |
| } |
| |
/*
 * IPI callback used by flush_all(): flush the calling cpu's slab state
 * for the kmem_cache passed in @d.
 */
static void flush_cpu_slab(void *d)
{
	__flush_cpu_slab((struct kmem_cache *)d, smp_processor_id());
}
| |
| static bool has_cpu_slab(int cpu, void *info) |
| { |
| struct kmem_cache *s = info; |
| struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
| |
| return c->page || slub_percpu_partial(c); |
| } |
| |
/*
 * Flush cpu slab state on every cpu that currently holds any
 * (per has_cpu_slab()), via IPI to flush_cpu_slab().
 */
static void flush_all(struct kmem_cache *s)
{
	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
}
| |
| /* |
| * Use the cpu notifier to insure that the cpu slabs are flushed when |
| * necessary. |
| */ |
| static int slub_cpu_dead(unsigned int cpu) |
| { |
| struct kmem_cache *s; |
| unsigned long flags; |
| |
| mutex_lock(&slab_mutex); |
| list_for_each_entry(s, &slab_caches, list) { |
| local_irq_save(flags); |
| __flush_cpu_slab(s, cpu); |
| local_irq_restore(flags); |
| } |
| mutex_unlock(&slab_mutex); |
| return 0; |
| } |
| |
| /* |
| * Check if the objects in a per cpu structure fit numa |
| * locality expectations. |
| */ |
| static inline int node_match(struct page *page, int node) |
| { |
| #ifdef CONFIG_NUMA |
| if (node != NUMA_NO_NODE && page_to_nid(page) != node) |
| return 0; |
| #endif |
| return 1; |
| } |
| |
| #ifdef CONFIG_SLUB_DEBUG |
/* Number of free objects in a slab page. */
static int count_free(struct page *page)
{
	return page->objects - page->inuse;
}
| |
/* Total number of objects (allocated and free) tracked on node @n. */
static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}
| #endif /* CONFIG_SLUB_DEBUG */ |
| |
| #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS) |
| static unsigned long count_partial(struct kmem_cache_node *n, |
| int (*get_count)(struct page *)) |
| { |
| unsigned long flags; |
| unsigned long x = 0; |
| struct page *page; |
| |
| spin_lock_irqsave(&n->list_lock, flags); |
| list_for_each_entry(page, &n->partial, slab_list) |
| x += get_count(page); |
| spin_unlock_irqrestore(&n->list_lock, flags); |
| return x; |
| } |
| #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ |
| |
/*
 * Rate-limited diagnostic dump when an allocation from @s on node @nid
 * fails: prints cache geometry plus per-node slab/object/free counts.
 */
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
#ifdef CONFIG_SLUB_DEBUG
	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	int node;
	struct kmem_cache_node *n;

	/* Stay silent if the caller opted out or we are rate limited. */
	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
		return;

	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
		nid, gfpflags, &gfpflags);
	pr_warn("  cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
		s->name, s->object_size, s->size, oo_order(s->oo),
		oo_order(s->min));

	if (oo_order(s->min) > get_order(s->object_size))
		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
			s->name);

	for_each_kmem_cache_node(s, node, n) {
		unsigned long nr_slabs;
		unsigned long nr_objs;
		unsigned long nr_free;

		nr_free  = count_partial(n, count_free);
		nr_slabs = node_nr_slabs(n);
		nr_objs  = node_nr_objs(n);

		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
			node, nr_slabs, nr_objs, nr_free);
	}
#endif
}
| |
/*
 * Refill the cpu slab referenced by *pc: first try the partial lists
 * (get_partial()), otherwise allocate a fresh slab from the page
 * allocator.
 *
 * Returns the freelist to allocate from, or NULL if no memory was
 * available. On a fresh-slab success, *pc is updated to the current
 * cpu's kmem_cache_cpu, which may differ from the one passed in.
 */
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));

	freelist = get_partial(s, flags, node, c);

	if (freelist)
		return freelist;

	page = new_slab(s, flags, node);
	if (page) {
		/* We may have migrated; refetch and flush any old cpu slab. */
		c = raw_cpu_ptr(s->cpu_slab);
		if (c->page)
			flush_slab(s, c);

		/*
		 * No other reference to the page yet so we can
		 * muck around with it freely without cmpxchg
		 */
		freelist = page->freelist;
		page->freelist = NULL;

		stat(s, ALLOC_SLAB);
		c->page = page;
		*pc = c;
	}

	return freelist;
}
| |
| static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) |
| { |
| if (unlikely(PageSlabPfmemalloc(page))) |
| return gfp_pfmemalloc_allowed(gfpflags); |
| |
| return true; |
| } |
| |
| /* |
| * Check the page->freelist of a page and either transfer the freelist to the |
| * per cpu freelist or deactivate the page. |
| * |
| * The page is still frozen if the return value is not NULL. |
| * |
| * If this function returns NULL then the page has been unfrozen. |
| * |
| * This function must be called with interrupt disabled. |
| */ |
| static inline void *get_freelist(struct kmem_cache *s, struct page *page) |
| { |
| struct page new; |
| unsigned long counters; |
| void *freelist; |
| |
| do { |
| freelist = page->freelist; |
| counters = page->counters; |
| |
| new.counters = counters; |
| VM_BUG_ON(!new.frozen); |
| |
| new.inuse = page->objects; |
| new.frozen = freelist != NULL; |
| |
| } while (!__cmpxchg_double_slab(s, page, |
| freelist, counters, |
| NULL, new.counters, |
| "get_freelist")); |
| |
| return freelist; |
| } |
| |
| /* |
| * Slow path. The lockless freelist is empty or we need to perform |
| * debugging duties. |
| * |
| * Processing is still very fast if new objects have been freed to the |
| * regular freelist. In that case we simply take over the regular freelist |
| * as the lockless freelist and zap the regular freelist. |
| * |
| * If that is not working then we fall back to the partial lists. We take the |
| * first element of the freelist as the object to allocate now and move the |
| * rest of the freelist to the lockless freelist. |
| * |
| * And if we were unable to get a new slab from the partial slab lists then |
| * we need to allocate a new slab. This is the slowest path since it involves |
| * a call to the page allocator and the setup of a new slab. |
| * |
| * Version of __slab_alloc to use when we know that interrupts are |
| * already disabled (which is the case for bulk allocation). |
| */ |
| static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| unsigned long addr, struct kmem_cache_cpu *c) |
| { |
| void *freelist; |
| struct page *page; |
| |
| stat(s, ALLOC_SLOWPATH); |
| |
| page = c->page; |
| if (!page) { |
| /* |
| * if the node is not online or has no normal memory, just |
| * ignore the node constraint |
| */ |
| if (unlikely(node != NUMA_NO_NODE && |
| !node_state(node, N_NORMAL_MEMORY))) |
| node = NUMA_NO_NODE; |
| goto new_slab; |
| } |
| redo: |
| |
| if (unlikely(!node_match(page, node))) { |
| /* |
| * same as above but node_match() being false already |
| * implies node != NUMA_NO_NODE |
| */ |
| if (!node_state(node, N_NORMAL_MEMORY)) { |
| node = NUMA_NO_NODE; |
| goto redo; |
| } else { |
| stat(s, ALLOC_NODE_MISMATCH); |
| deactivate_slab(s, page, c->freelist, c); |
| goto new_slab; |
| } |
| } |
| |
| /* |
| * By rights, we should be searching for a slab page that was |
| * PFMEMALLOC but right now, we are losing the pfmemalloc |
| * information when the page leaves the per-cpu allocator |
| */ |
| if (unlikely(!pfmemalloc_match(page, gfpflags))) { |
| deactivate_slab(s, page, c->freelist, c); |
| goto new_slab; |
| } |
| |
| /* must check again c->freelist in case of cpu migration or IRQ */ |
| freelist = c->freelist; |
| if (freelist) |
| goto load_freelist; |
| |
| freelist = get_freelist(s, page); |
| |
| if (!freelist) { |
| c->page = NULL; |
| stat(s, DEACTIVATE_BYPASS); |
| goto new_slab; |
| } |
| |
| stat(s, ALLOC_REFILL); |
| |
| load_freelist: |
| /* |
| * freelist is pointing to the list of objects to be used. |
| * page is pointing to the page from which the objects are obtained. |
| * That page must be frozen for per cpu allocations to work. |
| */ |
| VM_BUG_ON(!c->page->frozen); |
| c->freelist = get_freepointer(s, freelist); |
| c->tid = next_tid(c->tid); |
| return freelist; |
| |
| new_slab: |
| |
| if (slub_percpu_partial(c)) { |
| page = c->page = slub_percpu_partial(c); |
| slub_set_percpu_partial(c, page); |
| stat(s, CPU_PARTIAL_ALLOC); |
| goto redo; |
| } |
| |
| freelist = new_slab_objects(s, gfpflags, node, &c); |
| |
| if (unlikely(!freelist)) { |
| slab_out_of_memory(s, gfpflags, node); |
| return NULL; |
| } |
| |
| page = c->page; |
| if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags))) |
| goto load_freelist; |
| |
| /* Only entered in the debug case */ |
| if (kmem_cache_debug(s) && |
| !alloc_debug_processing(s, page, freelist, addr)) |
| goto new_slab; /* Slab failed checks. Next slab needed */ |
| |
| deactivate_slab(s, page, get_freepointer(s, freelist), c); |
| return freelist; |
| } |
| |
| /* |
| * Another one that disabled interrupt and compensates for possible |
| * cpu changes by refetching the per cpu area pointer. |
| */ |
| static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| unsigned long addr, struct kmem_cache_cpu *c) |
| { |
| void *p; |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| #ifdef CONFIG_PREEMPTION |
| /* |
| * We may have been preempted and rescheduled on a different |
| * cpu before disabling interrupts. Need to reload cpu area |
| * pointer. |
| */ |
| c = this_cpu_ptr(s->cpu_slab); |
| #endif |
| |
| p = ___slab_alloc(s, gfpflags, node, addr, c); |
| local_irq_restore(flags); |
| return p; |
| } |
| |
| /* |
| * If the object has been wiped upon free, make sure it's fully initialized by |
| * zeroing out freelist pointer. |
| */ |
| static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, |
| void *obj) |
| { |
| if (unlikely(slab_want_init_on_free(s)) && obj) |
| memset((void *)((char *)obj + s->offset), 0, sizeof(void *)); |
| } |
| |
| /* |
| * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) |
| * have the fastpath folded into their functions. So no function call |
| * overhead for requests that can be satisfied on the fastpath. |
| * |
| * The fastpath works by first checking if the lockless freelist can be used. |
| * If not then __slab_alloc is called for slow processing. |
| * |
| * Otherwise we can simply pick the next object from the lockless free list. |
| */ |
| static __always_inline void *slab_alloc_node(struct kmem_cache *s, |
| gfp_t gfpflags, int node, unsigned long addr) |
| { |
| void *object; |
| struct kmem_cache_cpu *c; |
| struct page *page; |
| unsigned long tid; |
| struct obj_cgroup *objcg = NULL; |
| |
| s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags); |
| if (!s) |
| return NULL; |
| redo: |
| /* |
| * Must read kmem_cache cpu data via this cpu ptr. Preemption is |
| * enabled. We may switch back and forth between cpus while |
| * reading from one cpu area. That does not matter as long |
| * as we end up on the original cpu again when doing the cmpxchg. |
| * |
| * We should guarantee that tid and kmem_cache are retrieved on |
| * the same cpu. It could be different if CONFIG_PREEMPTION so we need |
| * to check if it is matched or not. |
| */ |
| do { |
| tid = this_cpu_read(s->cpu_slab->tid); |
| c = raw_cpu_ptr(s->cpu_slab); |
| } while (IS_ENABLED(CONFIG_PREEMPTION) && |
| unlikely(tid != READ_ONCE(c->tid))); |
| |
| /* |
| * Irqless object alloc/free algorithm used here depends on sequence |
| * of fetching cpu_slab's data. tid should be fetched before anything |
| * on c to guarantee that object and page associated with previous tid |
| * won't be used with current tid. If we fetch tid first, object and |
| * page could be one associated with next tid and our alloc/free |
| * request will be failed. In this case, we will retry. So, no problem. |
| */ |
| barrier(); |
| |
| /* |
| * The transaction ids are globally unique per cpu and per operation on |
| * a per cpu queue. Thus they can be guarantee that the cmpxchg_double |
| * occurs on the right processor and that there was no operation on the |
| * linked list in between. |
| */ |
| |
| object = c->freelist; |
| page = c->page; |
| if (unlikely(!object || !page || !node_match(page, node))) { |
| object = __slab_alloc(s, gfpflags, node, addr, c); |
| } else { |
| void *next_object = get_freepointer_safe(s, object); |
| |
| /* |
| * The cmpxchg will only match if there was no additional |
| * operation and if we are on the right processor. |
| * |
| * The cmpxchg does the following atomically (without lock |
| * semantics!) |
| * 1. Relocate first pointer to the current per cpu area. |
| * 2. Verify that tid and freelist have not been changed |
| * 3. If they were not changed replace tid and freelist |
| * |
| * Since this is without lock semantics the protection is only |
| * against code executing on this cpu *not* from access by |
| * other cpus. |
| */ |
| if (unlikely(!this_cpu_cmpxchg_double( |
| s->cpu_slab->freelist, s->cpu_slab->tid, |
| object, tid, |
| next_object, next_tid(tid)))) { |
| |
| note_cmpxchg_failure("slab_alloc", s, tid); |
| goto redo; |
| } |
| prefetch_freepointer(s, next_object); |
| stat(s, ALLOC_FASTPATH); |
| } |
| |
| maybe_wipe_obj_freeptr(s, object); |
| |
| if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object) |
| memset(object, 0, s->object_size); |
| |
| slab_post_alloc_hook(s, objcg, gfpflags, 1, &object); |
| |
| return object; |
| } |
| |
/* Allocation without a node preference. */
static __always_inline void *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, unsigned long addr)
{
	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
}
| |
/* Allocate an object from @s; returns NULL on failure. */
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);

	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
				s->size, gfpflags);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
| |
| #ifdef CONFIG_TRACING |
/*
 * kmalloc-style tracing entry point: emits a kmalloc trace event with the
 * requested @size and unpoisons only @size bytes for KASAN.
 */
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
| #endif |
| |
| #ifdef CONFIG_NUMA |
/* Allocate an object from @s with a preference for @node. */
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

	trace_kmem_cache_alloc_node(_RET_IP_, ret,
				    s->object_size, s->size, gfpflags, node);

	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
| |
| #ifdef CONFIG_TRACING |
/*
 * Node-aware kmalloc tracing entry point; mirrors kmem_cache_alloc_trace()
 * but records the node in the trace event.
 */
void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
				    gfp_t gfpflags,
				    int node, size_t size)
{
	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

	trace_kmalloc_node(_RET_IP_, ret,
			   size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
| #endif |
| #endif /* CONFIG_NUMA */ |
| |
| /* |
| * Slow path handling. This may still be called frequently since objects |
| * have a longer lifetime than the cpu slabs in most processing loads. |
| * |
| * So we still attempt to reduce cache line usage. Just take the slab |
| * lock and free the item. If there is no additional partial page |
| * handling required then we can return immediately. |
| */ |
| static void __slab_free(struct kmem_cache *s, struct page *page, |
| void *head, void *tail, int cnt, |
| unsigned long addr) |
| |
| { |
| void *prior; |
| int was_frozen; |
| struct page new; |
| unsigned long counters; |
| struct kmem_cache_node *n = NULL; |
| unsigned long flags; |
| |
| stat(s, FREE_SLOWPATH); |
| |
| if (kmem_cache_debug(s) && |
| !free_debug_processing(s, page, head, tail, cnt, addr)) |
| return; |
| |
| do { |
| if (unlikely(n)) { |
| spin_unlock_irqrestore(&n->list_lock, flags); |
| n = NULL; |
| } |
| prior = page->freelist; |
| counters = page->counters; |
| set_freepointer(s, tail, prior); |
| new.counters = counters; |
| was_frozen = new.frozen; |
| new.inuse -= cnt; |
| if ((!new.inuse || !prior) && !was_frozen) { |
| |
| if (kmem_cache_has_cpu_partial(s) && !prior) { |
| |
| /* |
| * Slab was on no list before and will be |
| * partially empty |
| * We can defer the list move and instead |
| * freeze it. |
| */ |
| new.frozen = 1; |
| |
| } else { /* Needs to be taken off a list */ |
| |
| n = get_node(s, page_to_nid(page)); |
| /* |
| * Speculatively acquire the list_lock. |
| * If the cmpxchg does not succeed then we may |
| * drop the list_lock without any processing. |
| * |
| * Otherwise the list_lock will synchronize with |
| * other processors updating the list of slabs. |
| */ |
| spin_lock_irqsave(&n->list_lock, flags); |
| |
| } |
| } |
| |
| } while (!cmpxchg_double_slab(s, page, |
| prior, counters, |
| head, new.counters, |
| "__slab_free")); |
| |
| if (likely(!n)) { |
| |
| if (likely(was_frozen)) { |
| /* |
| * The list lock was not taken therefore no list |
| * activity can be necessary. |
| */ |
| stat(s, FREE_FROZEN); |
| } else if (new.frozen) { |
| /* |
| * If we just froze the page then put it onto the |
| * per cpu partial list. |
| */ |
| put_cpu_partial(s, page, 1); |
| stat(s, CPU_PARTIAL_FREE); |
| } |
| |
| return; |
| } |
| |
| if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) |
| goto slab_empty; |
| |
| /* |
| * Objects left in the slab. If it was not on the partial list before |
| * then add it. |
| */ |
| if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { |
| remove_full(s, n, page); |
| add_partial(n, page, DEACTIVATE_TO_TAIL); |
| stat(s, FREE_ADD_PARTIAL); |
| } |
| spin_unlock_irqrestore(&n->list_lock, flags); |
| return; |
| |
| slab_empty: |
| if (prior) { |
| /* |
| * Slab on the partial list. |
| */ |
| remove_partial(n, page); |
| stat(s, FREE_REMOVE_PARTIAL); |
| } else { |
| /* Slab must be on the full list */ |
| remove_full(s, n, page); |
| } |
| |
| spin_unlock_irqrestore(&n->list_lock, flags); |
| stat(s, FREE_SLAB); |
| discard_slab(s, page); |
| } |
| |
| /* |
| * Fastpath with forced inlining to produce a kfree and kmem_cache_free that |
| * can perform fastpath freeing without additional function calls. |
| * |
| * The fastpath is only possible if we are freeing to the current cpu slab |
| * of this processor. This typically the case if we have just allocated |
| * the item before. |
| * |
| * If fastpath is not possible then fall back to __slab_free where we deal |
| * with all sorts of special processing. |
| * |
| * Bulk free of a freelist with several objects (all pointing to the |
| * same page) possible by specifying head and tail ptr, plus objects |
| * count (cnt). Bulk free indicated by tail pointer being set. |
| */ |
| static __always_inline void do_slab_free(struct kmem_cache *s, |
| struct page *page, void *head, void *tail, |
| int cnt, unsigned long addr) |
| { |
| void *tail_obj = tail ? : head; |
| struct kmem_cache_cpu *c; |
| unsigned long tid; |
| |
| /* memcg_slab_free_hook() is already called for bulk free. */ |
| if (!tail) |
| memcg_slab_free_hook(s, &head, 1); |
| redo: |
| /* |
| * Determine the currently cpus per cpu slab. |
| * The cpu may change afterward. However that does not matter since |
| * data is retrieved via this pointer. If we are on the same cpu |
| * during the cmpxchg then the free will succeed. |
| */ |
| do { |
| tid = this_cpu_read(s->cpu_slab->tid); |
| c = raw_cpu_ptr(s->cpu_slab); |
| } while (IS_ENABLED(CONFIG_PREEMPTION) && |
| unlikely(tid != READ_ONCE(c->tid))); |
| |
| /* Same with comment on barrier() in slab_alloc_node() */ |
| barrier(); |
| |
| if (likely(page == c->page)) { |
| void **freelist = READ_ONCE(c->freelist); |
| |
| set_freepointer(s, tail_obj, freelist); |
| |
| if (unlikely(!this_cpu_cmpxchg_double( |
| s->cpu_slab->freelist, s->cpu_slab->tid, |
| freelist, tid, |
| head, next_tid(tid)))) { |
| |
| note_cmpxchg_failure("slab_free", s, tid); |
| goto redo; |
| } |
| stat(s, FREE_FASTPATH); |
| } else |
| __slab_free(s, page, head, tail_obj, cnt, addr); |
| |
| } |
| |
/*
 * Common free entry: run the freelist hooks, then free whatever objects
 * remain after filtering.
 */
static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
				      void *head, void *tail, int cnt,
				      unsigned long addr)
{
	/*
	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
	 * to remove objects, whose reuse must be delayed. If it returns
	 * false, every object was filtered out and nothing is freed here.
	 */
	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
		do_slab_free(s, page, head, tail, cnt, addr);
}
| |
| #ifdef CONFIG_KASAN_GENERIC |
/*
 * Free a single object directly, bypassing the freelist hooks.
 * NOTE(review): only built for CONFIG_KASAN_GENERIC — presumably used to
 * release objects leaving the KASAN quarantine; confirm against callers.
 */
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
}
| #endif |
| |
/* Free object @x back to cache @s. */
void kmem_cache_free(struct kmem_cache *s, void *x)
{
	/* Validate/resolve the cache the object actually belongs to. */
	s = cache_from_obj(s, x);
	if (!s)
		return;
	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
	trace_kmem_cache_free(_RET_IP_, x);
}
EXPORT_SYMBOL(kmem_cache_free);
|