slab: add __kmem_cache_destroy()

Like __kmem_cache_create(), it operates on a KC whose memory is managed
by the caller.

Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
diff --git a/kern/include/slab.h b/kern/include/slab.h
index 509b823..f014f83 100644
--- a/kern/include/slab.h
+++ b/kern/include/slab.h
@@ -162,12 +162,13 @@ void kmem_cache_init(void);
 void kmem_cache_reap(struct kmem_cache *cp);
 unsigned int kmc_nr_pcpu_caches(void);
 
-/* Low-level interface for initializing a cache. */
+/* Low-level interface for creating/destroying; caller manages kc's memory */
 void __kmem_cache_create(struct kmem_cache *kc, const char *name,
                          size_t obj_size, int align, int flags,
                          struct arena *source,
                          int (*ctor)(void *, void *, int),
                          void (*dtor)(void *, void *), void *priv);
+void __kmem_cache_destroy(struct kmem_cache *cp);
 
 /* Tracing */
 int kmem_trace_start(struct kmem_cache *kc);
diff --git a/kern/src/slab.c b/kern/src/slab.c
index 995ba31..0ebc730 100644
--- a/kern/src/slab.c
+++ b/kern/src/slab.c
@@ -490,7 +490,7 @@
 
 /* Once you call destroy, never use this cache again... o/w there may be weird
  * races, and other serious issues. */
-void kmem_cache_destroy(struct kmem_cache *cp)
+void __kmem_cache_destroy(struct kmem_cache *cp)
 {
 	struct kmem_slab *a_slab, *next;
 
@@ -520,6 +520,11 @@
 	}
 	spin_unlock_irqsave(&cp->cache_lock);
 	kmem_trace_warn_notempty(cp);
+}
+
+void kmem_cache_destroy(struct kmem_cache *cp)
+{
+	__kmem_cache_destroy(cp);
 	kmem_cache_free(kmem_cache_cache, cp);
 }
 