author	Jon Santmyer <jon@jonsantmyer.com>	2024-05-22 13:00:41 -0400
committer	Jon Santmyer <jon@jonsantmyer.com>	2024-05-22 13:00:41 -0400
commit	ace65b453151845bc361f21f3e5b651c35f9f126 (patch)
tree	262ebd29b0ca1d8584f0b6f1efa7a00d9f4f3e43 /memory/alloc
parent	f004c1ade8d617a82cea2fe249434cccb47a2358 (diff)
massive refactor for mp and organization (HEAD, master)
Diffstat (limited to 'memory/alloc')
-rw-r--r--	memory/alloc/calloc.c	21
-rw-r--r--	memory/alloc/free.c	80
-rw-r--r--	memory/alloc/malloc.c	206
-rw-r--r--	memory/alloc/malloc.h	183
-rw-r--r--	memory/alloc/realloc.c	73
-rw-r--r--	memory/alloc/reallocarray.c	37
6 files changed, 600 insertions, 0 deletions
diff --git a/memory/alloc/calloc.c b/memory/alloc/calloc.c
new file mode 100644
index 0000000..cd272af
--- /dev/null
+++ b/memory/alloc/calloc.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include <string.h>
+
+void*
+__calloc_impl(heap_cache_t *cache, size_t nmemb, size_t w)
+{
+ w = REALW(w);
+ /* Guard against multiplication overflow before allocating.*/
+ if(nmemb != 0 && w > SIZE_MAX / nmemb) return NULL;
+ void *ptr = __malloc_impl(cache, w * nmemb);
+ /* Propagate both NULL (zero size) and (void*)-1 (OOM) results.*/
+ if(ptr == NULL || ptr == (void*)(-1)) return ptr;
+
+ /* Zero newly allocated memory as per spec.*/
+ memset(ptr, 0, w * nmemb);
+ return ptr;
+}
diff --git a/memory/alloc/free.c b/memory/alloc/free.c
new file mode 100644
index 0000000..e2cc060
--- /dev/null
+++ b/memory/alloc/free.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+
+bucket_t*
+__bucket_from_obj(heap_cache_t *cache, bucket_obj_t *obj)
+{
+ assert(cache != NULL);
+ assert(cache->head != NULL);
+ /* Buckets are always aligned to a page boundary, and object metadata
+ * always sits below the object's data. We use these facts to quickly
+ * find the owning bucket.*/
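+ /* For example (hypothetical addresses): for an object at 0x40003210 we
+ * probe 0x40003000, then 0x40002000, and so on, until a page starting
+ * with OBJ_CHECKSUM is found or the probe drops below cache->head.*/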
+ uintptr_t objptr = (uintptr_t)obj;
+ uintptr_t bktptr = objptr & ~((uintptr_t)BUCKET_ALIGN - 1);
+ while(1) {
+ /* If the page boundary is below the head, we are below the heap memory
+ * region and can stop.*/
+ if(bktptr < (uintptr_t)cache->head) return NULL;
+
+ /* At each page boundary below the object, check for the heap checksum.
+ * If it is found, we can assume that we found a bucket.*/
+ bucket_t *bucket = (bucket_t*)bktptr;
+ if(OBJ_VALID(bucket)) {
+ if(objptr >= bucket->limit) return NULL;
+ return bucket;
+ }
+
+ bktptr -= BUCKET_ALIGN;
+ }
+}
+
+void
+__free_impl(heap_cache_t *cache, void *ptr)
+{
+ DBGPRINT("free: asking to free ptr %p\n", ptr);
+ assert(cache != NULL);
+ /* free(NULL) does nothing.*/
+ if(ptr == NULL) return;
+
+ /* Make sure the pointer actually comes from the heap.*/
+ assert(cache->tail != NULL);
+ assert((uintptr_t)ptr < cache->tail->limit);
+ assert((uintptr_t)ptr > (uintptr_t)cache->head);
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+ assert(OBJ_VALID(obj));
+
+ /* Mask the taken bit in the object to mark it free.*/
+ DBGPRINT("free: masking taken bit in object %p (was %i, now ",
+ obj, obj->size_taken);
+ obj->size_taken &= ~1;
+ DBGPRINT("%i)\n", obj->size_taken);
+
+ /* If the bucket's firstfree is greater than this newly freed object,
+ * this object becomes the new firstfree.*/
+ bucket_t *bucket = __bucket_from_obj(cache, obj);
+ if(bucket == NULL) return;
+ if(bucket->firstfree > obj)
+ bucket->firstfree = obj;
+
+ /* Start trying to merge this free object with the next object.
+ * If the object after is taken, do nothing.*/
+ bucket_obj_t *after = OBJ_AFTER(obj);
+ if((uintptr_t)after >= bucket->limit) return;
+ assert(OBJ_VALID(after));
+ if(OBJ_TAKEN(after)) return;
+
+ /* Merge with the next object.
+ * from:
+ * [meta| -objw-][meta| -afterw-]
+ * to:
+ * [meta| ---------objw---------]*/
+ DBGPRINT("free: obj %p merging with adjacent free object %p (was %i, now \n",
+ obj, after, obj->size_taken);
+ obj->size_taken += sizeof(bucket_obj_t) + after->size_taken;
+ DBGPRINT("%i)\n", obj->size_taken);
+}
diff --git a/memory/alloc/malloc.c b/memory/alloc/malloc.c
new file mode 100644
index 0000000..ba1e2d2
--- /dev/null
+++ b/memory/alloc/malloc.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "jove.h"
+#include "memory/bump.h"
+
+/**Allocate memory for a new bucket in the heap.
+ * Automatically aligns new memory to BUCKET_ALIGN.
+ * Instantiates a new free object spanning the entire bucket.
+ * Then adds the new bucket to the heap cache.
+ * If the underlying bump allocation fails, return NULL.
+ * @param cache to allocate for.
+ * @param objw width of the object the bucket is assumed to hold.
+ * @return bucket created by this function, or NULL on failure.*/
+bucket_t*
+s_bucket_new(heap_cache_t *cache, size_t objw)
+{
+ DBGPRINT("malloc: ask for new bucket for cache %p with objw %i\n", cache, objw);
+ size_t bucketw = BUCKET_WIDTH(objw);
+ /* The bump allocator hands out memory starting at the current break, so
+ * request enough for the requested bucket width plus whatever padding is
+ * needed to round the break up to the next page boundary.*/
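+ /* Worked example (hypothetical addresses): if the break sits at 0x1400,
+ * brko = 0xC00, so the bucket starts at 0x2000, the next page boundary.*/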
+ uintptr_t pbrk = (uintptr_t)bump_where();
+ uintptr_t brko = (BUCKET_ALIGN - (pbrk % BUCKET_ALIGN)) % BUCKET_ALIGN;
+ uintptr_t nbrk = (uintptr_t)bump_alloc(bucketw + brko);
+ /* Assume bump_alloc returns NULL on failure.*/
+ if(nbrk == 0) return NULL;
+
+ bucket_t *bucket = (bucket_t*)(nbrk + brko);
+ DBGPRINT("malloc: got new bucket at %p\n", bucket);
+
+ *bucket = (bucket_t){
+ .checksum = OBJ_CHECKSUM,
+ .next = NULL,
+ .limit = (uintptr_t)bucket + bucketw,
+ };
+ /* Create a new object spanning the entire bucket. This new object will
+ * be the first free.*/
+ bucket_obj_t *obj0 = (bucket_obj_t*)bucket->objs;
+ *obj0 = (bucket_obj_t) {
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = bucketw - sizeof(bucket_t) - sizeof(bucket_obj_t)
+ };
+ bucket->firstfree = obj0;
+
+ /* If the cache is empty, this will be the first and last bucket.*/
+ if(cache->head == NULL) {
+ cache->head = cache->tail = bucket;
+ return bucket;
+ }
+ /* Add the bucket to the heap cache bucket list.*/
+ cache->tail->next = bucket;
+ cache->tail = bucket;
+ return bucket;
+}
+
+/**Given an existing object and a size, split the object so that the original
+ * has size w.
+ * If the object is already the requested size, take it without splitting.
+ * Space after the split is allocated to a new free object after the resized
+ * original.
+ * If the original == bucket->firstfree, the new object is assigned to
+ * that variable.
+ * If the heap is corrupted, assert fail.
+ * @param bucket object's parent bucket.
+ * @param parent object to split.
+ * @param w requested size for original object. */
+void
+s_obj_take_split(bucket_t *bucket, bucket_obj_t *parent, size_t w)
+{
+ DBGPRINT("malloc: requesting to split object %p (last %p) for size %i (was %i)\n",
+ bucket, parent, w, parent->size_taken);
+
+ /* Assert fail on corrupted heap.*/
+ assert(OBJ_VALID(bucket));
+ assert(OBJ_VALID(parent));
+ assert(w >= OBJ_SIZE_MIN);
+
+ size_t objw = OBJ_WIDTH(parent);
+ if(w == objw) { /* Obj is already correct size, take and ignore.*/
+ if(bucket->firstfree == parent)
+ bucket->firstfree = OBJ_AFTER(parent);
+ /* Ensure that firstfree does not exceed bucket limits.*/
+ if((uintptr_t)bucket->firstfree >= bucket->limit)
+ bucket->firstfree = NULL;
+ DBGPRINT("malloc: Object is already right size, ignoring\n");
+
+ /* Mark object as taken.*/
+ parent->size_taken = w | 1;
+ return;
+ }
+ /* New object created by split will be the original width minus the new
+ * width and metadata size.
+ *
+ * from:
+ * [meta| --------objw-------- ]
+ *
+ * to:
+ * [meta| -w- ][meta| -afterw- ]*/
+ size_t afterw = objw - sizeof(bucket_obj_t) - w;
+
+ parent->size_taken = w | 1;
+ bucket_obj_t *obj = OBJ_AFTER(parent);
+ DBGPRINT("malloc: New object created inplace at %p with size %i\n",
+ obj, afterw);
+
+ /* In-place allocate after obj.*/
+ *obj = (bucket_obj_t){
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = afterw,
+ };
+ if(bucket->firstfree == parent)
+ bucket->firstfree = obj;
+}
+
+/**Look for a free object in the given bucket that best fits size w.
+ * Uses bucket->firstfree as a starting point to skip checks against taken
+ * objects.
+ * If the heap is corrupted, assert fail.
+ * If there is no suitable object in this bucket, return NULL.
+ * @param bucket to search in.
+ * @param w to search for.
+ * @return best-fitting object, or NULL if none is found.*/
+bucket_obj_t*
+s_obj_bestfit(bucket_t *bucket, size_t w)
+{
+ assert(bucket != NULL);
+ assert(OBJ_VALID(bucket));
+
+ bucket_obj_t *best = NULL;
+ bucket_obj_t *obj = bucket->firstfree;
+ if(obj == NULL) return NULL;
+ for(; (uintptr_t)obj < bucket->limit; obj = OBJ_AFTER(obj))
+ {
+ assert(OBJ_VALID(obj));
+ if(OBJ_TAKEN(obj)) continue;
+ size_t objw = OBJ_WIDTH(obj);
+ if(objw < w) continue;
+ if(best == NULL) best = obj;
+
+ size_t bestw = OBJ_WIDTH(best);
+ if(objw < bestw) best = obj;
+ }
+
+ DBGPRINT("malloc: Bucket %p returns best fit at %p for size %i ",
+ bucket, best, w);
+ if(best != NULL) { DBGPRINT("(size %i)\n", best->size_taken); }
+ else { DBGPRINT("(is NULL)\n"); }
+
+ return best;
+}
+
+/**Search the heap, bucket by bucket, until a free object that best fits the
+ * requested width is found, and store the owning bucket through the passed
+ * pointer.
+ * If the heap is corrupted, assert fail.
+ * If no object is found in any of the buckets, return NULL.
+ * @param cache heap to search through.
+ * @param w object width to search for.
+ * @param bucket out-parameter receiving the bucket owning the returned
+ * object; left NULL if no object is found.
+ * @return object that fits the given width, or NULL.*/
+bucket_obj_t*
+s_bucket_bestfit(heap_cache_t *cache, size_t w, bucket_t **bucket)
+{
+ *bucket = cache->head;
+ for(; *bucket != NULL; *bucket = (*bucket)->next)
+ {
+ bucket_obj_t *obj = s_obj_bestfit(*bucket, w);
+ /* An object was found; don't waste cycles searching the rest of
+ * the heap.*/
+ if(obj != NULL) return obj;
+ }
+ return NULL;
+}
+
+void*
+__malloc_impl(heap_cache_t *cache, size_t w)
+{
+ assert(cache != NULL);
+ /* w == 0 is implementation-defined for malloc; just return NULL.*/
+ if(w == 0) return NULL;
+ /* Ensure that w is valid.*/
+ w = REALW(w);
+
+ DBGPRINT("malloc: Asking to malloc for size %i\n", w);
+
+ bucket_t *bucket = NULL;
+ bucket_obj_t *obj = s_bucket_bestfit(cache, w, &bucket);
+ if(obj == NULL) {
+ /* No bucket in the heap has an appropriate space.
+ * Allocate a new bucket and take the first object.
+ * If the bucket allocation fails, return (void*)-1.*/
+ bucket = s_bucket_new(cache, w);
+ if(bucket == NULL) return (void*)(-1);
+ obj = (bucket_obj_t*)bucket->objs;
+ }
+ /* Split the bucket to make the object bestfit.*/
+ s_obj_take_split(bucket, obj, w);
+ return obj->data;
+}
diff --git a/memory/alloc/malloc.h b/memory/alloc/malloc.h
new file mode 100644
index 0000000..abe87ff
--- /dev/null
+++ b/memory/alloc/malloc.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#ifndef _MALLOC_H
+#define _MALLOC_H 1
+
+/* A bucket-based malloc implementation for 2^n sized objects.
+ * Allocates memory at pbrk for use in dynamic allocations, called "objects".
+ * Each object is preceded by metadata, which stores a checksum, the width,
+ * and a bit marking it as taken.
+ *
+ * Each alloc-able region is preceded by a "bucket", which stores metadata
+ * about the bucket's state: the width, the first free object, and the next
+ * bucket in the chain.
+ *
+ * Buckets are allocated on demand. The width of a new bucket is determined
+ * by the object that needs it.
+ * Buckets are always page aligned, and their size is a multiple of the page
+ * width.
+ *
+ * Dependencies:
+ * stddef.h
+ * stdint.h
+ * errno.h
+ * assert.h
+ * assert
+ * string.h
+ * memset
+ * memcpy
+ * memory/bump.h
+ * bump_where
+ * bump_alloc
+ *
+ * with MALLOC_DBG:
+ * io/log.h
+ * klogf*/
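+
+/* Overall layout of one bucket (illustrative, not to scale):
+ *
+ * [bucket_t | bucket_obj_t | data... | bucket_obj_t | data... ] <- limit
+ *
+ * bucket->objs points at the first object's metadata; each object's data
+ * begins immediately after its own metadata.*/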
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define DBGPRINT(...)
+#ifdef MALLOC_DBG
+#include "io/log.h"
+#undef DBGPRINT
+#define DBGPRINT(...) klogf(__VA_ARGS__)
+#endif
+
+/* Bucket checksum is a 64-bit value containing the string "BUCKET\0\0"*/
+#define OBJ_CHECKSUM \
+ (((uintmax_t)'B') | \
+ ((uintmax_t)'U' << 8) | \
+ ((uintmax_t)'C' << 16) | \
+ ((uintmax_t)'K' << 24) | \
+ ((uintmax_t)'E' << 32) | \
+ ((uintmax_t)'T' << 40))
+
+/* For an object to be valid, the object must have the correct checksum.*/
+#define OBJ_VALID(obj) ((obj)->checksum == OBJ_CHECKSUM)
+
+/* Object width is always even; the lowest bit is reserved for the taken
+ * flag.*/
+#define OBJ_WIDTH(obj) ((obj)->size_taken & ~1)
+#define OBJ_TAKEN(obj) ((obj)->size_taken & 1)
+
+/* Gets the object struct after this object.
+ * Since objects are allocated in-place, we can find the next object using
+ * the object's width and its pointer to the start of data.
+ *
+ * The same holds for recovering the metadata for any given data pointer:
+ * we take the pointer and subtract the size of the object struct from it,
+ * since the metadata sits just below the data.*/
+#define OBJ_AFTER(obj) \
+ ((bucket_obj_t*)(((uintptr_t)(obj)->data) + OBJ_WIDTH(obj)))
+#define OBJ_FROM_PTR(ptr) ((bucket_obj_t*)((uintptr_t)(ptr) - sizeof(bucket_obj_t)))
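+/* Worked example (hypothetical addresses, assuming sizeof(bucket_obj_t)
+ * == 16): an object at 0x1000 with width 32 has data at 0x1010..0x102F,
+ * so OBJ_AFTER(obj) == 0x1030 and OBJ_FROM_PTR(0x1010) == 0x1000.*/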
+
+typedef struct bucket_obj
+{
+ uintmax_t checksum;
+ size_t size_taken;
+ char data[];
+} bucket_obj_t;
+
+#define LOG2(n) ((31) - __builtin_clz(n))
+/* Buckets are always aligned to a page boundary, both for cache friendliness
+ * and so that __bucket_from_obj can find owning buckets by scanning page
+ * boundaries.*/
+#define BUCKET_ALIGN (4096)
+/* Buckets should always be a multiple of the page width, expanding with the
+ * initial object size.*/
+#define BUCKET_WIDTH(w) \
+ (1UL << (12 + (w < BUCKET_ALIGN ? 0 : LOG2(w + 1) - 11)))
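+/* For example: BUCKET_WIDTH(64) == 4096 (one page), BUCKET_WIDTH(4096) ==
+ * 8192, and BUCKET_WIDTH(8192) == 16384, leaving room for metadata.*/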
+
+/* Bucket metadata is stored behind the object metadata.*/
+typedef struct bucket
+{
+ uintmax_t checksum; /* Holds the same OBJ_CHECKSUM value as objects.*/
+ struct bucket *next;
+
+ uintptr_t limit;
+ bucket_obj_t *firstfree; /* Helpful for fast object allocations.*/
+ char objs[];
+} bucket_t;
+
+/* The smallest object's overall size (data + metadata) is twice the
+ * metadata width.*/
+#define OBJ_SIZE_MIN (sizeof(bucket_obj_t))
+/* Data width is always a power of two. Exact powers of two are kept as-is
+ * so that rounding is idempotent (REALW(REALW(n)) == REALW(n)).*/
+#define REALW(n) \
+ ((n) < OBJ_SIZE_MIN ? OBJ_SIZE_MIN : \
+ ((n) & ((n) - 1)) == 0 ? (n) : (1UL << (LOG2(n) + 1)))
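+/* For example (assuming sizeof(bucket_obj_t) == 16 on LP64): REALW(10) ==
+ * 16, REALW(24) == 32, and REALW(32) == 32.*/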
+
+typedef struct heap_cache
+{
+ bucket_t *head;
+ bucket_t *tail;
+} heap_cache_t;
+
+/**Given the heap cache and an object, find the bucket to which the object
+ * belongs.
+ * Since buckets are always page aligned, we can check each page boundary
+ * for the checksum until we find it, then check the limit.
+ * Since we also know the first bucket in the cache, we can also check if the
+ * passed object is invalid and return NULL.
+ * @param heap cache.
+ * @param obj to find the bucket for.
+ * @return bucket owning obj, or NULL if obj is invalid.*/
+bucket_t *__bucket_from_obj(heap_cache_t *heap, bucket_obj_t *obj);
+
+/**Allocate w bytes into the given heap.
+ * If w == 0, returns NULL.
+ * If w is not a power of 2, rounds up to the next pow2.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param w n-bytes to allocate.
+ * @return pointer to allocated memory.*/
+void* __malloc_impl(heap_cache_t *heap, size_t w);
+
+/**Allocate a contiguous array of nmemb members each w wide.
+ * Newly allocated memory is zero'd.
+ * If w == 0 || nmemb == 0, returns NULL
+ * If w is not a power of 2, rounds up to the next pow2
+ * If the heap is corrupted, assert fail
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param nmemb number of members.
+ * @param w number of bytes per member.
+ * @return pointer to allocated memory.*/
+void* __calloc_impl(heap_cache_t *heap, size_t nmemb, size_t w);
+
+/**Resize the given allocated memory region to fit w.
+ * If w is not greater than the original width, returns p unchanged.
+ * If w == 0, frees p
+ * If the heap is corrupted, assert fail.
+ * If the heap is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w.*/
+void* __realloc_impl(heap_cache_t *heap, void *p, size_t w);
+
+/**Resize the given allocated contiguous array to fit nmemb members of w width
+ * objects.
+ * Zeros the new members excluding already existing data.
+ * If w * nmemb is not greater than the original width, returns p unchanged.
+ * If w == 0 || nmemb == 0, frees p
+ * If the heap is corrupted, assert fail.
+ * If the heap is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param nmemb number of members.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w * nmemb.*/
+void* __reallocarray_impl(heap_cache_t *heap, void *p, size_t nmemb, size_t w);
+
+/**Free a pointer in the given heap.
+ * If p == NULL, do nothing.
+ * If p does not fall in the range of heap, assert fail.
+ * If the heap is corrupted, assert fail.
+ * @param heap cache.
+ * @param p pointer to free.*/
+void __free_impl(heap_cache_t *heap, void *p);
+
+#endif
diff --git a/memory/alloc/realloc.c b/memory/alloc/realloc.c
new file mode 100644
index 0000000..1b90e8c
--- /dev/null
+++ b/memory/alloc/realloc.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "string.h"
+
+void*
+__realloc_impl(heap_cache_t *cache, void *ptr, size_t w)
+{
+ if(w == 0) {
+ DBGPRINT("realloc: called with w == 0, freeing %p\n", ptr);
+ __free_impl(cache, ptr);
+ return NULL;
+ }
+ if(ptr == NULL) {
+ DBGPRINT("realloc: called with NULL ptr, alloc %i\n", w);
+ return __malloc_impl(cache, w);
+ }
+
+ w = REALW(w);
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+
+ assert(OBJ_VALID(obj));
+ size_t objw = OBJ_WIDTH(obj);
+ if(objw >= w) return ptr;
+
+ size_t wdiff = w - objw;
+ DBGPRINT("realloc: ptr %p needs %i new bytes\n", ptr, wdiff);
+
+ bucket_obj_t *after = OBJ_AFTER(obj);
+ size_t afterw = 0;
+ bucket_t *bucket = __bucket_from_obj(cache, obj);
+ void *newptr = NULL;
+
+ if(bucket == NULL)
+ goto resize_fail;
+
+ if((uintptr_t)after >= bucket->limit) goto resize_fail;
+ assert(OBJ_VALID(after));
+ afterw = OBJ_WIDTH(after);
+
+ if(OBJ_TAKEN(after))
+ goto resize_fail;
+
+ DBGPRINT("realloc: obj %p after obj %p is free (w %i)\n", after, obj, afterw);
+ /* Growing in place needs w <= objw + afterw; the remainder object
+ * reuses after's old metadata slot.*/
+ if(objw + afterw < w)
+ goto resize_fail;
+
+ obj->size_taken = w | 1;
+ if(bucket->firstfree == after)
+ bucket->firstfree = OBJ_AFTER(obj);
+
+ after = OBJ_AFTER(obj);
+ *after = (bucket_obj_t){
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = afterw - wdiff
+ };
+ DBGPRINT("realloc: moved obj %p (%i) to resize obj %p (%i)\n",
+ after, after->size_taken, obj, obj->size_taken);
+ return ptr;
+
+resize_fail:
+ DBGPRINT("realloc: could not resize existing object, calling malloc\n");
+ newptr = __malloc_impl(cache, w);
+ /* Propagate allocation failure without touching the original object.*/
+ if(newptr == NULL || newptr == (void*)(-1)) return newptr;
+ memcpy(newptr, ptr, OBJ_WIDTH(obj));
+ __free_impl(cache, ptr);
+ return newptr;
+}
diff --git a/memory/alloc/reallocarray.c b/memory/alloc/reallocarray.c
new file mode 100644
index 0000000..c58dfe9
--- /dev/null
+++ b/memory/alloc/reallocarray.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "string.h"
+
+void*
+__reallocarray_impl(heap_cache_t *cache, void *ptr, size_t nmemb, size_t w)
+{
+ /* Free if w == 0 or nmemb == 0.*/
+ if(w == 0 || nmemb == 0) {
+ __free_impl(cache, ptr);
+ return NULL;
+ }
+
+ if(ptr == NULL)
+ return __calloc_impl(cache, nmemb, w);
+
+ /* Guard against multiplication overflow; w and nmemb are nonzero here.*/
+ if(nmemb > SIZE_MAX / w) return NULL;
+ size_t allocw = w * nmemb;
+
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+ assert(OBJ_VALID(obj));
+ size_t oldw = OBJ_WIDTH(obj);
+ if(allocw <= oldw) return ptr;
+
+ ptr = __realloc_impl(cache, ptr, allocw);
+ /* Propagate allocation failure without zeroing.*/
+ if(ptr == NULL || ptr == (void*)(-1)) return ptr;
+ size_t wdiff = allocw - oldw;
+ void *zeroat = (void*)(((uintptr_t)ptr) + oldw);
+ memset(zeroat, 0, wdiff);
+ return ptr;
+}