Diffstat (limited to 'memory/alloc/free.c')
-rw-r--r--  memory/alloc/free.c  80
1 file changed, 80 insertions, 0 deletions
diff --git a/memory/alloc/free.c b/memory/alloc/free.c
new file mode 100644
index 0000000..e2cc060
--- /dev/null
+++ b/memory/alloc/free.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+
+bucket_t*
+__bucket_from_obj(heap_cache_t *cache, bucket_obj_t *obj)
+{
+    assert(cache != NULL);
+    assert(cache->head != NULL);
+    /* Buckets are always aligned to a page boundary, and an object's
+     * metadata always lies directly below the object. We use these facts to
+     * quickly find the owning bucket.*/
+    uintptr_t objptr = (uintptr_t)obj;
+    uintptr_t bktptr = objptr & ~0xFFF;
+    while(1) {
+        /* If the page boundary is below the head, we are below the heap memory
+         * region and can stop.*/
+        if(bktptr < (uintptr_t)cache->head) return NULL;
+
+        /* At each page boundary below the object, check for the heap checksum.
+         * If it is found, we can assume that we found a bucket.*/
+        bucket_t *bucket = (bucket_t*)bktptr;
+        if(OBJ_VALID(bucket)) {
+            /* The object must lie below the bucket's limit to belong to it.*/
+            if(objptr >= bucket->limit) return NULL;
+            return bucket;
+        }
+
+        bktptr -= BUCKET_ALIGN;
+    }
+}
+
+void
+__free_impl(heap_cache_t *cache, void *ptr)
+{
+    DBGPRINT("free: asking to free ptr %p\n", ptr);
+    assert(cache != NULL);
+    /* free(NULL) does nothing.*/
+    if(ptr == NULL) return;
+
+    /* Make sure the pointer actually comes from the heap.*/
+    assert((uintptr_t)ptr < (uintptr_t)cache->tail);
+    assert((uintptr_t)ptr > (uintptr_t)cache->head);
+    bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+    assert(OBJ_VALID(obj));
+
+    /* Mask the taken bit in the object to mark it free.*/
+    DBGPRINT("free: masking taken bit in object %p (was %i, now ",
+        obj, obj->size_taken);
+    obj->size_taken &= ~1;
+    DBGPRINT("%i)\n", obj->size_taken);
+
+    /* Find the owning bucket. If the pointer does not fall inside any
+     * bucket, there is nothing further to update.*/
+    bucket_t *bucket = __bucket_from_obj(cache, obj);
+    if(bucket == NULL) return;
+
+    /* If the bucket's firstfree is above this newly freed object, this
+     * object becomes the new firstfree.*/
+    if(bucket->firstfree > obj)
+        bucket->firstfree = obj;
+
+    /* Start trying to merge this free object with the next object.
+     * If the object after is taken, do nothing.*/
+    bucket_obj_t *after = OBJ_AFTER(obj);
+    if((uintptr_t)after > bucket->limit) return;
+    assert(OBJ_VALID(after));
+    if(OBJ_TAKEN(after)) return;
+
+    /* Merge with the next object.
+     * from:
+     * [meta| -objw-][meta| -afterw-]
+     * to:
+     * [meta| ---------objw---------]*/
+    DBGPRINT("free: obj %p merging with adjacent free object %p (was %i, now ",
+        obj, after, obj->size_taken);
+    obj->size_taken += sizeof(bucket_obj_t) + after->size_taken;
+    DBGPRINT("%i)\n", obj->size_taken);
+}
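
Note on context: free.c leans on types and helper macros declared in malloc.h, which is not touched by this commit. The sketch below is one plausible shape for those declarations, inferred only from how they are used in the diff above; the field names, the checksum constant, and the BUCKET_ALIGN value are assumptions made for illustration, not the repository's actual definitions.

/* Assumed shapes for the malloc.h declarations used by free.c. */
#include <stdint.h>
#include <stddef.h>

#define BUCKET_ALIGN   0x1000          /* buckets assumed to sit on 4 KiB page boundaries */
#define HEAP_CHECKSUM  0x51DEC0DEUL    /* hypothetical magic checked by OBJ_VALID() */

typedef struct bucket_obj {
    uintptr_t checksum;                /* magic validated by OBJ_VALID() */
    size_t    size_taken;              /* payload size with bit 0 as the taken flag */
    /* the object's payload follows immediately after this header */
} bucket_obj_t;

typedef struct bucket {
    uintptr_t     checksum;            /* same magic at offset 0, so the page scan can spot it */
    uintptr_t     limit;               /* first address past this bucket's objects */
    bucket_obj_t *firstfree;           /* lowest known free object, used as a search hint */
} bucket_t;

typedef struct heap_cache {
    bucket_t *head;                    /* lowest bucket in the heap region */
    void     *tail;                    /* end of the heap region */
} heap_cache_t;

#define OBJ_VALID(o)    (((bucket_obj_t*)(o))->checksum == HEAP_CHECKSUM)
#define OBJ_TAKEN(o)    ((o)->size_taken & 1)
#define OBJ_SIZE(o)     ((o)->size_taken & ~(size_t)1)
#define OBJ_FROM_PTR(p) ((bucket_obj_t*)((uintptr_t)(p) - sizeof(bucket_obj_t)))
#define OBJ_AFTER(o)    ((bucket_obj_t*)((uintptr_t)((o) + 1) + OBJ_SIZE(o)))

Under a layout like this, both headers keep the checksum at offset 0, which is what lets __bucket_from_obj() call OBJ_VALID() on a page-aligned bucket_t* with the same macro that validates bucket_obj_t headers.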
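
__free_impl() takes the heap cache as an explicit argument, so the public entry point presumably just forwards a process-wide cache. A minimal usage sketch, assuming a global named g_heap_cache that the allocator's init path sets up (the name and the existence of such a global are assumptions; the real wrapper lives outside this diff):

/* Hypothetical public wrapper around __free_impl(). */
extern heap_cache_t g_heap_cache;   /* assumed process-wide heap state */

void
free(void *ptr)
{
    /* __free_impl() already treats NULL as a no-op, so no check is needed here. */
    __free_impl(&g_heap_cache, ptr);
}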