Diffstat (limited to 'memory/alloc/malloc.h')
-rw-r--r--  memory/alloc/malloc.h  183
1 file changed, 183 insertions, 0 deletions
diff --git a/memory/alloc/malloc.h b/memory/alloc/malloc.h
new file mode 100644
index 0000000..abe87ff
--- /dev/null
+++ b/memory/alloc/malloc.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ */
+
+#ifndef _MALLOC_H
+#define _MALLOC_H 1
+
+/* A bucket-based malloc implementation for 2^n sized objects.
+ * Allocates memory at the program break for use in dynamic allocations,
+ * called "objects". Each object is preceded by metadata, which stores a
+ * checksum, the width, and a bit marking it as taken.
+ *
+ * Each allocatable region is preceded by a "bucket", which stores metadata
+ * about the bucket's state: the width, the first free object, and the next
+ * bucket in the chain.
+ *
+ * Buckets are allocated when needed. The width of a new bucket is determined
+ * by the object that needs it.
+ * Buckets are always page aligned, and their size is a multiple of the page
+ * width (see the layout sketch below).
+ *
+ * Dependencies:
+ * stddef.h
+ * stdint.h
+ * errno.h
+ * assert.h
+ * assert
+ * string.h
+ * memset
+ * memcpy
+ * unistd.h
+ * sbrk
+ * brk
+ *
+ * with MALLOC_DBG:
+ * io/log.h
+ * klogf*/
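+
+/* Rough layout sketch (illustrative only, not to scale): one bucket holding
+ * two objects, starting on a page boundary.
+ *
+ *   [ bucket_t ][ bucket_obj_t | data ][ bucket_obj_t | data ] ...free...
+ *   ^ checksum,   ^ checksum,            ^ reached from the previous
+ *     next,         size_taken             object via OBJ_AFTER()
+ *     limit,
+ *     firstfree
+ */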
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define DBGPRINT(...)
+#ifdef MALLOC_DBG
+#include "io/log.h"
+#undef DBGPRINT
+#define DBGPRINT(...) klogf(__VA_ARGS__)
+#endif
+
+/* The checksum is a 64-bit value encoding the string "BUCKET\0\0"; it is
+ * stored in both object and bucket headers.*/
+#define OBJ_CHECKSUM \
+ (((uintmax_t)'B') | \
+ ((uintmax_t)'U' << 8) | \
+ ((uintmax_t)'C' << 16) | \
+ ((uintmax_t)'K' << 24) | \
+ ((uintmax_t)'E' << 32) | \
+ ((uintmax_t)'T' << 40))
+
+/* For an object to be valid, the object must have the correct checksum.*/
+#define OBJ_VALID(obj) (obj->checksum == OBJ_CHECKSUM)
+
+/* Object width is always even, so the lowest bit of size_taken can hold
+ * the taken flag.*/
+#define OBJ_WIDTH(obj) (obj->size_taken & ~1)
+#define OBJ_TAKEN(obj) (obj->size_taken & 1)
+
+/* Gets the object struct after this object.
+ * Since objects are allocated in place, the next object is found by adding
+ * the object's width to its pointer to the start of data.
+ *
+ * The same is true for getting the metadata for any given data pointer:
+ * subtract the size of the object struct from the pointer, since the
+ * metadata sits just behind the actual data. (A short illustrative helper
+ * follows the struct definition below.)*/
+#define OBJ_AFTER(obj) \
+ ((bucket_obj_t*)(((uintptr_t)obj->data) + OBJ_WIDTH(obj)))
+#define OBJ_FROM_PTR(ptr) ((bucket_obj_t*)((uintptr_t)(ptr) - sizeof(bucket_obj_t)))
+
+typedef struct bucket_obj
+{
+ uintmax_t checksum;
+ size_t size_taken;
+ char data[];
+} bucket_obj_t;
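+
+/* Minimal sketch (illustrative, not part of the original API): recover an
+ * object's metadata from a user data pointer, validate it, and step to the
+ * slot that follows it. The caller is still responsible for checking the
+ * owning bucket's limit before dereferencing the result. */
+static inline bucket_obj_t *__obj_next_of(void *ptr)
+{
+    bucket_obj_t *obj = OBJ_FROM_PTR(ptr); /* metadata sits just behind ptr */
+    if (!OBJ_VALID(obj))
+        return NULL;                       /* checksum mismatch: not ours */
+    return OBJ_AFTER(obj);                 /* data pointer + object width */
+}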
+
+/* Floor of log2; valid for nonzero values that fit in an unsigned int.*/
+#define LOG2(n) ((31) - __builtin_clz(n))
+/* Buckets are always aligned to the page boundary for cache friendliness.*/
+#define BUCKET_ALIGN (4096)
+/* Bucket sizes are always a multiple of the page width, growing with the
+ * size of the initial object.*/
+#define BUCKET_WIDTH(w) \
+ (1UL << (12 + (w < BUCKET_ALIGN ? 0 : LOG2(w + 1) - 11)))
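+/* Worked values (illustrative): BUCKET_WIDTH(64) == 4096 (a single page),
+ * BUCKET_WIDTH(4096) == 8192 and BUCKET_WIDTH(8192) == 16384, since
+ * LOG2(4097) == 12 and LOG2(8193) == 13.*/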
+
+/* Bucket metadata is stored immediately before the objects it owns.*/
+typedef struct bucket
+{
+ size_t checksum;
+ struct bucket *next;
+
+ uintptr_t limit;
+ bucket_obj_t *firstfree; /* Helpful for fast object allocations.*/
+ char objs[];
+} bucket_t;
+
+/* The smallest object (metadata + data) occupies twice the metadata size.*/
+#define OBJ_SIZE_MIN (sizeof(bucket_obj_t))
+/* Data width is always a power of two.*/
+#define REALW(n) \
+ (n < OBJ_SIZE_MIN ? OBJ_SIZE_MIN : (1UL << (LOG2(n) + 1)))
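+/* Worked values (illustrative), assuming sizeof(bucket_obj_t) == 16 on an
+ * LP64 target: REALW(10) == 16 (clamped to OBJ_SIZE_MIN), REALW(24) == 32,
+ * REALW(100) == 128. Note that an exact power of two is also rounded up,
+ * e.g. REALW(32) == 64.*/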
+
+typedef struct heap_cache
+{
+ bucket_t *head;
+ bucket_t *tail;
+} heap_cache_t;
+
+/**Given the heap cache and an object, find the bucket to which the object
+ * belongs.
+ * Since buckets are always page aligned, we can check each page boundary
+ * for the checksum until we find it, then check the limit.
+ * Since the first bucket in the cache is also known, an object that does
+ * not belong to the heap can be detected and NULL returned.
+ * @param heap cache.
+ * @param obj to find the bucket for.
+ * @return bucket owning obj, or NULL if obj is invalid.*/
+bucket_t *__bucket_from_obj(heap_cache_t *heap, bucket_obj_t *obj);
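+
+/* Usage sketch (illustrative helper, not part of the original API): check
+ * whether a user pointer belongs to the given heap by recovering its object
+ * metadata and asking for the owning bucket. */
+static inline int __heap_owns(heap_cache_t *heap, void *ptr)
+{
+    /* NULL from __bucket_from_obj means the object is not recognized. */
+    return __bucket_from_obj(heap, OBJ_FROM_PTR(ptr)) != NULL;
+}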
+
+/**Allocate w bytes into the given heap.
+ * If w == 0, returns NULL.
+ * If w is not a power of 2, it is rounded up to the next power of 2.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1.
+ * @param heap cache.
+ * @param w n-bytes to allocate.
+ * @return pointer to allocated memory.*/
+void* __malloc_impl(heap_cache_t *heap, size_t w);
+
+/**Allocate a contiguous array of nmemb members each w wide.
+ * Newly allocated memory is zeroed.
+ * If w == 0 || nmemb == 0, returns NULL.
+ * If w is not a power of 2, it is rounded up to the next power of 2.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1.
+ * @param heap cache.
+ * @param nmemb number of members.
+ * @param w number of bytes per member.
+ * @return pointer to allocated memory.*/
+void* __calloc_impl(heap_cache_t *heap, size_t nmemb, size_t w);
+
+/**Resize the given allocated memory region to fit w.
+ * If w is less than the original width, the allocation is left unchanged.
+ * If w == 0, frees p.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1.
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w.*/
+void* __realloc_impl(heap_cache_t *heap, void *p, size_t w);
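+
+/* Usage sketch (illustrative, not part of the original API): grow a block
+ * without leaking it on failure. Assumes the original allocation is left
+ * untouched when __realloc_impl reports OOM via the (void*)-1 sentinel. */
+static inline void *__realloc_or_keep(heap_cache_t *heap, void *p, size_t w)
+{
+    void *q = __realloc_impl(heap, p, w);
+    return q == (void*)-1 ? p : q; /* keep the old block if the resize failed */
+}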
+
+/**Resize the given allocated contiguous array to fit nmemb members of w width
+ * objects.
+ * Newly added members are zeroed; existing data is preserved.
+ * If w * nmemb is less than the original width, the allocation is left
+ * unchanged.
+ * If w == 0 || nmemb == 0, frees p.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1.
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param nmemb number of members.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w * nmemb.*/
+void* __reallocarray_impl(heap_cache_t *heap, void *p, size_t nmemb, size_t w);
+
+/**Free a pointer in the given heap.
+ * If p == NULL, do nothing.
+ * If p does not fall in the range of heap, assert fail.
+ * If the heap is corrupted, assert fail.
+ * @param heap cache.
+ * @param p pointer to free.*/
+void __free_impl(heap_cache_t *heap, void *p);
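+
+/* Usage sketch (illustrative; not part of this header): thin malloc()/free()
+ * style wrappers over the *_impl entry points, operating on a hypothetical
+ * cache supplied by the caller. The (void*)-1 OOM sentinel is translated to
+ * the conventional NULL; errno is assumed to be set already. */
+static inline void *__example_malloc(heap_cache_t *heap, size_t n)
+{
+    void *p = __malloc_impl(heap, n);
+    return p == (void*)-1 ? NULL : p;
+}
+
+static inline void __example_free(heap_cache_t *heap, void *p)
+{
+    __free_impl(heap, p); /* NULL is accepted and ignored */
+}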
+
+#endif