path: root/memory
Diffstat (limited to 'memory')
-rw-r--r--  memory/alloc/calloc.c         21
-rw-r--r--  memory/alloc/free.c           80
-rw-r--r--  memory/alloc/malloc.c        206
-rw-r--r--  memory/alloc/malloc.h        183
-rw-r--r--  memory/alloc/realloc.c        73
-rw-r--r--  memory/alloc/reallocarray.c   37
-rw-r--r--  memory/bump.c                 33
-rw-r--r--  memory/bump.h                 10
-rw-r--r--  memory/memory.c               39
-rw-r--r--  memory/phys.c                 39
-rw-r--r--  memory/slab.c                148
-rw-r--r--  memory/zone.c                145
12 files changed, 1014 insertions, 0 deletions
diff --git a/memory/alloc/calloc.c b/memory/alloc/calloc.c
new file mode 100644
index 0000000..cd272af
--- /dev/null
+++ b/memory/alloc/calloc.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include <string.h>
+
+void*
+__calloc_impl(heap_cache_t *cache, size_t nmemb, size_t w)
+{
+	w = REALW(w);
+	/* Guard against overflow in w * nmemb before allocating.*/
+	if(nmemb != 0 && w > SIZE_MAX / nmemb) return NULL;
+	void *ptr = __malloc_impl(cache, w * nmemb);
+ if(ptr == NULL) return NULL;
+
+ /* Zero newly allocated memory as per spec.*/
+ memset(ptr, 0, w * nmemb);
+ return ptr;
+}
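
A note on the w * nmemb product above: a calloc-style allocator has to reject requests whose product would wrap around. A minimal standalone sketch of such a guard (the helper name is hypothetical, not part of this repository):

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero when nmemb * w would overflow a size_t. */
static int mul_would_overflow(size_t nmemb, size_t w)
{
	return nmemb != 0 && w > SIZE_MAX / nmemb;
}

int main(void)
{
	printf("%d\n", mul_would_overflow(16, 32));          /* 0: product fits */
	printf("%d\n", mul_would_overflow(SIZE_MAX / 2, 4)); /* 1: would wrap */
	return 0;
}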
diff --git a/memory/alloc/free.c b/memory/alloc/free.c
new file mode 100644
index 0000000..e2cc060
--- /dev/null
+++ b/memory/alloc/free.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+
+bucket_t*
+__bucket_from_obj(heap_cache_t *cache, bucket_obj_t *obj)
+{
+ assert(cache != NULL);
+ assert(cache->head != NULL);
+	/* Buckets are always aligned to a page boundary, and object metadata
+	 * always sits below the object. We use these facts to quickly find the
+	 * owning bucket.*/
+ uintptr_t objptr = (uintptr_t)obj;
+ uintptr_t bktptr = objptr & ~0xFFF;
+ while(1) {
+ /* If the page boundary is below the head, we are below the heap memory
+ * region and can stop.*/
+ if(bktptr < (uintptr_t)cache->head) return NULL;
+
+ /* At each page boundary below the object, check for the heap checksum.
+ * If it is found, we can assume that we found a bucket.*/
+ bucket_t *bucket = (bucket_t*)bktptr;
+ if(OBJ_VALID(bucket)) {
+			if(objptr >= bucket->limit) return NULL;
+ return bucket;
+ }
+
+ bktptr -= BUCKET_ALIGN;
+ }
+}
+
+void
+__free_impl(heap_cache_t *cache, void *ptr)
+{
+ DBGPRINT("free: asking to free ptr %p\n", ptr);
+ assert(cache != NULL);
+ /* free(NULL) does nothing.*/
+ if(ptr == NULL) return;
+
+	/* Make sure the pointer actually comes from the heap.*/
+	assert(cache->tail != NULL);
+	assert((uintptr_t)ptr < cache->tail->limit);
+	assert((uintptr_t)ptr > (uintptr_t)cache->head);
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+ assert(OBJ_VALID(obj));
+
+ /* Mask the taken bit in the object to mark it free.*/
+ DBGPRINT("free: masking taken bit in object %p (was %i, now ",
+ obj, obj->size_taken);
+ obj->size_taken &= ~1;
+ DBGPRINT("%i)\n", obj->size_taken);
+
+	/* If the bucket's firstfree is greater than this newly freed object,
+	 * this object becomes the new firstfree.*/
+	bucket_t *bucket = __bucket_from_obj(cache, obj);
+	if(bucket == NULL) return;
+	if(bucket->firstfree > obj)
+		bucket->firstfree = obj;
+
+ /* Start trying to merge this free object with the next object.
+ * If the object after is taken, do nothing.*/
+ bucket_obj_t *after = OBJ_AFTER(obj);
+	if((uintptr_t)after >= bucket->limit) return;
+ assert(OBJ_VALID(after));
+ if(OBJ_TAKEN(after)) return;
+
+ /* Merge with the next object.
+ * from:
+ * [meta| -objw-][meta| -afterw-]
+ * to:
+ * [meta| ---------objw---------]*/
+	DBGPRINT("free: obj %p merging with adjacent free object %p (was %i, now ",
+ obj, after, obj->size_taken);
+ obj->size_taken += sizeof(bucket_obj_t) + after->size_taken;
+ DBGPRINT("%i)\n", obj->size_taken);
+}
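
__free_impl leans on __bucket_from_obj, which walks page boundaries downward from the object until it finds the bucket checksum. A standalone userspace sketch of that scan; the header layout and magic value mirror the ones above, but the program itself is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE  4096UL
#define MAGIC 0x54454B435542ULL /* "BUCKET" packed little-endian, as in OBJ_CHECKSUM */

struct header { uint64_t magic; };

int main(void)
{
	/* One page-aligned "bucket" followed by payload bytes. */
	void *mem = aligned_alloc(PAGE, PAGE);
	struct header *hdr = mem;
	hdr->magic = MAGIC;

	uintptr_t obj = (uintptr_t)mem + 512;   /* some object inside the bucket */
	uintptr_t scan = obj & ~(PAGE - 1);     /* align down to a page boundary */
	while(((struct header *)scan)->magic != MAGIC)
		scan -= PAGE;                       /* step down page by page */

	printf("found header at %p (bucket at %p)\n", (void *)scan, mem);
	free(mem);
	return 0;
}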
diff --git a/memory/alloc/malloc.c b/memory/alloc/malloc.c
new file mode 100644
index 0000000..ba1e2d2
--- /dev/null
+++ b/memory/alloc/malloc.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "jove.h"
+#include "memory/bump.h"
+
+/**Allocate memory for a new bucket in the heap.
+ * Automatically aligns new memory to BUCKET_ALIGN.
+ * Instantiates a new free object spanning the entire bucket.
+ * Then adds the new bucket to the heap cache.
+ * If the underlying allocation fails, pass through errno and return NULL.
+ * @param cache to allocate for.
+ * @param objw width of the object the bucket is assumed to hold.
+ * @return bucket created by this function.*/
+bucket_t*
+s_bucket_new(heap_cache_t *cache, size_t objw)
+{
+ DBGPRINT("malloc: ask for new bucket for cache %p with objw %i\n", cache, objw);
+ size_t bucketw = BUCKET_WIDTH(objw);
+	/* bump_alloc hands back memory starting at the current break, so
+	 * allocate enough for the requested bucket width plus the padding
+	 * needed to reach the next page boundary.*/
+	uintptr_t pbrk = (uintptr_t)bump_where();
+	uintptr_t brko = (BUCKET_ALIGN - (pbrk % BUCKET_ALIGN)) % BUCKET_ALIGN;
+	uintptr_t nbrk = (uintptr_t)bump_alloc(bucketw + brko);
+
+	bucket_t *bucket = (bucket_t*)(nbrk + brko);
+ DBGPRINT("malloc: got new bucket at %p\n", bucket);
+
+ *bucket = (bucket_t){
+ .checksum = OBJ_CHECKSUM,
+ .next = NULL,
+ .limit = (uintptr_t)bucket + bucketw,
+ };
+ /* Create a new object spanning the entire bucket. This new object will
+ * be the first free.*/
+ bucket_obj_t *obj0 = (bucket_obj_t*)bucket->objs;
+ *obj0 = (bucket_obj_t) {
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = bucketw - sizeof(bucket_t) - sizeof(bucket_obj_t)
+ };
+ bucket->firstfree = obj0;
+
+ /* If the cache is empty, this will be the first and last bucket.*/
+ if(cache->head == NULL) {
+ cache->head = cache->tail = bucket;
+ return bucket;
+ }
+ /* Add the bucket to the heap cache bucket list.*/
+ cache->tail->next = bucket;
+ cache->tail = bucket;
+ return bucket;
+}
+
+/**Given an existing object and a size, split the object so that the original
+ * has size w.
+ * If the object is an exact fit, or too small to split, it is taken whole.
+ * Space after the split is allocated to a new free object after the resized
+ * original.
+ * If the original == bucket->firstfree, the new object is assigned to
+ * that variable.
+ * If the heap is corrupted, assert fail.
+ * @param bucket object's parent bucket.
+ * @param parent object to split.
+ * @param w requested size for original object. */
+void
+s_obj_take_split(bucket_t *bucket, bucket_obj_t *parent, size_t w)
+{
+	DBGPRINT("malloc: requesting to split object %p (bucket %p) for size %i (was %i)\n",
+			parent, bucket, w, parent->size_taken);
+
+ /* Assert fail on corrupted heap.*/
+ assert(OBJ_VALID(bucket));
+ assert(OBJ_VALID(parent));
+ assert(w >= OBJ_SIZE_MIN);
+
+	size_t objw = OBJ_WIDTH(parent);
+	if(objw < w + sizeof(bucket_obj_t) + OBJ_SIZE_MIN) {
+		/* Obj is an exact fit, or too small to split; take it whole.*/
+		if(bucket->firstfree == parent)
+			bucket->firstfree = OBJ_AFTER(parent);
+		/* Ensure that firstfree does not exceed bucket limits.*/
+		if((uintptr_t)bucket->firstfree >= bucket->limit)
+			bucket->firstfree = NULL;
+		DBGPRINT("malloc: Object cannot be split, taking it whole\n");
+
+		/* Mark object as taken, keeping its full width.*/
+		parent->size_taken = objw | 1;
+		return;
+	}
+ /* New object created by split will be the original width minus the new
+ * width and metadata size.
+ *
+ * from:
+ * [meta| --------objw-------- ]
+ *
+ * to:
+ * [meta| -w- ][meta| -afterw- ]*/
+ size_t afterw = objw - sizeof(bucket_obj_t) - w;
+
+ parent->size_taken = w | 1;
+ bucket_obj_t *obj = OBJ_AFTER(parent);
+ DBGPRINT("malloc: New object created inplace at %p with size %i\n",
+ obj, afterw);
+
+ /* In-place allocate after obj.*/
+ *obj = (bucket_obj_t){
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = afterw,
+ };
+ if(bucket->firstfree == parent)
+ bucket->firstfree = obj;
+}
+
+/* Look for a free object in the given bucket that closest matches size w.
+ * Uses bucket->firstfree as a starting point to save checks against taken
+ * objects.
+ * If the heap is corrupted, assert fail.
+ * If there are no good objects in this bucket, return NULL.
+ * @param bucket to search in.
+ * @param w to search for.
+ * @return best-fitting object or NULL if none are found.
+ * */
+bucket_obj_t*
+s_obj_bestfit(bucket_t *bucket, size_t w)
+{
+ assert(bucket != NULL);
+ assert(OBJ_VALID(bucket));
+
+ bucket_obj_t *best = NULL;
+ bucket_obj_t *obj = bucket->firstfree;
+ if(obj == NULL) return NULL;
+ for(; (uintptr_t)obj < bucket->limit; obj = OBJ_AFTER(obj))
+ {
+ assert(OBJ_VALID(obj));
+ if(OBJ_TAKEN(obj)) continue;
+ size_t objw = OBJ_WIDTH(obj);
+ if(objw < w) continue;
+ if(best == NULL) best = obj;
+
+ size_t bestw = OBJ_WIDTH(best);
+ if(objw < bestw) best = obj;
+ }
+
+ DBGPRINT("malloc: Bucket %p returns best fit at %p for size %i ",
+ bucket, best, w);
+ if(best != NULL) { DBGPRINT("(size %i)\n", best->size_taken); }
+ else { DBGPRINT("(is NULL)\n"); }
+
+ return best;
+}
+
+/**Searching through the entire heap, or until an object is found, find a
+ * free object that best fits the requested width and place the resulting
+ * bucket in the passed pointer.
+ * If the heap is corrupted, assert fail.
+ * If no object is found in any of the buckets, return NULL.
+ * @param cache heap to search through.
+ * @param w object width to search for.
+ * @param bucket out-pointer set to the bucket owning the returned object.
+ *               If no object is found, *bucket is left NULL.
+ * @return object that fits the given width.*/
+bucket_obj_t*
+s_bucket_bestfit(heap_cache_t *cache, size_t w, bucket_t **bucket)
+{
+ *bucket = cache->head;
+ for(; *bucket != NULL; *bucket = (*bucket)->next)
+ {
+ bucket_obj_t *obj = s_obj_bestfit(*bucket, w);
+ /* Object is already found, don't waste cycles searching through
+ * the entire heap.*/
+ if(obj != NULL) return obj;
+ }
+ return NULL;
+}
+
+void*
+__malloc_impl(heap_cache_t *cache, size_t w)
+{
+ assert(cache != NULL);
+	/* w == 0 is implementation-defined; just return NULL.*/
+ if(w == 0) return NULL;
+ /* Ensure that w is valid.*/
+ w = REALW(w);
+
+ DBGPRINT("malloc: Asking to malloc for size %i\n", w);
+
+ bucket_t *bucket = NULL;
+ bucket_obj_t *obj = s_bucket_bestfit(cache, w, &bucket);
+ if(obj == NULL) {
+ /* No bucket in the heap has an appropriate space.
+ * Allocate a new bucket and take the first object.
+ * If the bucket allocation fails, return -1 and pass errno through.*/
+ bucket = s_bucket_new(cache, w);
+ if(bucket == NULL) return (void*)(-1);
+ obj = (bucket_obj_t*)bucket->objs;
+ }
+ /* Split the bucket to make the object bestfit.*/
+ s_obj_take_split(bucket, obj, w);
+ return obj->data;
+}
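
The split arithmetic in s_obj_take_split is easiest to follow with concrete numbers: taking w bytes out of a free object of width objw leaves a new free object of objw - sizeof(header) - w bytes directly after it. A standalone sketch with illustrative sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Mirror of the object header layout assumed above (checksum + size/taken bit). */
struct obj_header { uintmax_t checksum; size_t size_taken; };

int main(void)
{
	size_t headerw = sizeof(struct obj_header);
	size_t objw = 4032;                  /* width of an existing free object */
	size_t w = 64;                       /* rounded request size */
	size_t afterw = objw - headerw - w;  /* leftover free object after the split */

	printf("object %zu bytes -> taken %zu + header %zu + free %zu\n",
		objw, w, headerw, afterw);
	return 0;
}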
diff --git a/memory/alloc/malloc.h b/memory/alloc/malloc.h
new file mode 100644
index 0000000..abe87ff
--- /dev/null
+++ b/memory/alloc/malloc.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#ifndef _MALLOC_H
+#define _MALLOC_H 1
+
+/* A bucket-based malloc implementation for 2^n sized objects.
+ * Allocates memory at the program break for use in dynamic allocations,
+ * called "objects".
+ * Each object is preceded by metadata, which stores a checksum, the width,
+ * and a bit marking it as taken.
+ *
+ * Each alloc-able region is preceded by a "bucket" header, which stores the
+ * bucket's checksum, its limit, the first free object, and the next bucket
+ * in the chain.
+ *
+ * Buckets are allocated when needed. The width of the new bucket is determined
+ * by the object that needs the bucket.
+ * Buckets are always page aligned, and the size is a multiple of a page width.
+ *
+ * Dependencies:
+ * stddef.h
+ * stdint.h
+ * errno.h
+ * assert.h
+ * assert
+ * string.h
+ * memset
+ * memcpy
+ * unistd.h
+ * sbrk
+ * brk
+ *
+ * with MALLOC_DBG:
+ * stdio.h
+ * printf*/
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define DBGPRINT(...)
+#ifdef MALLOC_DBG
+#include "io/log.h"
+#undef DBGPRINT
+#define DBGPRINT(...) klogf(__VA_ARGS__)
+#endif
+
+/* Bucket checksum is a 64-bit value containing the string "BUCKET\0\0"*/
+#define OBJ_CHECKSUM \
+ (((uintmax_t)'B') | \
+ ((uintmax_t)'U' << 8) | \
+ ((uintmax_t)'C' << 16) | \
+ ((uintmax_t)'K' << 24) | \
+ ((uintmax_t)'E' << 32) | \
+ ((uintmax_t)'T' << 40))
+
+/* For an object to be valid, the object must have the correct checksum.*/
+#define OBJ_VALID(obj) ((obj)->checksum == OBJ_CHECKSUM)
+
+/* Object width is assumed to be even; the lowest bit is reserved for the
+ * taken flag.*/
+#define OBJ_WIDTH(obj) ((obj)->size_taken & ~1)
+#define OBJ_TAKEN(obj) ((obj)->size_taken & 1)
+
+/* Gets the object struct after this object.
+ * Since objects are allocated in-place, we can get the next object using
+ * the object's size and its pointer to the start of data.
+ *
+ * The same is true for getting the metadata for any given data pointer:
+ * we subtract the size of the object struct from the pointer, since the
+ * metadata sits immediately below the data.*/
+#define OBJ_AFTER(obj) \
+ ((bucket_obj_t*)(((uintptr_t)obj->data) + OBJ_WIDTH(obj)))
+#define OBJ_FROM_PTR(ptr) ((bucket_obj_t*)((uintptr_t)(ptr) - sizeof(bucket_obj_t)))
+
+typedef struct bucket_obj
+{
+ uintmax_t checksum;
+ size_t size_taken;
+ char data[];
+} bucket_obj_t;
+
+#define LOG2(n) ((31) - __builtin_clz(n))
+/* Buckets are always aligned to the page boundary for cache friendliness.*/
+#define BUCKET_ALIGN (4096)
+/* Buckets should always be a multiple of the page width, expanding with the
+ * initial object size.*/
+#define BUCKET_WIDTH(w) \
+	(1UL << (12 + ((w) < BUCKET_ALIGN ? 0 : LOG2((w) + 1) - 11)))
+
+/* Bucket metadata is stored at the start of the bucket, before its objects.*/
+typedef struct bucket
+{
+ size_t checksum;
+ struct bucket *next;
+
+ uintptr_t limit;
+ bucket_obj_t *firstfree; /* Helpful for fast object allocations.*/
+ char objs[];
+} bucket_t;
+
+/* The overall size of an object (data + metadata) should be 2*metadata.*/
+#define OBJ_SIZE_MIN (sizeof(bucket_obj_t))
+/* Data width is always rounded up to a power of two.*/
+#define REALW(n) \
+	((n) < OBJ_SIZE_MIN ? OBJ_SIZE_MIN : (1UL << (LOG2((n) - 1) + 1)))
+
+typedef struct heap_cache
+{
+ bucket_t *head;
+ bucket_t *tail;
+} heap_cache_t;
+
+/**Given the heap cache and an object, find the bucket to which the object
+ * belongs.
+ * Since buckets are always page aligned, we can check each page boundary
+ * for the checksum until we find it, then check the limit.
+ * Since we also know the first bucket in the cache, we can also check if the
+ * passed object is invalid and return NULL.
+ * @param heap cache.
+ * @param obj to find the bucket for.
+ * @return bucket owning obj, or NULL if obj is invalid.*/
+bucket_t *__bucket_from_obj(heap_cache_t *heap, bucket_obj_t *obj);
+
+/**Allocate w bytes into the given heap.
+ * If w == 0, returns NULL.
+ * If w is not a power of 2, rounds to the nearest pow2.
+ * If the heap is corrupted, assert fail.
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param w n-bytes to allocate.
+ * @return pointer to allocated memory.*/
+void* __malloc_impl(heap_cache_t *heap, size_t w);
+
+/**Allocate a contiguous array of nmemb members each w wide.
+ * Newly allocated memory is zero'd.
+ * If w == 0 || nmemb == 0, returns NULL
+ * If w is not a power of 2, rounds to nearest pow2
+ * If the heap is corrupted, assert fail
+ * If the program is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param nmemb number of members.
+ * @param w number of bytes per member.
+ * @return pointer to allocated memory.*/
+void* __calloc_impl(heap_cache_t *heap, size_t nmemb, size_t w);
+
+/**Resize the given allocated memory region to fit w.
+ * If w is less than the original width, does nothing.
+ * If w == 0, frees p
+ * If the heap is corrupted, assert fail.
+ * If the heap is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w.*/
+void* __realloc_impl(heap_cache_t *heap, void *p, size_t w);
+
+/**Resize the given allocated contiguous array to fit nmemb members of w width
+ * objects.
+ * Zeros the new members excluding already existing data.
+ * If w * nmemb is less than the original width, does nothing.
+ * If w == 0 || nmemb == 0, frees p
+ * If the heap is corrupted, assert fail.
+ * If the heap is OOM, errno is assumed to be set and returns (void*)-1
+ * @param heap cache.
+ * @param p pointer to reallocate.
+ * @param nmemb number of members.
+ * @param w width to fit p to.
+ * @return pointer to memory allocated to fit w * nmemb.*/
+void* __reallocarray_impl(heap_cache_t *heap, void *p, size_t nmemb, size_t w);
+
+/**Free a pointer in the given heap.
+ * If p == NULL, do nothing.
+ * If p does not fall in the range of heap, assert fail.
+ * If the heap is corrupted, assert fail.
+ * @param heap cache.
+ * @param p pointer to free.*/
+void __free_impl(heap_cache_t *heap, void *p);
+
+#endif
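
The size classes implied by REALW and BUCKET_WIDTH are easier to see numerically: requests round up to a power of two (with a 16-byte floor standing in for OBJ_SIZE_MIN), and bucket widths grow in page-multiple powers of two with the initial object. A simplified standalone re-implementation for demonstration only, using the same GCC/Clang __builtin_clz the header relies on:

#include <stdio.h>

#define LOG2(n)  (31 - __builtin_clz((unsigned)(n)))
#define MIN_OBJ  16UL
#define REALW(n) ((n) < MIN_OBJ ? MIN_OBJ : (1UL << (LOG2((n) - 1) + 1)))
#define BUCKET_WIDTH(w) (1UL << (12 + ((w) < 4096 ? 0 : LOG2((w) + 1) - 11)))

int main(void)
{
	unsigned long sizes[] = { 1, 24, 64, 100, 4096, 10000 };
	for(int i = 0; i < 6; i++)
		printf("request %5lu -> object %5lu in a %6lu-byte bucket\n",
			sizes[i], REALW(sizes[i]), BUCKET_WIDTH(REALW(sizes[i])));
	return 0;
}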
diff --git a/memory/alloc/realloc.c b/memory/alloc/realloc.c
new file mode 100644
index 0000000..1b90e8c
--- /dev/null
+++ b/memory/alloc/realloc.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "string.h"
+
+void*
+__realloc_impl(heap_cache_t *cache, void *ptr, size_t w)
+{
+ if(w == 0) {
+ DBGPRINT("realloc: called with w == 0, freeing %p\n", ptr);
+ __free_impl(cache, ptr);
+ return NULL;
+ }
+ if(ptr == NULL) {
+ DBGPRINT("realloc: called with NULL ptr, alloc %i\n", w);
+ return __malloc_impl(cache, w);
+ }
+
+ w = REALW(w);
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+
+ assert(OBJ_VALID(obj));
+ size_t objw = OBJ_WIDTH(obj);
+	if(objw >= w) return ptr;
+
+ size_t wdiff = w - objw;
+ DBGPRINT("realloc: ptr %p needs %i new bytes\n", ptr, wdiff);
+
+ bucket_obj_t *after = OBJ_AFTER(obj);
+ size_t afterw = 0;
+ bucket_t *bucket = __bucket_from_obj(cache, obj);
+ void *newptr = NULL;
+
+ if(bucket == NULL)
+ goto resize_fail;
+
+	if((uintptr_t)after >= bucket->limit) goto resize_fail;
+ assert(OBJ_VALID(after));
+ afterw = OBJ_WIDTH(after);
+
+ if(OBJ_TAKEN(after))
+ goto resize_fail;
+
+ DBGPRINT("realloc: obj %p after obj %p is free (w %i)\n", after, obj, afterw);
+	if(objw + afterw < w)
+ goto resize_fail;
+
+ obj->size_taken = w | 1;
+ if(bucket->firstfree == after)
+ bucket->firstfree = OBJ_AFTER(obj);
+
+ after = OBJ_AFTER(obj);
+ *after = (bucket_obj_t){
+ .checksum = OBJ_CHECKSUM,
+ .size_taken = afterw - wdiff
+ };
+ DBGPRINT("realloc: moved obj %p (%i) to resize obj %p (%i)\n",
+ after, after->size_taken, obj, obj->size_taken);
+ return ptr;
+
+resize_fail:
+	DBGPRINT("realloc: could not resize existing object, calling malloc\n");
+	newptr = __malloc_impl(cache, w);
+	if(newptr == NULL) return NULL;
+	memcpy(newptr, ptr, OBJ_WIDTH(obj));
+	__free_impl(cache, ptr);
+	return newptr;
+}
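
The in-place path in __realloc_impl only succeeds when the object plus its free neighbour covers the new width; the neighbour's header is then rebuilt after the grown object with whatever width is left. A standalone sketch with illustrative sizes:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t headerw = 16;   /* assumed object header width */
	size_t objw = 64;      /* current object width */
	size_t afterw = 96;    /* width of the free neighbour after it */
	size_t w = 128;        /* requested new width */

	if(objw + afterw >= w) {
		/* Grow in place: the neighbour's header moves to obj->data + w
		 * and keeps whatever width is left over. */
		size_t leftover = objw + afterw - w;
		printf("grow in place, leftover free object: %zu bytes\n", leftover);
		printf("total span unchanged: %zu bytes\n", 2 * headerw + w + leftover);
	} else {
		printf("fall back to malloc + memcpy + free\n");
	}
	return 0;
}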
diff --git a/memory/alloc/reallocarray.c b/memory/alloc/reallocarray.c
new file mode 100644
index 0000000..c58dfe9
--- /dev/null
+++ b/memory/alloc/reallocarray.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) Jon Santmyer.
+ * This source file is released under the LGPL Version 3 as detailed in
+ * the LICENSE file provided in the following repository:
+ * https://git.jonsantmyer.com/heap/
+ * */
+
+#include "malloc.h"
+#include "assert.h"
+#include "string.h"
+
+void*
+__reallocarray_impl(heap_cache_t *cache, void *ptr, size_t nmemb, size_t w)
+{
+	/* Free if w == 0 or nmemb == 0.*/
+ if(w == 0 || nmemb == 0) {
+ __free_impl(cache, ptr);
+ return NULL;
+ }
+
+ if(ptr == NULL)
+ return __calloc_impl(cache, nmemb, w);
+
+	/* Guard against overflow in w * nmemb before reallocating.*/
+	if(nmemb != 0 && w > SIZE_MAX / nmemb) return NULL;
+	size_t allocw = w * nmemb;
+
+ bucket_obj_t *obj = OBJ_FROM_PTR(ptr);
+ assert(OBJ_VALID(obj));
+ size_t oldw = OBJ_WIDTH(obj);
+ if(allocw < oldw) return ptr;
+
+ ptr = __realloc_impl(cache, ptr, allocw);
+ size_t wdiff = allocw - oldw;
+ void *zeroat = (void*)(((uintptr_t)ptr) + oldw);
+ memset(zeroat, 0, wdiff);
+ return ptr;
+}
diff --git a/memory/bump.c b/memory/bump.c
new file mode 100644
index 0000000..a6b0228
--- /dev/null
+++ b/memory/bump.c
@@ -0,0 +1,33 @@
+#include "bump.h"
+#include "memory.h"
+
+static void *s_next_free;
+
+void*
+bump_where(void)
+{
+ return s_next_free;
+}
+
+void *bump_alloc(size_t size)
+{
+ void *ret = s_next_free;
+ s_next_free = (void*)((uintptr_t)s_next_free + size);
+ vm_ensure(
+ (uintptr_t)ret,
+ (uintptr_t)s_next_free,
+ (page_flags_t) {
+ .present = true,
+ .writeable = true,
+ .useraccess = false,
+ .executable = false
+ });
+ return ret;
+}
+
+void
+bump_setup(void)
+{
+ extern void *_kernel_end;
+ s_next_free = (void*)&_kernel_end;
+}
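
bump_alloc is the classic bump/watermark allocator: hand back the current cursor, advance it by the request, and make sure the backing pages exist. A standalone userspace sketch of the same idea, backed by a static arena instead of vm_ensure():

#include <stdio.h>
#include <stddef.h>

static unsigned char arena[1 << 16]; /* fixed backing store for the sketch */
static size_t cursor;

static void *bump(size_t size)
{
	if(cursor + size > sizeof arena) return NULL; /* out of arena space */
	void *ret = &arena[cursor];
	cursor += size;
	return ret;
}

int main(void)
{
	void *a = bump(4096);
	void *b = bump(4096);
	printf("a=%p b=%p (4096 bytes apart)\n", a, b);
	return 0;
}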
diff --git a/memory/bump.h b/memory/bump.h
new file mode 100644
index 0000000..5b1c2b7
--- /dev/null
+++ b/memory/bump.h
@@ -0,0 +1,10 @@
+#ifndef JOVE_MEMORY_BUMP_H
+#define JOVE_MEMORY_BUMP_H 1
+
+#include <stddef.h>
+
+void *bump_where(void);
+void *bump_alloc(size_t size);
+void bump_setup(void);
+
+#endif
diff --git a/memory/memory.c b/memory/memory.c
new file mode 100644
index 0000000..2a0e8f2
--- /dev/null
+++ b/memory/memory.c
@@ -0,0 +1,39 @@
+#include "memory.h"
+#include "alloc/malloc.h"
+#include "zone.h"
+#include "bump.h"
+
+heap_cache_t heap_cache;
+
+void*
+kmalloc(size_t width)
+{
+ return __malloc_impl(&heap_cache, width);
+}
+
+void*
+krealloc(void *ptr, size_t width)
+{
+ return __realloc_impl(&heap_cache, ptr, width);
+}
+
+void
+kfree(void *ptr)
+{
+ __free_impl(&heap_cache, ptr);
+}
+
+void
+mm_setup_early(void)
+{
+ pm_zone_setup();
+ vm_setup_early();
+}
+
+void
+mm_setup(void)
+{
+ heap_cache = (heap_cache_t) { NULL, NULL };
+ bump_setup();
+ vm_setup();
+}
diff --git a/memory/phys.c b/memory/phys.c
new file mode 100644
index 0000000..4d9dbc1
--- /dev/null
+++ b/memory/phys.c
@@ -0,0 +1,39 @@
+#include "memory.h"
+#include "zone.h"
+#include "jove.h"
+
+void
+pm_reserve(physptr_t start, physptr_t end)
+{
+ size_t zone = pm_zone_for(start);
+ size_t limit = pm_zone_bound_upper(zone);
+
+ if(end > limit) {
+ pm_reserve(limit, end);
+ end = limit;
+ }
+	pm_zone_resv(zone, start, end);
+}
+
+void
+pm_release(physptr_t start, physptr_t end)
+{
+ size_t zone = pm_zone_for(start);
+ size_t limit = pm_zone_bound_upper(zone);
+
+ if(end > limit) {
+ pm_release(limit, end);
+ end = limit;
+ }
+	pm_zone_free(zone, start, end);
+}
+
+physptr_t
+pm_alloc(size_t pages)
+{
+ if(pm_zone_pages_free(MEM_ZONE_HIGHER) >= pages)
+ return pm_zone_alloc(MEM_ZONE_HIGHER, pages);
+ if(pm_zone_pages_free(MEM_ZONE_STANDARD) >= pages)
+ return pm_zone_alloc(MEM_ZONE_STANDARD, pages);
+ kpanic("Kernel ran out of physical memory!\n");
+}
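
pm_reserve and pm_release split a physical range at the owning zone's upper bound and recurse on the remainder before clipping. A standalone sketch of that splitting, with a made-up 4 GiB boundary standing in for the real zone limit:

#include <stdio.h>
#include <stdint.h>

#define STANDARD_LIMIT 0x100000000ULL /* illustrative zone boundary (4 GiB) */

/* Print which zone each part of [start, end) lands in, splitting at the
 * boundary the same way pm_reserve/pm_release recurse above. */
static void mark_range(uint64_t start, uint64_t end)
{
	if(start < STANDARD_LIMIT && end > STANDARD_LIMIT) {
		mark_range(STANDARD_LIMIT, end); /* handle the tail first */
		end = STANDARD_LIMIT;
	}
	printf("%-8s %#llx -> %#llx\n",
		start < STANDARD_LIMIT ? "standard" : "higher",
		(unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	mark_range(0xFFFF0000ULL, 0x100010000ULL); /* straddles the boundary */
	return 0;
}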
diff --git a/memory/slab.c b/memory/slab.c
new file mode 100644
index 0000000..dc8aeda
--- /dev/null
+++ b/memory/slab.c
@@ -0,0 +1,148 @@
+#include "slab.h"
+#include "bump.h"
+#include "memory.h"
+#include "string.h"
+#include "jove.h"
+#include "print.h"
+#include "klib/format.h"
+
+static int
+s_get_free_listw(size_t slabw, size_t objw)
+{
+ int freelistc = 1;
+ while(freelistc < 256) {
+ int maxobjc = (slabw - (freelistc * sizeof(uintptr_t))) / objw;
+ if(maxobjc <= freelistc) return maxobjc;
+ freelistc++;
+ }
+ return freelistc;
+}
+
+static slab_t
+*s_slab_new(slab_cache_t *cache, slab_t *last)
+{
+ size_t slab_width = (cache->slab_pages << PAGE_SHIFT);
+ uintptr_t descr_base = (uintptr_t)bump_alloc(slab_width);
+ slab_t *descr = (slab_t*)descr_base;
+
+ size_t free_listc = s_get_free_listw(
+ slab_width - sizeof(slab_t),
+ cache->obj_size);
+ size_t descriptor_width = sizeof(slab_t)
+ + (free_listc * sizeof(uintptr_t));
+ uintptr_t obj_base = descr_base + descriptor_width;
+
+ if(free_listc < 8) {
+ free_listc = ((slab_width - sizeof(slab_t)) / cache->obj_size);
+ descr = kmalloc(sizeof(slab_t) + (free_listc * sizeof(uintptr_t)));
+ obj_base = descr_base;
+ }
+ cache->obj_capacity += free_listc;
+
+ *descr = (slab_t) {
+ .prev = last,
+ .next = (last == NULL ? NULL : last->next),
+ .slab_base = (void*)descr_base,
+ .obj_base = (void*)obj_base,
+ .free_count = free_listc,
+ .free_index = free_listc - 1
+ };
+ for(size_t i = 0; i < free_listc; i++) {
+ descr->free[i] = obj_base + (i * cache->obj_size);
+ }
+
+ return descr;
+}
+
+void
+slabcache_new(slab_cache_t *cache, char *name, size_t objsize)
+{
+ if(objsize % 8 > 0) objsize += (8 - (objsize % 8));
+ size_t pages = objsize > 512 ? (objsize >> 9) : 1;
+ *cache = (slab_cache_t){
+ .obj_size = objsize,
+ .slab_pages = pages,
+ .list_free = NULL,
+ .list_partial = NULL,
+ .list_full = NULL
+ };
+ size_t namelen = strlen(name);
+ namelen = namelen > 32 ? 32 : namelen;
+ memcpy(cache->name, name, namelen);
+
+ //Allocate the first slab
+ cache->list_free = s_slab_new(cache, NULL);
+}
+
+void*
+slab_alloc(slab_cache_t *cache)
+{
+	// Get a partially used slab, falling back to a free slab
+	slab_t *slab = NULL;
+	if(cache->list_partial != NULL) slab = cache->list_partial;
+	if(slab == NULL && cache->list_free != NULL) {
+		slab = cache->list_free;
+		cache->list_free = slab->next;
+		slab->next = NULL;
+	}
+	if(slab == NULL) slab = s_slab_new(cache, NULL);
+	cache->list_partial = slab;
+
+	// Take an object from the slab.
+	uintptr_t objaddr = slab->free[slab->free_index];
+	slab->free_index -= 1;
+
+	// If the slab is now exhausted, move it to the full list.
+	if(slab->free_index < 0) {
+		cache->list_partial = slab->next;
+		slab->next = cache->list_full;
+		cache->list_full = slab;
+	}
+	return (void*)objaddr;
+}
+
+void
+slab_free(slab_cache_t *cache, void *ptr)
+{
+	uintptr_t addr = (uintptr_t)ptr;
+	//Look for the pointer in the bounds of every full slab
+	slab_t *prev = NULL;
+	for(slab_t *slab = cache->list_full;
+		slab != NULL; prev = slab, slab = slab->next)
+	{
+		uintptr_t base = (uintptr_t)slab->obj_base;
+		uintptr_t limit = ((uintptr_t)slab->slab_base)
+			+ (cache->slab_pages << PAGE_SHIFT);
+		if(addr >= limit || addr < base) continue;
+		if((addr - base) % cache->obj_size != 0) {
+			klogf("Tried to free offset pointer %#016X in slab %s\n",
+					addr, cache->name);
+			return;
+		}
+		slab->free_index++;
+		slab->free[slab->free_index] = addr;
+
+		//Unlink from the full list and move to the partial list
+		if(prev == NULL) cache->list_full = slab->next;
+		else prev->next = slab->next;
+		slab->next = cache->list_partial;
+		cache->list_partial = slab;
+		return;
+	}
+	//Then look through every partially used slab
+	prev = NULL;
+	for(slab_t *slab = cache->list_partial;
+		slab != NULL; prev = slab, slab = slab->next)
+	{
+		uintptr_t base = (uintptr_t)slab->obj_base;
+		uintptr_t limit = ((uintptr_t)slab->slab_base)
+			+ (cache->slab_pages << PAGE_SHIFT);
+		if(addr >= limit || addr < base) continue;
+		if((addr - base) % cache->obj_size != 0) {
+			klogf("Tried to free offset pointer %#016X in slab %s\n",
+					addr, cache->name);
+			return;
+		}
+		slab->free_index++;
+		slab->free[slab->free_index] = addr;
+
+		//If the slab is entirely free again, move it to the free list
+		if(slab->free_index == ((int)slab->free_count - 1)) {
+			if(prev == NULL) cache->list_partial = slab->next;
+			else prev->next = slab->next;
+			slab->next = cache->list_free;
+			cache->list_free = slab;
+		}
+		return;
+	}
+}
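
Each slab above keeps a stack of free object addresses: slab_alloc pops free[free_index], slab_free pushes the address back. A standalone sketch of that free-stack idea for a single slab (sizes and names are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define OBJ_SIZE  64
#define OBJ_COUNT 8

static unsigned char slab[OBJ_SIZE * OBJ_COUNT]; /* object storage */
static uintptr_t free_stack[OBJ_COUNT];          /* free object addresses */
static int free_index;                           /* top of the free stack */

static void slab_init(void)
{
	for(int i = 0; i < OBJ_COUNT; i++)
		free_stack[i] = (uintptr_t)&slab[i * OBJ_SIZE];
	free_index = OBJ_COUNT - 1;
}

static void *obj_alloc(void)
{
	if(free_index < 0) return NULL;          /* slab is full */
	return (void*)free_stack[free_index--];  /* pop a free address */
}

static void obj_free(void *ptr)
{
	free_stack[++free_index] = (uintptr_t)ptr; /* push it back */
}

int main(void)
{
	slab_init();
	void *a = obj_alloc();
	void *b = obj_alloc();
	printf("a=%p b=%p\n", a, b);
	obj_free(a);
	printf("reused: %d\n", obj_alloc() == a); /* 1: last freed is reused first */
	return 0;
}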
diff --git a/memory/zone.c b/memory/zone.c
new file mode 100644
index 0000000..4201cee
--- /dev/null
+++ b/memory/zone.c
@@ -0,0 +1,145 @@
+#include "zone.h"
+#include "boot.h"
+#include "memory.h"
+#include "jove.h"
+#include "string.h"
+#include "print.h"
+
+#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> PAGE_SHIFT)
+
+static uintmax_t
+ s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
+static uintmax_t*
+ s_zone_standard_freemap_blocks[MEM_BUDDY_ORDERS];
+
+static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
+{
+ {
+ .name = "Standard",
+ .base = MEM_ZONE_STANDARD_BASE,
+ .limit = MEM_ZONE_STANDARD_LIMIT,
+ .freemap = {
+ .orders = MEM_BUDDY_ORDERS,
+ .bits = MEM_ZONE_STANDARD_PAGES,
+ .free = 0,
+ .blocks = s_zone_standard_freemap_blocks
+ }
+ },
+ {
+ .name = "Higher",
+ .base = MEM_ZONE_HIGHER_BASE,
+ .limit = -1,
+ .freemap = {
+ .orders = MEM_BUDDY_ORDERS
+ }
+ }
+};
+
+int
+pm_zone_for(uintptr_t addr)
+{
+ addr &= ~PAGE_MASK;
+ for(size_t zonei = 0; zonei < MEM_ZONE_COUNT; zonei++)
+ {
+ struct PhysicalMemoryZone *pmz = &s_zones[zonei];
+ if(addr >= pmz->base && addr < pmz->limit) return zonei;
+ }
+ return -1;
+}
+
+uintptr_t
+pm_zone_bound_lower(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].base;
+}
+
+uintptr_t
+pm_zone_bound_upper(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].limit;
+}
+
+size_t
+pm_zone_pages_free(size_t zone)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+ return s_zones[zone].freemap.free;
+}
+
+void
+_zone_resv(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+ buddy_mark_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+void
+_zone_free(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+ buddy_free_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+int
+pm_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
+{
+ assert(zone < MEM_ZONE_COUNT);
+
+ size_t base_off = base % PAGE_SIZE;
+
+ size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+ size_t limit_real = limit & ~PAGE_MASK;
+ _zone_resv(&s_zones[zone], base_real, limit_real);
+ return 0;
+}
+
+int
+pm_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
+{
+ assert(zone < MEM_ZONE_COUNT);
+
+ size_t base_off = base % PAGE_SIZE;
+
+ size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+ size_t limit_real = limit & ~PAGE_MASK;
+ _zone_free(&s_zones[zone], base_real, limit_real);
+ return 0;
+}
+
+uintptr_t
+pm_zone_alloc(size_t zone, size_t pages)
+{
+ if(zone >= MEM_ZONE_COUNT) return 0;
+
+ struct PhysicalMemoryZone *pmz = &s_zones[zone];
+ intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
+ if(pagei < 0) {
+ return 0;
+ }
+
+ return (((uintmax_t)pagei) << PAGE_SHIFT) + pmz->base;
+}
+
+void
+pm_zone_setup(void)
+{
+ struct PhysicalMemoryZone *standard_zone = &s_zones[MEM_ZONE_STANDARD];
+ uintmax_t *map_block_layer_base = s_zone_standard_freemap_blocks_flat;
+ for(size_t i = 0; i < MEM_BUDDY_ORDERS; i++) {
+ size_t layer_entries = (standard_zone->freemap.bits / BUDDY_BLOCK_BITS) >> i;
+ standard_zone->freemap.blocks[i] = map_block_layer_base;
+ memset(map_block_layer_base, 0xFF, layer_entries * sizeof(uintmax_t));
+ map_block_layer_base = &map_block_layer_base[layer_entries];
+ }
+
+ for(int i = 0; i < boot_memorymap.count; i++) {
+ struct MemoryMapEntry *entry = &boot_memorymap.entries[i];
+ kdbgf("%2i\t%#016X -> %#016X (%i)\n",
+ i, entry->base, entry->base + entry->length, entry->usable);
+ if(entry->base > MEM_ZONE_STANDARD_LIMIT) continue;
+ size_t limit = entry->base + entry->length;
+ if(limit > MEM_ZONE_STANDARD_LIMIT) limit = MEM_ZONE_STANDARD_LIMIT;
+ if(entry->usable)
+ pm_zone_free(MEM_ZONE_STANDARD, entry->base, limit);
+ }
+}
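
pm_zone_free and pm_zone_resv only touch whole pages: the base is rounded up to the next page boundary and the limit rounded down before any freemap bits change. A standalone sketch of that rounding, assuming a 4096-byte page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

int main(void)
{
	uintptr_t base = 0x1234, limit = 0x7FF0; /* arbitrary byte addresses */

	uintptr_t base_real = (base & ~PAGE_MASK) + ((base & PAGE_MASK) ? PAGE_SIZE : 0);
	uintptr_t limit_real = limit & ~PAGE_MASK;

	printf("pages %#lx -> %#lx (page indices %lu..%lu)\n",
		(unsigned long)base_real, (unsigned long)limit_real,
		(unsigned long)(base_real >> 12), (unsigned long)(limit_real >> 12));
	return 0;
}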