author    Jon Santmyer <jon@jonsantmyer.com>  2024-03-11 21:30:31 -0400
committer Jon Santmyer <jon@jonsantmyer.com>  2024-03-11 21:30:31 -0400
commit    d1ff7bcc91886626dc9060ec5fb67ee102ab7c1d (patch)
tree      8f0b5cd8aad31089131785dc6e37b659490f9955 /mem/slab.c
usermode capable kernel with logging syscall
Diffstat (limited to 'mem/slab.c')
-rw-r--r--  mem/slab.c | 243
1 file changed, 243 insertions, 0 deletions
diff --git a/mem/slab.c b/mem/slab.c
new file mode 100644
index 0000000..75b8302
--- /dev/null
+++ b/mem/slab.c
+@@ -0,0 +1,243 @@
+#include "slab.h"
+#include "memory.h"
+#include "lib/format.h"
+#include "lib/string.h"
+#include "lib/jove.h"
+#include "io/log.h"
+
+extern void *_kernel_end;
+
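+/* Watermark of the trivial bump allocator used to back new slabs. */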
+static uintptr_t s_addr_next_free;
+
+#define GENERIC_CACHEC 8
+static struct SlabCache s_generic_caches[GENERIC_CACHEC];
+
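+/* Bump-allocate `width` bytes for a new slab, ensuring the range is
+ * backed via mem_ensure_range before it is handed out. */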
+static uintptr_t
+s_next_free(size_t width)
+{
+ uintptr_t ret = s_addr_next_free;
+ s_addr_next_free += width;
+ mem_ensure_range(ret, s_addr_next_free, true, false);
+ return ret;
+}
+
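+/* Compute how many objects of `objw` bytes fit in `slabw` bytes when
+ * every object also needs a uintptr_t slot in the slab's free list. */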
+static int
+s_get_free_listw(size_t slabw, size_t objw)
+{
+ int freelistc = 1;
+ while(freelistc < 256) {
+ int maxobjc = (slabw - (freelistc * sizeof(uintptr_t))) / objw;
+ if(maxobjc <= freelistc) return maxobjc;
+ freelistc++;
+ }
+ return freelistc;
+}
+
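+/* Create a new slab for `cache` and link it after `last`. The descriptor
+ * and its free list normally live at the start of the slab; slabs holding
+ * only a few large objects keep their descriptor off-slab instead. */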
+static struct SlabDescriptor
+*s_slab_new(struct SlabCache *cache, struct SlabDescriptor *last)
+{
+ size_t slab_width = (cache->slab_pages * PAGESIZE);
+ uintptr_t descr_base = s_next_free(slab_width);
+ struct SlabDescriptor *descr = (struct SlabDescriptor*)descr_base;
+
+ size_t free_listc = s_get_free_listw(
+ slab_width - sizeof(struct SlabDescriptor),
+ cache->obj_size);
+ size_t descriptor_width = sizeof(struct SlabDescriptor)
+ + (free_listc * sizeof(uintptr_t));
+ uintptr_t obj_base = descr_base + descriptor_width;
+
+ /* Fewer than 8 objects fit beside an on-slab descriptor: keep the
+ * descriptor off-slab so the whole slab can hold objects. */
+ if(free_listc < 8) {
+ free_listc = slab_width / cache->obj_size;
+ descr = mem_alloc(sizeof(struct SlabDescriptor) + (free_listc * sizeof(uintptr_t)));
+ obj_base = descr_base;
+ }
+
+ *descr = (struct SlabDescriptor) {
+ .prev = last,
+ .next = (last == NULL ? NULL : last->next),
+ .slab_base = (void*)descr_base,
+ .obj_base = (void*)obj_base,
+ .free_count = free_listc,
+ .free_index = free_listc - 1
+ };
+ /* Splice the new slab into the chain after `last`. */
+ if(last != NULL) last->next = descr;
+ for(size_t i = 0; i < free_listc; i++) {
+ descr->free[i] = obj_base + (i * cache->obj_size);
+ }
+
+ return descr;
+}
+
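+/* Initialize `cache` for objects of `objsize` bytes, rounding the size up
+ * to 8-byte alignment and sizing each slab for roughly eight objects. */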
+void
+mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize)
+{
+ if(objsize % 8 > 0) objsize += (8 - (objsize % 8));
+ size_t pages = objsize > 512 ? (objsize >> 9) : 1;
+ *cache = (struct SlabCache){
+ .obj_size = objsize,
+ .slab_pages = pages,
+ .list_free = NULL,
+ .list_partial = NULL,
+ .list_full = NULL
+ };
+ // Clamp and NUL-terminate the name (name[] is assumed to hold SLABCACHE_NAME_LIMIT bytes).
+ size_t namelen = strlen(name);
+ if(namelen > SLABCACHE_NAME_LIMIT - 1) namelen = SLABCACHE_NAME_LIMIT - 1;
+ memcpy(cache->name, name, namelen);
+ cache->name[namelen] = '\0';
+
+ //Allocate the first slab
+ cache->list_free = s_slab_new(cache, NULL);
+}
+
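+/* Allocate one object from `cache`, growing the cache with a new slab
+ * when both the partial and free lists are empty. */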
+void*
+mem_slab_alloc(struct SlabCache *cache)
+{
+ // Get a slab with free objects: partial first, then free, then a new slab.
+ struct SlabDescriptor *slab = cache->list_partial;
+ if(slab == NULL && cache->list_free != NULL) {
+ slab = cache->list_free;
+ cache->list_free = slab->next;
+ slab->next = cache->list_partial;
+ cache->list_partial = slab;
+ }
+ if(slab == NULL) {
+ slab = s_slab_new(cache, NULL);
+ cache->list_partial = slab;
+ }
+
+ // Take an object from the slab.
+ uintptr_t objaddr = slab->free[slab->free_index];
+ slab->free_index -= 1;
+
+ // Slab exhausted: unlink it from the partial list and file it under full.
+ if(slab->free_index < 0) {
+ cache->list_partial = slab->next;
+ slab->next = cache->list_full;
+ cache->list_full = slab;
+ }
+ return (void*)objaddr;
+}
+
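+/* Return `ptr` to `cache`, moving its slab from the full list back to
+ * partial, or from partial to free once every object is returned. */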
+void
+mem_slab_free(struct SlabCache *cache, void *ptr)
+{
+ uintptr_t addr = (uintptr_t)ptr;
+ // Look for the pointer in the bounds of every full slab.
+ struct SlabDescriptor *prev = NULL;
+ for(struct SlabDescriptor *slab = cache->list_full;
+ slab != NULL; prev = slab, slab = slab->next)
+ {
+ uintptr_t base = (uintptr_t)slab->obj_base;
+ uintptr_t limit = ((uintptr_t)slab->slab_base)
+ + (cache->slab_pages * PAGESIZE);
+ if(addr >= limit || addr < base) continue;
+ if((addr - base) % cache->obj_size != 0) {
+ klogf("Tried to free offset pointer %#016X in slab %s\n",
+ addr, cache->name);
+ return;
+ }
+ slab->free_index++;
+ slab->free[slab->free_index] = addr;
+
+ // The slab has room again: move it from the full list to partial.
+ if(prev == NULL) cache->list_full = slab->next;
+ else prev->next = slab->next;
+ slab->next = cache->list_partial;
+ cache->list_partial = slab;
+ return;
+ }
+ // Not in a full slab; check the partially used slabs.
+ prev = NULL;
+ for(struct SlabDescriptor *slab = cache->list_partial;
+ slab != NULL; prev = slab, slab = slab->next)
+ {
+ uintptr_t base = (uintptr_t)slab->obj_base;
+ uintptr_t limit = ((uintptr_t)slab->slab_base)
+ + (cache->slab_pages * PAGESIZE);
+ if(addr >= limit || addr < base) continue;
+ if((addr - base) % cache->obj_size != 0) {
+ klogf("Tried to free offset pointer %#016X in slab %s\n",
+ addr, cache->name);
+ return;
+ }
+ slab->free_index++;
+ slab->free[slab->free_index] = addr;
+
+ // Every object is free again: move the slab to the free list.
+ if(slab->free_index == (slab->free_count - 1)) {
+ if(prev == NULL) cache->list_partial = slab->next;
+ else prev->next = slab->next;
+ slab->next = cache->list_free;
+ cache->list_free = slab;
+ }
+ return;
+ }
+}
+
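+/* Generic kernel allocator: route the request to the generic cache for
+ * the next power-of-two size class (64 bytes up to 8 KiB). */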
+void*
+mem_alloc(size_t width)
+{
+ // Pick the smallest size class (power of two, minimum 64 bytes) that
+ // fits `width` bytes.
+ if(width == 0) return NULL;
+ size_t width_log2 = 6;
+ while(width_log2 < GENERIC_CACHEC + 6 && (1UL << width_log2) < width)
+ width_log2++;
+ width_log2 -= 6;
+ if(width_log2 >= GENERIC_CACHEC) {
+ klogf("Allocation size %i too big for generic caches!\n", width);
+ return NULL;
+ }
+
+ struct SlabCache *generic_cache = &s_generic_caches[width_log2];
+ return mem_slab_alloc(generic_cache);
+}
+
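+/* Free a generic allocation by offering `ptr` to every generic cache;
+ * mem_slab_free ignores pointers outside a cache's slabs. */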
+void
+mem_free(void *ptr)
+{
+ for(int i = 0; i < GENERIC_CACHEC; i++) {
+ mem_slab_free(&s_generic_caches[i], ptr);
+ }
+}
+
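+/* Bootstrap the slab allocator: place the bump allocator after the kernel
+ * image and create the generic power-of-two caches. */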
+void
+mem_slab_setup(void)
+{
+ // Start carving slabs from the first page boundary after the kernel.
+ s_addr_next_free = (uintptr_t)&_kernel_end;
+ s_addr_next_free = ((s_addr_next_free >> 12) + 1) << 12;
+
+ for(int i = 0; i < GENERIC_CACHEC; i++)
+ {
+ size_t objsize = 1 << (i + 6);
+ char slab_name[SLABCACHE_NAME_LIMIT];
+ sfmt(slab_name, SLABCACHE_NAME_LIMIT, "generic_%i", (int)objsize);
+ mem_slabcache_new(&s_generic_caches[i], slab_name, objsize);
+ }
+}