Diffstat (limited to 'mem')
-rw-r--r--   mem/buddymap.c   151
-rw-r--r--   mem/buddymap.d     2
-rw-r--r--   mem/buddymap.h    21
-rw-r--r--   mem/memory.h      49
-rw-r--r--   mem/phys.c        14
-rw-r--r--   mem/phys.d         1
-rw-r--r--   mem/slab.c       201
-rw-r--r--   mem/slab.d         2
-rw-r--r--   mem/slab.h        33
9 files changed, 474 insertions, 0 deletions
diff --git a/mem/buddymap.c b/mem/buddymap.c
new file mode 100644
index 0000000..5165876
--- /dev/null
+++ b/mem/buddymap.c
@@ -0,0 +1,151 @@
+#include "buddymap.h"
+#include "lib/string.h"
+#include "boot/boot.h"
+#include "io/log.h"
+#include <stdbool.h>
+
+#define ENTRY_BITS (sizeof(uintmax_t) * 8)
+#define ENTRY_SIZE (ENTRY_BITS * PAGESIZE)
+#define ENTRY_COUNT (MEMMAP_BUDDY_LIMIT / ENTRY_SIZE)
+#define BUDDY_LAYERS 4
+
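+/* One bitmap per layer: a bit at layer l covers a block of (1 << l) pages,
+ * so layer 0 tracks 4 KiB blocks and layer 3 tracks 32 KiB blocks. A set
+ * bit means the block is allocated or reserved; each layer needs half as
+ * many bits as the one below it. */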
+uintmax_t s_buddy_l0[ENTRY_COUNT];
+uintmax_t s_buddy_l1[ENTRY_COUNT >> 1];
+uintmax_t s_buddy_l2[ENTRY_COUNT >> 2];
+uintmax_t s_buddy_l3[ENTRY_COUNT >> 3];
+uintmax_t *s_buddies[BUDDY_LAYERS] = {
+ s_buddy_l0,
+ s_buddy_l1,
+ s_buddy_l2,
+ s_buddy_l3
+};
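+/* Next-fit search hints, one per layer; bit 0 stays reserved so that a
+ * return value of 0 can mean "nothing free". */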
+size_t s_buddies_lastfree[BUDDY_LAYERS] = { 1, 1, 1, 1 };
+
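+/* Test whether bit i at layer l is set (block allocated or reserved). */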
+static bool
+s_buddy_test(size_t l, size_t i)
+{
+ return (s_buddies[l][i / (ENTRY_BITS)] & (1ULL << (i % (ENTRY_BITS)))) > 0;
+}
+
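+/* Mark block i at layer l allocated: set every overlapping bit in the
+ * layers below it and the covering parent bit in each layer above it. */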
+static void
+s_buddy_set(size_t l, size_t i)
+{
+ size_t j = i << l;
+ size_t w = 1 << l;
+ for(int layer = 0; layer < BUDDY_LAYERS; layer++)
+ {
+ if(w == 0) w = 1;
+		for(size_t bit = 0; bit < w; bit++) {
+			size_t entry = (j + bit) / ENTRY_BITS;
+			s_buddies[layer][entry] |= (1ULL << ((j + bit) % ENTRY_BITS));
+ }
+ j >>= 1;
+ w >>= 1;
+ }
+}
+
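+/* Free block i at layer l: clear the covered bits in the layers below,
+ * and merge upward while both child blocks of a parent are free. */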
+static void
+s_buddy_unset(size_t l, size_t i)
+{
+	size_t j = i << l;
+	size_t w = 1 << l;
+	for(int layer = 0; layer < BUDDY_LAYERS; layer++)
+	{
+		if(w == 0) {
+			/* Above the freed layer: clear the parent bit only
+			 * when both of its children below are free. */
+			size_t lower = (j << 1) % ENTRY_BITS;
+			size_t other = lower + 1;
+			size_t entry = (j << 1) / ENTRY_BITS;
+			if((s_buddies[layer-1][entry] & (1ULL << lower)) == 0 &&
+			   (s_buddies[layer-1][entry] & (1ULL << other)) == 0)
+				s_buddies[layer][j / ENTRY_BITS] &= ~(1ULL << (j % ENTRY_BITS));
+		}
+
+		for(size_t bit = 0; bit < w; bit++) {
+			size_t entry = (j + bit) / ENTRY_BITS;
+			s_buddies[layer][entry] &= ~(1ULL << ((j + bit) % ENTRY_BITS));
+		}
+		j >>= 1;
+		w >>= 1;
+	}
+}
+
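+/* Mark every block overlapping [base, base + length) allocated in every
+ * layer; partial overlap rounds outward so reservations stay safe. */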
+void
+mem_buddy_set_range(uintptr_t base, size_t length)
+{
+	for(int l = 0; l < BUDDY_LAYERS; l++) {
+		size_t blocksize = PAGESIZE << l;
+		size_t biti = base / blocksize;
+		size_t bite = (base + length + blocksize - 1) / blocksize;
+
+		if(bite == biti) bite = biti + 1;
+		for(size_t i = biti; i < bite; i++) {
+			size_t entry = i / ENTRY_BITS;
+			s_buddies[l][entry] |= 1ULL << (i % ENTRY_BITS);
+		}
+	}
+}
+
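+/* Free only the blocks lying entirely inside [base, base + length);
+ * partial blocks at either edge stay reserved. */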
+void
+mem_buddy_free_range(uintptr_t base, size_t length)
+{
+	for(int l = 0; l < BUDDY_LAYERS; l++) {
+		size_t blocksize = PAGESIZE << l;
+		size_t biti = (base + blocksize - 1) / blocksize;
+		size_t bite = (base + length) / blocksize;
+
+		for(size_t i = biti; i < bite; i++) {
+			size_t entry = i / ENTRY_BITS;
+			s_buddies[l][entry] &= ~(1ULL << (i % ENTRY_BITS));
+		}
+	}
+}
+
+
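+/* Take the first free block found at layer l, scanning from the per-layer
+ * next-fit hint, and mark it allocated. Returns 0 when nothing is free. */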
+uintptr_t
+mem_buddy_takefree(size_t l)
+{
+ uintmax_t *layer = s_buddies[l];
+ size_t lastfree = s_buddies_lastfree[l];
+ if(s_buddy_test(l, lastfree)) lastfree = 0;
+ size_t entries = ENTRY_COUNT >> l;
+ for(size_t i = lastfree / ENTRY_BITS; i < entries; i++) {
+ uintmax_t entry = layer[i];
+ if(entry == (uintmax_t)-1LL) continue;
+ for(size_t j = 0; j < ENTRY_BITS; j++) {
+ if((entry & (1ULL << j)) == 0) {
+ size_t bit = (i * ENTRY_BITS) + j;
+ s_buddies_lastfree[l] = bit + 1;
+ s_buddy_set(l, bit);
+ return bit * (PAGESIZE << l);
+ }
+ }
+ }
+ return 0;
+}
+
+void
+mem_buddy_setup()
+{
+ memset(s_buddy_l0, 0xFF, sizeof(s_buddy_l0));
+ memset(s_buddy_l1, 0xFF, sizeof(s_buddy_l1));
+ memset(s_buddy_l2, 0xFF, sizeof(s_buddy_l2));
+ memset(s_buddy_l3, 0xFF, sizeof(s_buddy_l3));
+
+ for(int i = 0; i < boot_memorymap.count; i++) {
+ struct MemoryMapEntry *entry = &boot_memorymap.entries[i];
+ klogf("%2i\t%#016X -> %#016X (%i)\n",
+ i, entry->base, entry->base + entry->length, entry->usable);
+		if(entry->base >= MEMMAP_BUDDY_LIMIT) continue;
+		size_t length = entry->length;
+		if(entry->base + length > MEMMAP_BUDDY_LIMIT)
+			length = MEMMAP_BUDDY_LIMIT - entry->base;
+		if(entry->usable)
+			mem_buddy_free_range(entry->base, length);
+ }
+
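+	/* Keep address 0 allocated in every layer so a zero return from
+	 * mem_buddy_takefree unambiguously means allocation failure. */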
+ s_buddies[0][0] |= 1;
+ s_buddies[1][0] |= 1;
+ s_buddies[2][0] |= 1;
+ s_buddies[3][0] |= 1;
+}
diff --git a/mem/buddymap.d b/mem/buddymap.d
new file mode 100644
index 0000000..7132cfe
--- /dev/null
+++ b/mem/buddymap.d
@@ -0,0 +1,2 @@
+mem/buddymap.o: mem/buddymap.c mem/buddymap.h mem/memory.h mem/slab.h \
+ lib/string.h boot/boot.h io/log.h
diff --git a/mem/buddymap.h b/mem/buddymap.h
new file mode 100644
index 0000000..2f4f5dc
--- /dev/null
+++ b/mem/buddymap.h
@@ -0,0 +1,21 @@
+#ifndef JOVE_MEMORY_BUDDYMAP_H
+#define JOVE_MEMORY_BUDDYMAP_H 1
+
+#include "memory.h"
+#include <stdint.h>
+#include <stddef.h>
+
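+/* The buddy maps only track the first 4 GiB of physical memory. */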
+#define MEMMAP_BUDDY_LIMIT (4 * GiB)
+
+void mem_buddy_set_range(uintptr_t base, size_t length);
+void mem_buddy_free_range(uintptr_t base, size_t length);
+uintptr_t mem_buddy_takefree(size_t layer);
+
+#define mem_buddy_takefree_4k() mem_buddy_takefree(0)
+#define mem_buddy_takefree_8k() mem_buddy_takefree(1)
+#define mem_buddy_takefree_16k() mem_buddy_takefree(2)
+#define mem_buddy_takefree_32k() mem_buddy_takefree(3)
+
+void mem_buddy_setup(void);
+
+#endif
diff --git a/mem/memory.h b/mem/memory.h
new file mode 100644
index 0000000..eb30217
--- /dev/null
+++ b/mem/memory.h
@@ -0,0 +1,49 @@
+#ifndef JOVE_MEM_H
+#define JOVE_MEM_H 1
+
+#define PAGESIZE 4096ULL
+#define KiB 1024ULL
+#define MiB (KiB * KiB)
+#define GiB (MiB * KiB)
+#define TiB (GiB * KiB)
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+typedef uintptr_t physptr_t;
+
+#include "slab.h"
+
+/*Linear*/
+void mem_paging_setup(void);
+
+physptr_t mem_linear_tophys(uintptr_t virt);
+
+/**Check whether a pointer lies within valid memory.
+ * @param ptr pointer to check.
+ * @return true if the pointer is invalid.*/
+bool mem_check_ptr(const void *ptr);
+
+/**Make sure the indicated range is available in memory,
+ * allocating new pages with the passed flags if necessary.
+ * @param from start of the range.
+ * @param to end of the range.
+ * @param rw flag to mark the pages writeable.
+ * @param user flag to mark the pages user-accessible.*/
+void mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user);
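+/* e.g. mem_ensure_range(base, base + PAGESIZE, true, false) would back one
+ * writeable, kernel-only page at a page-aligned `base` (illustrative only). */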
+
+void mem_slab_setup(void);
+void mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize);
+
+void* mem_slab_alloc(struct SlabCache *cache);
+void mem_slab_free(struct SlabCache *cache, void *ptr);
+
+void* mem_alloc(size_t width);
+void mem_free(void *ptr);
+
+/*Physical*/
+
+physptr_t mem_phys_take4k(void);
+void mem_phys_reserve(physptr_t start, size_t len);
+
+#endif
diff --git a/mem/phys.c b/mem/phys.c
new file mode 100644
index 0000000..e56a4d6
--- /dev/null
+++ b/mem/phys.c
@@ -0,0 +1,14 @@
+#include "memory.h"
+#include "buddymap.h"
+
+physptr_t
+mem_phys_take4k(void)
+{
+ return mem_buddy_takefree_4k();
+}
+
+void
+mem_phys_reserve(physptr_t start, size_t len)
+{
+ mem_buddy_set_range(start, len);
+}
diff --git a/mem/phys.d b/mem/phys.d
new file mode 100644
index 0000000..192b714
--- /dev/null
+++ b/mem/phys.d
@@ -0,0 +1 @@
+mem/phys.o: mem/phys.c mem/memory.h mem/slab.h mem/buddymap.h
diff --git a/mem/slab.c b/mem/slab.c
new file mode 100644
index 0000000..75b8302
--- /dev/null
+++ b/mem/slab.c
@@ -0,0 +1,201 @@
+#include "slab.h"
+#include "memory.h"
+#include "lib/format.h"
+#include "lib/string.h"
+#include "lib/jove.h"
+#include "io/log.h"
+
+extern void *_kernel_end;
+
+static uintptr_t s_addr_next_free;
+
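+/* Generic power-of-two caches backing mem_alloc: 64 B up to 8 KiB. */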
+#define GENERIC_CACHEC 8
+static struct SlabCache s_generic_caches[GENERIC_CACHEC];
+
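+/* Bump-allocate backing memory for new slabs, mapping pages on demand
+ * through mem_ensure_range. */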
+static uintptr_t
+s_next_free(size_t width)
+{
+ uintptr_t ret = s_addr_next_free;
+ s_addr_next_free += width;
+ mem_ensure_range(ret, s_addr_next_free, true, false);
+ return ret;
+}
+
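+/* Work out how many objects fit in a slab of slabw bytes when each object
+ * also needs one uintptr_t free-list slot alongside it. */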
+static int
+s_get_free_listw(size_t slabw, size_t objw)
+{
+ int freelistc = 1;
+ while(freelistc < 256) {
+ int maxobjc = (slabw - (freelistc * sizeof(uintptr_t))) / objw;
+ if(maxobjc <= freelistc) return maxobjc;
+ freelistc++;
+ }
+ return freelistc;
+}
+
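+/* Create a new slab and its descriptor. The descriptor normally lives at
+ * the start of the slab; when that would waste too much room it is
+ * allocated off-slab via mem_alloc instead. */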
+static struct SlabDescriptor
+*s_slab_new(struct SlabCache *cache, struct SlabDescriptor *last)
+{
+ size_t slab_width = (cache->slab_pages * PAGESIZE);
+ uintptr_t descr_base = s_next_free(slab_width);
+ struct SlabDescriptor *descr = (struct SlabDescriptor*)descr_base;
+
+ size_t free_listc = s_get_free_listw(
+ slab_width - sizeof(struct SlabDescriptor),
+ cache->obj_size);
+ size_t descriptor_width = sizeof(struct SlabDescriptor)
+ + (free_listc * sizeof(uintptr_t));
+ uintptr_t obj_base = descr_base + descriptor_width;
+
+	if(free_listc < 8) {
+		/* Descriptor would eat too much of the slab; take it off-slab
+		 * and use the whole slab for objects. */
+		free_listc = slab_width / cache->obj_size;
+		descr = mem_alloc(sizeof(struct SlabDescriptor)
+				+ (free_listc * sizeof(uintptr_t)));
+		obj_base = descr_base;
+	}
+
+	*descr = (struct SlabDescriptor) {
+		.prev = last,
+		.next = (last == NULL ? NULL : last->next),
+		.slab_base = (void*)descr_base,
+		.obj_base = (void*)obj_base,
+		.free_count = free_listc,
+		.free_index = free_listc - 1
+	};
+	if(last != NULL) last->next = descr;
+ for(size_t i = 0; i < free_listc; i++) {
+ descr->free[i] = obj_base + (i * cache->obj_size);
+ }
+
+ return descr;
+}
+
+void
+mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize)
+{
+ if(objsize % 8 > 0) objsize += (8 - (objsize % 8));
+ size_t pages = objsize > 512 ? (objsize >> 9) : 1;
+ *cache = (struct SlabCache){
+ .obj_size = objsize,
+ .slab_pages = pages,
+ .list_free = NULL,
+ .list_partial = NULL,
+ .list_full = NULL
+ };
+	size_t namelen = strlen(name);
+	if(namelen >= SLABCACHE_NAME_LIMIT) namelen = SLABCACHE_NAME_LIMIT - 1;
+	memcpy(cache->name, name, namelen);
+
+ //Allocate the first slab
+ cache->list_free = s_slab_new(cache, NULL);
+}
+
+void*
+mem_slab_alloc(struct SlabCache *cache)
+{
+	// Get a slab with free objects, preferring partially used slabs.
+	struct SlabDescriptor *slab = cache->list_partial;
+	if(slab == NULL && cache->list_free != NULL) {
+		slab = cache->list_free;
+		cache->list_free = slab->next;
+		slab->next = NULL;
+	}
+	if(slab == NULL) slab = s_slab_new(cache, NULL);
+	cache->list_partial = slab;
+
+	// Take an object from the slab.
+	uintptr_t objaddr = slab->free[slab->free_index];
+	slab->free_index -= 1;
+
+	// If that was the last free object, move the slab to the full list.
+	if(slab->free_index < 0) {
+		cache->list_partial = slab->next;
+		slab->next = cache->list_full;
+		cache->list_full = slab;
+	}
+	return (void*)objaddr;
+}
+
+void
+mem_slab_free(struct SlabCache *cache, void *ptr)
+{
+	uintptr_t addr = (uintptr_t)ptr;
+	//Look for the pointer in the bounds of every full slab.
+	struct SlabDescriptor *prev = NULL;
+	for(struct SlabDescriptor *slab = cache->list_full;
+		slab != NULL; prev = slab, slab = slab->next)
+	{
+		uintptr_t base = (uintptr_t)slab->obj_base;
+		uintptr_t limit = ((uintptr_t)slab->slab_base)
+			+ (cache->slab_pages * PAGESIZE);
+		if(addr >= limit || addr < base) continue;
+		if((addr - base) % cache->obj_size != 0) {
+			klogf("Tried to free offset pointer %#016X in slab %s\n",
+					addr, cache->name);
+			return;
+		}
+		slab->free_index++;
+		slab->free[slab->free_index] = addr;
+
+		//Unlink the slab from the full list and push it onto partial.
+		if(prev == NULL) cache->list_full = slab->next;
+		else prev->next = slab->next;
+		slab->next = cache->list_partial;
+		cache->list_partial = slab;
+		return;
+	}
+	//Then in the bounds of every partial slab.
+	prev = NULL;
+	for(struct SlabDescriptor *slab = cache->list_partial;
+		slab != NULL; prev = slab, slab = slab->next)
+	{
+		uintptr_t base = (uintptr_t)slab->obj_base;
+		uintptr_t limit = ((uintptr_t)slab->slab_base)
+			+ (cache->slab_pages * PAGESIZE);
+		if(addr >= limit || addr < base) continue;
+		if((addr - base) % cache->obj_size != 0) {
+			klogf("Tried to free offset pointer %#016X in slab %s\n",
+					addr, cache->name);
+			return;
+		}
+		slab->free_index++;
+		slab->free[slab->free_index] = addr;
+
+		//If every object is free again, move the slab to the free list.
+		if(slab->free_index == (int)slab->free_count - 1) {
+			if(prev == NULL) cache->list_partial = slab->next;
+			else prev->next = slab->next;
+			slab->next = cache->list_free;
+			cache->list_free = slab;
+		}
+		return;
+	}
+}
+
+void*
+mem_alloc(size_t width)
+{
+	if(width == 0) return NULL;
+	// Index of the smallest power-of-two cache (>= 64 B) that fits width.
+	size_t width_log2 = width <= 64 ? 6
+		: (size_t)(64 - __builtin_clzll(width - 1));
+	width_log2 -= 6;
+	if(width_log2 >= GENERIC_CACHEC) {
+		klogf("Allocation size %i too big for generic caches!\n", width);
+		return NULL;
+	}
+
+	struct SlabCache *generic_cache = &s_generic_caches[width_log2];
+	return mem_slab_alloc(generic_cache);
+}
+
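+/* The width of the freed object is unknown, so offer the pointer to every
+ * generic cache; mem_slab_free ignores pointers outside its slabs. */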
+void
+mem_free(void *ptr)
+{
+ for(int i = 0; i < GENERIC_CACHEC; i++) {
+ mem_slab_free(&s_generic_caches[i], ptr);
+ }
+}
+
+void
+mem_slab_setup(void)
+{
+	// Start carving slabs at the first page boundary past the kernel image.
+	s_addr_next_free = (uintptr_t)&_kernel_end;
+	s_addr_next_free = ((s_addr_next_free >> 12) + 1) << 12;
+
+ for(int i = 0; i < GENERIC_CACHEC; i++)
+ {
+ size_t objsize = 1 << (i + 6);
+ char slab_name[SLABCACHE_NAME_LIMIT];
+		sfmt(slab_name, SLABCACHE_NAME_LIMIT, "generic_%i", objsize);
+ mem_slabcache_new(&s_generic_caches[i], slab_name, objsize);
+ }
+}
diff --git a/mem/slab.d b/mem/slab.d
new file mode 100644
index 0000000..5cffd4c
--- /dev/null
+++ b/mem/slab.d
@@ -0,0 +1,2 @@
+mem/slab.o: mem/slab.c mem/slab.h mem/memory.h lib/format.h lib/string.h \
+ lib/jove.h io/log.h
diff --git a/mem/slab.h b/mem/slab.h
new file mode 100644
index 0000000..074d278
--- /dev/null
+++ b/mem/slab.h
@@ -0,0 +1,33 @@
+#ifndef JOVE_MEMORY_SLAB_H
+#define JOVE_MEMORY_SLAB_H 1
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+#define SLABCACHE_NAME_LIMIT 32
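+/* A named cache of fixed-size objects, kept as free, partial, and full
+ * lists of slabs. */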
+struct SlabCache
+{
+ char name[SLABCACHE_NAME_LIMIT];
+
+ struct SlabDescriptor *list_free;
+ struct SlabDescriptor *list_partial;
+ struct SlabDescriptor *list_full;
+
+ size_t obj_size;
+ size_t slab_pages;
+};
+
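+/* Header for a single slab. `free` is a stack of free object addresses;
+ * free_index points at its top and reaches -1 when the slab is exhausted. */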
+struct SlabDescriptor
+{
+ struct SlabDescriptor *prev;
+ struct SlabDescriptor *next;
+ void *slab_base;
+ void *obj_base;
+
+ size_t free_count;
+ int free_index;
+ uintptr_t free[];
+};
+
+#endif