From ace65b453151845bc361f21f3e5b651c35f9f126 Mon Sep 17 00:00:00 2001
From: Jon Santmyer <jon@jonsantmyer.com>
Date: Wed, 22 May 2024 13:00:41 -0400
Subject: massive refactor for mp and organization

---
 mem/memory.c |  10 ---
 mem/memory.h |  94 ---------------------------
 mem/phys.c   |  39 -----------
 mem/slab.c   | 209 -----------------------------------------------------------
 mem/slab.h   |  33 ----------
 mem/zone.c   | 146 -----------------------------------------
 mem/zone.h   |  71 --------------------
 7 files changed, 602 deletions(-)
 delete mode 100644 mem/memory.c
 delete mode 100644 mem/memory.h
 delete mode 100644 mem/phys.c
 delete mode 100644 mem/slab.c
 delete mode 100644 mem/slab.h
 delete mode 100644 mem/zone.c
 delete mode 100644 mem/zone.h

diff --git a/mem/memory.c b/mem/memory.c
deleted file mode 100644
index 26bbbd8..0000000
--- a/mem/memory.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include "memory.h"
-#include "zone.h"
-
-void
-mem_setup(void)
-{
-    mem_zone_setup_standard();
-    mem_paging_setup();
-    mem_slab_setup();
-}
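
For context, mem_setup() pins down the bring-up order: physical zones first, then
paging, then the slab allocator, since the slab code maps its slabs through
mem_ensure_range(), which in turn needs paging and the zones. A minimal,
hypothetical caller (kernel_main and the include path are assumptions, not part
of this tree):

    #include "memory.h"   /* path as used inside mem/; adjust to the real layout */

    void
    kernel_main(void)
    {
        mem_setup();                 /* zones -> paging -> slab */
        void *buf = mem_alloc(128);  /* generic caches exist only after mem_setup() */
        mem_free(buf);
    }
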
diff --git a/mem/memory.h b/mem/memory.h
deleted file mode 100644
index 251c6f5..0000000
--- a/mem/memory.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef JOVE_MEM_H
-#define JOVE_MEM_H 1
-
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#define PAGE_MASK (PAGE_SIZE - 1)
-
-#define KiB 1024ULL
-#define MiB (KiB * KiB)
-#define GiB (MiB * KiB)
-#define TiB (GiB * KiB)
-
-#include "lib/spinlock.h"
-#include "sys/types.h"
-
-typedef struct page_directory
-{
-    spinlock_t lock;
-    size_t id;
-    size_t ref;
-    physptr_t phys;
-    void *virt;
-} page_directory_t;
-
-#include "slab.h"
-
-extern page_directory_t *current_page_directory;
-
-/**Set up the kernel structures responsible for handling physical memory translation.*/
-void mem_paging_setup(void);
-
-/**Check if a given pointer is valid.
- * @param ptr pointer to check.
- * @return true if the pointer is valid.*/
-bool mem_check_ptr(const void *ptr);
-
-/** Return the physical memory mapping for the given address.
- * @param pd page directory to get mapping in.
- * @param addr address to get mapping for.
- * @return HAL compliant page mapping.*/
-page_mapping_t mem_get_mapping_as(page_directory_t *pd, uintptr_t addr);
-
-/** Return the physical memory mapping for the given address.
- * @param addr address to get mapping for.
- * @return HAL compliant page mapping.*/
-page_mapping_t mem_get_mapping(uintptr_t addr);
-
-/** Map a page mapping to a given virtual address.
- * @param pd pointer to the page directory to edit.
- * @param mapping mapping to apply.
- * @param virt virtual address to map to. */
-void mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt);
-
-/** Map a page mapping to a given virtual address in the
- *  current page directory.
- * @param mapping mapping to apply.
- * @param virt virtual address to map to. */
-void mem_set_mapping(page_mapping_t mapping, uintptr_t virt);
-
-/** Make sure the indicated range is mapped in the specified page directory.
- * If necessary, allocate new pages using the passed flags.
- * @param pd pointer to page directory to edit.
- * @param from start of the range.
- * @param to end of the range.
- * @param flg page flags to use for any newly allocated pages,
- *        e.g. writeable or user-accessible.*/
-void mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg);
-
-/**Make sure the indicated range is mapped in the current page directory.
- * If necessary, allocate new pages using the passed flags.
- * @param from start of the range.
- * @param to end of the range.
- * @param flg page flags to use for any newly allocated pages,
- *        e.g. writeable or user-accessible.*/
-void mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg);
-
-void mem_slab_setup(void);
-void mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize);
-
-void* mem_slab_alloc(struct SlabCache *cache);
-void mem_slab_free(struct SlabCache *cache, void *ptr);
-
-void* mem_alloc(size_t width);
-void mem_free(void *ptr);
-
-/*Physical*/
-
-physptr_t mem_phys_alloc(size_t pages);
-void mem_phys_reserve(physptr_t start, physptr_t end);
-void mem_phys_release(physptr_t start, physptr_t end);
-
-void mem_setup(void);
-
-#endif
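
The header above is the whole public surface: paging setup, pointer validation,
per-directory mapping accessors, range ensuring, the slab entry points, and the
physical allocator. A minimal usage sketch of the range-ensuring call, assuming
the page_flags_t fields used by mem/slab.c in this same patch (present,
writeable, useraccess, executable); the virtual range and function name are
made up:

    #include "memory.h"

    /* Map a 16 KiB kernel-only scratch region, writeable, non-executable. */
    static void
    map_scratch_region(void)
    {
        uintptr_t base  = 0xFFFFFFFF90000000ull;
        uintptr_t limit = base + (16 * KiB);
        mem_ensure_range(base, limit, (page_flags_t) {
                .present    = true,
                .writeable  = true,
                .useraccess = false,
                .executable = false
            });
    }
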
diff --git a/mem/phys.c b/mem/phys.c
deleted file mode 100644
index bf56b77..0000000
--- a/mem/phys.c
+++ /dev/null
@@ -1,39 +0,0 @@
-#include "memory.h"
-#include "zone.h"
-#include "lib/jove.h"
-
-void
-mem_phys_reserve(physptr_t start, physptr_t end)
-{
-    size_t zone = mem_zone_for(start);
-    size_t limit = mem_zone_bound_upper(zone);
-
-    if(end > limit) {
-        mem_phys_reserve(limit, end);
-        end = limit;
-    }
-    mem_zone_resv(zone, start, end);
-}
-
-void 
-mem_phys_release(physptr_t start, physptr_t end)
-{
-    size_t zone = mem_zone_for(start);
-    size_t limit = mem_zone_bound_upper(zone);
-
-    if(end > limit) {
-        mem_phys_release(limit, end);
-        end = limit;
-    }
-    mem_zone_free(zone, start, end);
-}
-
-physptr_t
-mem_phys_alloc(size_t pages)
-{
-    if(mem_zone_pages_free(MEM_ZONE_HIGHER) >= pages)
-        return mem_zone_alloc(MEM_ZONE_HIGHER, pages);
-    if(mem_zone_pages_free(MEM_ZONE_STANDARD) >= pages)
-        return mem_zone_alloc(MEM_ZONE_STANDARD, pages);
-    kpanic("Kernel ran out of physical memory!\n");
-}
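
mem_phys_reserve() and mem_phys_release() split a range at the upper bound of
the zone containing its start and recurse on the part above that boundary. A
stand-alone toy rendering of the splitting logic (hypothetical 1 GiB boundary,
plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define GiB (1024ULL * 1024ULL * 1024ULL)

    /* Toy version of the split in mem_phys_reserve(): a range crossing the
     * 1 GiB standard-zone limit becomes one call per zone. */
    static void
    reserve(uint64_t start, uint64_t end)
    {
        uint64_t limit = (start < 1 * GiB) ? 1 * GiB : UINT64_MAX;
        if (end > limit) {
            reserve(limit, end);      /* upper piece goes to the next zone */
            end = limit;
        }
        printf("reserve [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }

    int
    main(void)
    {
        reserve(0x3FF00000ull, 0x40100000ull);   /* straddles the boundary */
        return 0;
    }
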
diff --git a/mem/slab.c b/mem/slab.c
deleted file mode 100644
index 30bc23a..0000000
--- a/mem/slab.c
+++ /dev/null
@@ -1,209 +0,0 @@
-#include "slab.h"
-#include "memory.h"
-#include "lib/format.h"
-#include "lib/string.h"
-#include "lib/jove.h"
-#include "io/log.h"
-
-extern void *_kernel_end;
-
-static uintptr_t s_addr_next_free;
-
-#define GENERIC_CACHEC 8
-static struct SlabCache s_generic_caches[GENERIC_CACHEC];
-
-static uintptr_t
-s_next_free(size_t width)
-{
-    uintptr_t ret = s_addr_next_free;
-    s_addr_next_free += width;
-    mem_ensure_range(
-            ret,
-            s_addr_next_free,
-            (page_flags_t) { 
-                .present = true,
-                .writeable = true,
-                .useraccess = false,
-                .executable = false
-            });
-    return ret;
-}
-
-static int
-s_get_free_listw(size_t slabw, size_t objw)
-{
-    int freelistc = 1;
-    while(freelistc < 256) {
-        int maxobjc = (slabw - (freelistc * sizeof(uintptr_t))) / objw;
-        if(maxobjc <= freelistc) return maxobjc;
-        freelistc++;
-    }
-    return freelistc;
-}
-
-static struct SlabDescriptor
-*s_slab_new(struct SlabCache *cache, struct SlabDescriptor *last)
-{
-    size_t slab_width = (cache->slab_pages << PAGE_SHIFT);
-    uintptr_t descr_base = s_next_free(slab_width);
-    struct SlabDescriptor *descr = (struct SlabDescriptor*)descr_base;
-
-    size_t free_listc = s_get_free_listw(
-            slab_width - sizeof(struct SlabDescriptor),
-            cache->obj_size);
-    size_t descriptor_width = sizeof(struct SlabDescriptor)
-        + (free_listc * sizeof(uintptr_t));
-    uintptr_t obj_base = descr_base + descriptor_width;
-
-    if(free_listc < 8) {
-        free_listc = ((slab_width - sizeof(struct SlabDescriptor)) / cache->obj_size);
-        descr = mem_alloc(sizeof(struct SlabDescriptor) + (free_listc * sizeof(uintptr_t)));
-        obj_base = descr_base;
-    }
-
-    *descr = (struct SlabDescriptor) {
-        .prev = last,
-        .next = (last == NULL ? NULL : last->next),
-        .slab_base = (void*)descr_base,
-        .obj_base = (void*)obj_base,
-        .free_count = free_listc,
-        .free_index = free_listc - 1
-    };
-    for(size_t i = 0; i < free_listc; i++) {
-        descr->free[i] = obj_base + (i * cache->obj_size);
-    }
-
-    return descr;
-}
-
-void
-mem_slabcache_new(struct SlabCache *cache, char *name, size_t objsize)
-{
-    if(objsize % 8 > 0) objsize += (8 - (objsize % 8));
-    size_t pages = objsize > 512 ? (objsize >> 9) : 1;
-    *cache = (struct SlabCache){
-        .obj_size = objsize,
-        .slab_pages = pages,
-        .list_free = NULL,
-        .list_partial = NULL,
-        .list_full = NULL
-    };
-    size_t namelen = strlen(name);
-    namelen = namelen >= SLABCACHE_NAME_LIMIT ? SLABCACHE_NAME_LIMIT - 1 : namelen;
-    memcpy(cache->name, name, namelen);
-
-    //Allocate the first slab
-    cache->list_free = s_slab_new(cache, NULL);
-}
-
-void*
-mem_slab_alloc(struct SlabCache *cache)
-{
-    // Get a free slab
-    struct SlabDescriptor *slab = NULL;
-    if(cache->list_partial != NULL) slab = cache->list_partial;
-    if(slab == NULL && cache->list_free != NULL) {
-        slab = cache->list_free;
-        cache->list_free = slab->next;
-    }
-    if(slab == NULL) slab = s_slab_new(cache, cache->list_free);
-    cache->list_partial = slab;
-
-    // Take an object from the slab.
-    uintptr_t objaddr = slab->free[slab->free_index];
-    slab->free_index -= 1;
-
-    if(slab->free_index < 0) {
-        slab->next = cache->list_full;
-        cache->list_full = slab;
-    }
-    return (void*)objaddr;
-}
-
-void
-mem_slab_free(struct SlabCache *cache, void *ptr)
-{
-    uintptr_t addr = (uintptr_t)ptr;
-    //Look for the pointer in the bounds of every slab
-    for(struct SlabDescriptor *slab = cache->list_full; 
-            slab != NULL; slab = slab->next)
-    {
-        uintptr_t base = (uintptr_t)slab->obj_base;
-        uintptr_t limit = ((uintptr_t)slab->slab_base) 
-            + (cache->slab_pages << PAGE_SHIFT);
-        if(addr > limit || addr < base) continue;
-        if((addr - base) % cache->obj_size != 0) {
-            klogf("Tried to free offset pointer %#016X in slab %s\n",
-                    addr, cache->name);
-            return;
-        }
-        slab->free_index++;
-        slab->free[slab->free_index] = addr;
-
-        cache->list_full = slab->next;
-        slab->next = cache->list_partial;
-        cache->list_partial = slab;
-        return;
-    }
-    for(struct SlabDescriptor *slab = cache->list_partial; 
-            slab != NULL; slab = slab->next)
-    {
-        uintptr_t base = (uintptr_t)slab->obj_base;
-        uintptr_t limit = ((uintptr_t)slab->slab_base) 
-            + (cache->slab_pages << PAGE_SHIFT);
-        if(addr > limit || addr < base) continue;
-        if((addr - base) % cache->obj_size != 0) {
-            klogf("Tried to free offset pointer %#016X in slab %s\n",
-                    addr, cache->name);
-            return;
-        }
-        slab->free_index++;
-        slab->free[slab->free_index] = addr;
-
-        if(slab->free_index == (slab->free_count - 1)) {
-            cache->list_partial = slab->next;
-            slab->next = cache->list_free;
-            cache->list_free = slab;
-        }
-        return;
-    }
-}
-
-void*
-mem_alloc(size_t width)
-{
-    size_t width_log2 = (__builtin_clz(width) ^ 31) + 1;
-    if(width_log2 < 6) width_log2 = 6;
-    width_log2 -= 6;
-    if(width_log2 >= GENERIC_CACHEC) {
-        klogf("Allocation size %i too big for generic caches!\n", width);
-        return NULL;
-    }
-
-    struct SlabCache *generic_cache = &s_generic_caches[width_log2];
-    return mem_slab_alloc(generic_cache);
-}
-
-void
-mem_free(void *ptr)
-{
-    for(int i = 0; i < GENERIC_CACHEC; i++) {
-        mem_slab_free(&s_generic_caches[i], ptr);
-    }
-}
-
-void
-mem_slab_setup(void)
-{
-    s_addr_next_free = (uintptr_t)&_kernel_end;
-    s_addr_next_free = ((s_addr_next_free >> 12) + 1) << 12;
-    s_get_free_listw(PAGE_SIZE - sizeof(struct SlabDescriptor), 32);
-
-    for(int i = 0; i < GENERIC_CACHEC; i++)
-    {
-        size_t objsize = 1 << (i + 6);
-        char slab_name[SLABCACHE_NAME_LIMIT];
-        sfmt(slab_name, SLABCACHE_NAME_LIMIT, "generic_%i", 1 << (i + 6));
-        mem_slabcache_new(&s_generic_caches[i], slab_name, objsize);
-    }
-}
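
mem_alloc() above picks one of the eight generic caches (64 B through 8 KiB,
"generic_64" .. "generic_8192") from the bit width of the request. A
stand-alone rendering of just that index computation, for illustration
(generic_cache_index is a made-up name):

    #include <stdio.h>

    #define GENERIC_CACHEC 8

    /* Mirrors the width_log2 arithmetic in mem_alloc(): bit width of the
     * request, clamped to a minimum of 2^6 = 64 bytes. */
    static int
    generic_cache_index(unsigned int width)
    {
        int width_log2 = (__builtin_clz(width) ^ 31) + 1;
        if (width_log2 < 6) width_log2 = 6;
        width_log2 -= 6;
        return width_log2 < GENERIC_CACHEC ? width_log2 : -1;
    }

    int
    main(void)
    {
        printf("100 B  -> cache %d\n", generic_cache_index(100));   /* 1, "generic_128" */
        printf("9000 B -> cache %d\n", generic_cache_index(9000));  /* -1, too large    */
        return 0;
    }
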
diff --git a/mem/slab.h b/mem/slab.h
deleted file mode 100644
index 074d278..0000000
--- a/mem/slab.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef JOVE_MEMORY_SLAB_H
-#define JOVE_MEMORY_SLAB_H 1
-
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-
-#define SLABCACHE_NAME_LIMIT 32
-struct SlabCache
-{
-    char name[SLABCACHE_NAME_LIMIT];
-
-    struct SlabDescriptor *list_free;
-    struct SlabDescriptor *list_partial;
-    struct SlabDescriptor *list_full;
-
-    size_t obj_size;
-    size_t slab_pages;
-};
-
-struct SlabDescriptor
-{
-    struct SlabDescriptor *prev;
-    struct SlabDescriptor *next;
-    void *slab_base;
-    void *obj_base;
-
-    size_t free_count;
-    int free_index;
-    uintptr_t free[];
-};
-
-#endif
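
SlabDescriptor keeps its free list as a flexible array member, so a descriptor
embedded at the start of a slab costs sizeof(struct SlabDescriptor) +
free_count * sizeof(uintptr_t) bytes, with free[] used as a LIFO stack indexed
by free_index. A small stand-alone footprint check, assuming a 64-bit target
(the struct is copied with void* standing in for the list pointers):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Same shape as struct SlabDescriptor above. */
    struct Descr {
        void *prev, *next, *slab_base, *obj_base;
        size_t free_count;
        int free_index;
        uintptr_t free[];
    };

    int
    main(void)
    {
        size_t free_count = 56;   /* roughly what a 4 KiB slab of 64 B objects yields */
        printf("header: %zu bytes, with free list: %zu bytes\n",
               sizeof(struct Descr),
               sizeof(struct Descr) + free_count * sizeof(uintptr_t));
        return 0;
    }
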
diff --git a/mem/zone.c b/mem/zone.c
deleted file mode 100644
index 42a056e..0000000
--- a/mem/zone.c
+++ /dev/null
@@ -1,146 +0,0 @@
-#include "zone.h"
-#include "memory.h"
-#include "boot/boot.h"
-#include "lib/string.h"
-#include "lib/jove.h"
-#include "sys/errno.h"
-#include "io/log.h"
-
-#define MEM_ZONE_STANDARD_PAGES  (MEM_ZONE_STANDARD_LIMIT >> PAGE_SHIFT)
-
-static uintmax_t 
-    s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
-static uintmax_t*
-    s_zone_standard_freemap_blocks[MEM_BUDDY_ORDERS];
-
-static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] = 
-{
-    {
-        .name = "Standard",
-        .base = MEM_ZONE_STANDARD_BASE,
-        .limit = MEM_ZONE_STANDARD_LIMIT,
-        .freemap = { 
-            .orders = MEM_BUDDY_ORDERS,
-            .bits = MEM_ZONE_STANDARD_PAGES,
-            .free = 0,
-            .blocks = s_zone_standard_freemap_blocks
-        }
-    },
-    {
-        .name = "Higher",
-        .base = MEM_ZONE_HIGHER_BASE,
-        .limit = -1,
-        .freemap = {
-            .orders = MEM_BUDDY_ORDERS
-        }
-    }
-};
-
-int
-mem_zone_for(uintptr_t addr)
-{
-    addr &= ~PAGE_MASK;
-    for(size_t zonei = 0; zonei < MEM_ZONE_COUNT; zonei++)
-    {
-        struct PhysicalMemoryZone *pmz = &s_zones[zonei];
-        if(addr >= pmz->base && addr < pmz->limit) return zonei;
-    }
-    return -ENOTFOUND;
-}
-
-uintptr_t 
-mem_zone_bound_lower(size_t zone)
-{
-    if(zone >= MEM_ZONE_COUNT) return 0;
-    return s_zones[zone].base;
-}
-
-uintptr_t 
-mem_zone_bound_upper(size_t zone)
-{
-    if(zone >= MEM_ZONE_COUNT) return 0;
-    return s_zones[zone].limit;
-}
-
-size_t
-mem_zone_pages_free(size_t zone)
-{
-    if(zone >= MEM_ZONE_COUNT) return 0;
-    return s_zones[zone].freemap.free;
-}
-
-void
-_zone_resv(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
-{
-    buddy_mark_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
-}
-
-void
-_zone_free(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
-{
-    buddy_free_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
-}
-
-int
-mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
-{
-    if(zone >= MEM_ZONE_COUNT) return -EINVAL;
-
-    size_t base_off = base % PAGE_SIZE;
-
-    size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
-    size_t limit_real = limit & ~PAGE_MASK;
-    _zone_resv(&s_zones[zone], base_real, limit_real);
-    return 0;
-}
-
-int
-mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
-{
-    if(zone >= MEM_ZONE_COUNT) return -EINVAL;
-
-    size_t base_off = base % PAGE_SIZE;
-
-    size_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
-    size_t limit_real = limit & ~PAGE_MASK;
-    _zone_free(&s_zones[zone], base_real, limit_real);
-    return 0;
-}
-
-uintptr_t
-mem_zone_alloc(size_t zone, size_t pages)
-{
-    if(zone >= MEM_ZONE_COUNT) return 0;
-
-    struct PhysicalMemoryZone *pmz = &s_zones[zone];
-    intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
-    if(pagei < 0) {
-        return 0;
-    }
-
-    return (((uintmax_t)pagei) << PAGE_SHIFT) + pmz->base;
-}
-
-void
-mem_zone_setup_standard(void)
-{
-    struct PhysicalMemoryZone *standard_zone = &s_zones[MEM_ZONE_STANDARD];
-    uintmax_t *map_block_layer_base = s_zone_standard_freemap_blocks_flat;
-    for(size_t i = 0; i < MEM_BUDDY_ORDERS; i++) {
-        size_t layer_entries = (standard_zone->freemap.bits / BUDDY_BLOCK_BITS) >> i;
-        standard_zone->freemap.blocks[i] = map_block_layer_base;
-        memset(map_block_layer_base, 0xFF, layer_entries * sizeof(uintmax_t));
-        map_block_layer_base = &map_block_layer_base[layer_entries];
-    }
-
-    for(int i = 0; i < boot_memorymap.count; i++) {
-        struct MemoryMapEntry *entry = &boot_memorymap.entries[i];
-        klogf("%2i\t%#016X -> %#016X (%i)\n",
-                i, entry->base, entry->base + entry->length, entry->usable);
-        if(entry->base > MEM_ZONE_STANDARD_LIMIT) continue;
-        size_t limit = entry->base + entry->length;
-        if(limit > MEM_ZONE_STANDARD_LIMIT) limit = MEM_ZONE_STANDARD_LIMIT;
-        if(entry->usable)
-            mem_zone_free(MEM_ZONE_STANDARD, entry->base, limit);
-    }
-}
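
mem_zone_setup_standard() slices one flat block array into MEM_BUDDY_ORDERS
per-order layers, halving the entry count at each order, then frees whatever
the boot memory map reports as usable below 1 GiB. A stand-alone check of the
layer sizing, assuming 64-bit buddy blocks (BUDDY_BLOCK_BITS comes from
lib/buddymap.h, which is not part of this patch, so 64 is an assumption):

    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define GiB              (1024ULL * 1024ULL * 1024ULL)
    #define MEM_BUDDY_ORDERS 12
    #define BUDDY_BLOCK_BITS 64   /* assumption: uintmax_t-sized blocks */

    int
    main(void)
    {
        unsigned long long pages = (1 * GiB) >> PAGE_SHIFT;   /* 262144 */
        unsigned long long total = 0;
        for (int i = 0; i < MEM_BUDDY_ORDERS; i++) {
            unsigned long long entries = (pages / BUDDY_BLOCK_BITS) >> i;
            printf("order %2d: %6llu blocks\n", i, entries);
            total += entries;
        }
        printf("flat backing array: %llu blocks\n", total);
        return 0;
    }
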
diff --git a/mem/zone.h b/mem/zone.h
deleted file mode 100644
index c0b0f52..0000000
--- a/mem/zone.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef JOVE_MEM_ZONE_H
-#define JOVE_MEM_ZONE_H 1
-
-#include <stdint.h>
-#include "lib/buddymap.h"
-
-enum {
-    MEM_ZONE_STANDARD = 0, /* First GiB of physical memory. */
-    MEM_ZONE_HIGHER,
-    MEM_ZONE_COUNT
-};
-
-#define MEM_ZONE_STANDARD_BASE 0
-#define MEM_ZONE_STANDARD_LIMIT (1 * GiB)
-#define MEM_ZONE_HIGHER_BASE MEM_ZONE_STANDARD_LIMIT
-
-#define MEM_BUDDY_ORDERS 12
-struct PhysicalMemoryZone
-{
-    const char *name;
-
-    uintptr_t base;
-    uintptr_t limit;
-
-    struct BuddyMap freemap;
-};
-
-/**Return the zone index for the given address
- * @param addr address to look up
- * @return zone index*/
-int mem_zone_for(uintptr_t addr);
-
-/**Return the lower bound for the given zone index.
- * @param zone index into zones.
- * @return lower bound.*/
-uintptr_t mem_zone_bound_lower(size_t zone);
-
-/**Return the upper bound for the given zone index.
- * @param zone index into zones.
- * @return upper bound.*/
-uintptr_t mem_zone_bound_upper(size_t zone);
-
-/**Return the number of pages free in the given zone.
- * @param zone index into zones.
- * @return number of free pages.*/
-size_t mem_zone_pages_free(size_t zone);
-
-/** Using a given zone, reserve a range of physical addresses
- * @param zone identifier of zone to modify
- * @param base starting address to reserve
- * @param limit ending address to reserve
- * @return error code or 0 if success */
-int mem_zone_resv(size_t zone, uintptr_t base, uintptr_t limit);
-
-/** Using a given zone, free a range of physical addresses
- * @param zone identifier of zone to modify
- * @param base starting address to free
- * @param limit ending address to free
- * @return error code or 0 if success*/
-int mem_zone_free(size_t zone, uintptr_t base, uintptr_t limit);
-
-/** Allocate a number of pages from the given zone
- * @param zone identifier of the zone to modify
- * @param pages number of pages to allocate
- * @return physical memory address of allocation
- *         zero if allocation failed*/
-uintptr_t mem_zone_alloc(size_t zone, size_t pages);
-
-void mem_zone_setup_standard(void);
-
-#endif
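
A minimal usage sketch of the zone API above, as an early-boot caller might use
it after mem_zone_setup_standard(); the reserved range is made up and
early_reserve_example is a hypothetical name:

    #include "zone.h"
    #include "memory.h"   /* GiB, PAGE_SHIFT */

    static void
    early_reserve_example(void)
    {
        mem_zone_setup_standard();

        /* Keep a (made-up) 64 KiB firmware region out of the allocator. */
        mem_zone_resv(MEM_ZONE_STANDARD, 0x9F000, 0x9F000 + 0x10000);

        /* Grab four pages of low physical memory; 0 means the zone is empty. */
        uintptr_t phys = mem_zone_alloc(MEM_ZONE_STANDARD, 4);
        if (phys == 0) {
            /* out of standard-zone memory */
        }
    }
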
-- 
cgit v1.2.1