Diffstat (limited to 'memory/zone.c')
-rw-r--r--  memory/zone.c  145
1 file changed, 145 insertions(+), 0 deletions(-)
diff --git a/memory/zone.c b/memory/zone.c
new file mode 100644
index 0000000..4201cee
--- /dev/null
+++ b/memory/zone.c
@@ -0,0 +1,145 @@
+#include "zone.h"
+#include "boot.h"
+#include "memory.h"
+#include "jove.h"
+#include "string.h"
+#include "print.h"
+
+#define MEM_ZONE_STANDARD_PAGES (MEM_ZONE_STANDARD_LIMIT >> PAGE_SHIFT)
+
+/* Flat backing storage for the standard zone's buddy freemap; pm_zone_setup()
+ * slices it into one block layer per order in s_zone_standard_freemap_blocks. */
+static uintmax_t
+    s_zone_standard_freemap_blocks_flat[BUDDY_BLOCKS_FOR(MEM_ZONE_STANDARD_PAGES)];
+static uintmax_t*
+    s_zone_standard_freemap_blocks[MEM_BUDDY_ORDERS];
+
+/* Physical memory zones. Only the standard zone gets freemap block storage
+ * here; its block layers are wired up in pm_zone_setup(). */
+static struct PhysicalMemoryZone s_zones[MEM_ZONE_COUNT] =
+{
+    {
+        .name = "Standard",
+        .base = MEM_ZONE_STANDARD_BASE,
+        .limit = MEM_ZONE_STANDARD_LIMIT,
+        .freemap = {
+            .orders = MEM_BUDDY_ORDERS,
+            .bits = MEM_ZONE_STANDARD_PAGES,
+            .free = 0,
+            .blocks = s_zone_standard_freemap_blocks
+        }
+    },
+    {
+        .name = "Higher",
+        .base = MEM_ZONE_HIGHER_BASE,
+        .limit = -1,
+        .freemap = {
+            .orders = MEM_BUDDY_ORDERS
+        }
+    }
+};
+
+/* Return the index of the zone containing addr, or -1 if no zone does. */
+int
+pm_zone_for(uintptr_t addr)
+{
+    addr &= ~PAGE_MASK;
+    for(size_t zonei = 0; zonei < MEM_ZONE_COUNT; zonei++)
+    {
+        struct PhysicalMemoryZone *pmz = &s_zones[zonei];
+        if(addr >= pmz->base && addr < pmz->limit) return (int)zonei;
+    }
+    return -1;
+}
+
+uintptr_t
+pm_zone_bound_lower(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].base;
+}
+
+uintptr_t
+pm_zone_bound_upper(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].limit;
+}
+
+size_t
+pm_zone_pages_free(size_t zone)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+    return s_zones[zone].freemap.free;
+}
+
+/* Mark the pages spanning [base, limit) as reserved in the zone's freemap. */
+void
+_zone_resv(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+    buddy_mark_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+/* Mark the pages spanning [base, limit) as free in the zone's freemap. */
+void
+_zone_free(struct PhysicalMemoryZone *zone, uintptr_t base, uintptr_t limit)
+{
+    buddy_free_range(&zone->freemap, base >> PAGE_SHIFT, limit >> PAGE_SHIFT);
+}
+
+/* Reserve the page-aligned portion of [base, limit) in the given zone.
+ * base is rounded up and limit is rounded down to a page boundary. */
+int
+pm_zone_resv(size_t zone, uintptr_t base, uintptr_t limit)
+{
+    assert(zone < MEM_ZONE_COUNT);
+
+    uintptr_t base_off = base % PAGE_SIZE;
+
+    uintptr_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+    uintptr_t limit_real = limit & ~PAGE_MASK;
+    _zone_resv(&s_zones[zone], base_real, limit_real);
+    return 0;
+}
+
+/* Free the page-aligned portion of [base, limit) in the given zone.
+ * base is rounded up and limit is rounded down to a page boundary. */
+int
+pm_zone_free(size_t zone, uintptr_t base, uintptr_t limit)
+{
+    assert(zone < MEM_ZONE_COUNT);
+
+    uintptr_t base_off = base % PAGE_SIZE;
+
+    uintptr_t base_real = (base & ~PAGE_MASK) + (base_off > 0 ? PAGE_SIZE : 0);
+    uintptr_t limit_real = limit & ~PAGE_MASK;
+    _zone_free(&s_zones[zone], base_real, limit_real);
+    return 0;
+}
+
+/* Allocate a contiguous run of `pages` pages from the given zone.
+ * Returns the physical base address, or 0 on failure. */
+uintptr_t
+pm_zone_alloc(size_t zone, size_t pages)
+{
+    if(zone >= MEM_ZONE_COUNT) return 0;
+
+    struct PhysicalMemoryZone *pmz = &s_zones[zone];
+    intmax_t pagei = buddy_alloc(&pmz->freemap, pages);
+    if(pagei < 0) {
+        return 0;
+    }
+
+    return (((uintmax_t)pagei) << PAGE_SHIFT) + pmz->base;
+}
+
+/* Initialize the standard zone's freemap and seed it from the boot memory map. */
+void
+pm_zone_setup(void)
+{
+    struct PhysicalMemoryZone *standard_zone = &s_zones[MEM_ZONE_STANDARD];
+    uintmax_t *map_block_layer_base = s_zone_standard_freemap_blocks_flat;
+
+    /* Slice the flat block array into one layer per buddy order and mark
+     * every page as reserved until the boot memory map says otherwise. */
+    for(size_t i = 0; i < MEM_BUDDY_ORDERS; i++) {
+        size_t layer_entries = (standard_zone->freemap.bits / BUDDY_BLOCK_BITS) >> i;
+        standard_zone->freemap.blocks[i] = map_block_layer_base;
+        memset(map_block_layer_base, 0xFF, layer_entries * sizeof(uintmax_t));
+        map_block_layer_base = &map_block_layer_base[layer_entries];
+    }
+
+    /* Free every usable region reported by the bootloader, clamped to the
+     * standard zone's limit. */
+    for(int i = 0; i < boot_memorymap.count; i++) {
+        struct MemoryMapEntry *entry = &boot_memorymap.entries[i];
+        kdbgf("%2i\t%#016X -> %#016X (%i)\n",
+              i, entry->base, entry->base + entry->length, entry->usable);
+        if(entry->base > MEM_ZONE_STANDARD_LIMIT) continue;
+        uintptr_t limit = entry->base + entry->length;
+        if(limit > MEM_ZONE_STANDARD_LIMIT) limit = MEM_ZONE_STANDARD_LIMIT;
+        if(entry->usable)
+            pm_zone_free(MEM_ZONE_STANDARD, entry->base, limit);
+    }
+}
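
For context, a minimal caller-side sketch of the API this file exports, assuming pm_zone_setup() runs once during boot and that MEM_ZONE_STANDARD, PAGE_SIZE, and the pm_zone_* prototypes come from the headers included above; the kalloc_page() and example_boot_path() names are hypothetical and not part of this commit:

/* Hypothetical usage sketch; not part of this commit. */
#include "zone.h"
#include "memory.h"

/* Grab one physical page from the standard zone; 0 means out of memory. */
static uintptr_t kalloc_page(void)
{
    return pm_zone_alloc(MEM_ZONE_STANDARD, 1);
}

void example_boot_path(void)
{
    /* Build the freemaps from the boot memory map exactly once. */
    pm_zone_setup();

    uintptr_t frame = kalloc_page();
    if(frame == 0)
        return; /* standard zone exhausted */

    /* Give the page back: the freed range is [frame, frame + PAGE_SIZE). */
    pm_zone_free(MEM_ZONE_STANDARD, frame, frame + PAGE_SIZE);
}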