summaryrefslogtreecommitdiffstats
path: root/arch/x86_64/memory/pml4.c
diff options
context:
space:
mode:
authorJon Santmyer <jon@jonsantmyer.com>2025-09-10 13:28:28 -0400
committerJon Santmyer <jon@jonsantmyer.com>2025-09-10 13:28:28 -0400
commit7f350e7ee1c2c38e5ac0b6c22c17388f6c78f0b5 (patch)
treeaef9904e2495ce840319f2815cd859c47294c88a /arch/x86_64/memory/pml4.c
parent032a7bc4d79efea100a00cf3464bea3249a07ff6 (diff)
downloadjove-kernel-7f350e7ee1c2c38e5ac0b6c22c17388f6c78f0b5.tar.gz
jove-kernel-7f350e7ee1c2c38e5ac0b6c22c17388f6c78f0b5.tar.bz2
jove-kernel-7f350e7ee1c2c38e5ac0b6c22c17388f6c78f0b5.zip
refactor paging code. regression on loading init program
Diffstat (limited to 'arch/x86_64/memory/pml4.c')
-rw-r--r--arch/x86_64/memory/pml4.c153
1 files changed, 153 insertions, 0 deletions
diff --git a/arch/x86_64/memory/pml4.c b/arch/x86_64/memory/pml4.c
new file mode 100644
index 0000000..fcfc897
--- /dev/null
+++ b/arch/x86_64/memory/pml4.c
@@ -0,0 +1,153 @@
+#include "arch/x86_64/page.h"
+#include "device/processor.h"
+#include "memory.h"
+#include "boot.h"
+#include "object.h"
+#include "string.h"
+#include "jove.h"
+#include "error.h"
+#include <stdint.h>
+
#define IDENTITY_BASE 0xFFFF800000000000
/* Translate a pointer inside the higher-half identity window back to the
 * physical address it mirrors. */
uintptr_t vptr_tophys(void *vptr)
{
    uintptr_t vaddr = (uintptr_t)vptr;
    return vaddr - IDENTITY_BASE;
}
+
+physptr_t
+vptr_tophys_koff(virtptr_t v)
+{
+ return v - (physptr_t)&_kernel_start + _boot_kernel_phys_base;
+}
+
+void*
+pptr_tovirt_ident(physptr_t p)
+{
+ return (void*)(p + IDENTITY_BASE);
+}
+
+void*
+pmle_get_page(pmle_t entry)
+{
+ uintptr_t pptr = entry.paddr << 12;
+ return pptr_tovirt_ident(pptr);
+}
+
/* Decompose a virtual address into its per-level table indices.
 * path[0] receives the PML4 (layer 4) index, path[1] the layer-3 index,
 * and so on for `depth` entries. Caller must provide room for `depth`
 * indices. */
void
pml4_get_path(uintptr_t vptr, uint8_t depth, uint16_t *path)
{
    uint8_t layer = 4;
    for(uint8_t i = 0; i < depth; i++, layer--)
        path[i] = PML_I_FOR_LAYER(vptr, layer);
}
+
+pmle_t*
+pml4_traverse(pmle_t *pml4, uint8_t depth, uint16_t *path)
+{
+ uint16_t pathi = path[0];
+
+ pmle_t *pmle = &pml4[pathi];
+ pmle_t *pmle_table = pptr_tovirt_ident(pmle->paddr << 12);
+ if(depth == 0) return pmle;
+ if(!pmle->p) return NULL;
+ return pml4_traverse(pmle_table, depth - 1, path + 1);
+}
+
+pmle_t*
+pml4_get_mapping(pmle_t *pml4, uint8_t depth, uintptr_t vptr)
+{
+ uint64_t pathval = 0;
+ uint16_t *path = (uint16_t*)&pathval;
+
+ pml4_get_path(vptr, depth, path);
+ return pml4_traverse(pml4, depth - 1, path);
+}
+
+int
+pml4_try_map(pmle_t *pml4, uintptr_t pptr, uintptr_t vptr)
+{
+ uint64_t pathval = 0;
+ uint16_t *path = (uint16_t*)&pathval;
+
+ pml4_get_path(vptr, 4, path);
+ pmle_t *mapping = pml4_traverse(pml4, 3, path);
+
+ if(mapping == NULL) return KE_DNE;
+ if(mapping->p) return KE_FULL;
+
+ mapping->value = pptr | PAGE_PRESENT | PAGE_RW | PAGE_US;
+ return KE_OK;
+}
+
/* Statically-allocated bootstrap page tables. Each table is one 4KiB page
 * (512 64-bit entries) and must be page-aligned, as required by the x86_64
 * MMU. s_kernel_* back the kernel's himem image mapping; s_idmap_* back the
 * higher-half identity window (see pml4_setup_init). */
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml4[512]; // Page L4
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml3[512]; // Page L3
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml2[512]; // Page directory
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml1[512]; // Page table

__attribute__((aligned(0x1000))) pmle_t s_idmap_pml3[512];
__attribute__((aligned(0x1000))) pmle_t s_idmap_pml2[512];
+
/* Build the kernel's initial address space and switch CR3 to it.
 *
 * Produces two mappings:
 *  - PML4[256]: identity window — the first 1GiB of physical memory mapped
 *    at IDENTITY_BASE (0xFFFF800000000000) with 2MiB large pages (PS|RW|P).
 *    Only s_idmap_pml3[0] is populated, so the window is exactly 1GiB.
 *  - PML4[511]: the kernel image at its linked himem address, 4KiB pages
 *    backed by _boot_kernel_phys_base. Only one PML2 entry is populated,
 *    so this assumes the image fits within a single PML1's 2MiB reach —
 *    TODO confirm against the linker script.
 *
 * NOTE(review): the PML1 fill computes the physical address as
 * (i * 0x1000) + _boot_kernel_phys_base where i is the PML1 index of the
 * virtual address. That is only correct if kernel_pml1_ib == 0, i.e. the
 * kernel's virtual base is 2MiB-aligned; otherwise every mapping is shifted
 * by kernel_pml1_ib pages. The commit message mentions a regression loading
 * the init program — this is a plausible culprit; verify.
 */
void
pml4_setup_init(void)
{
    /* Zero all bootstrap tables (one 4KiB page each). */
    memset(s_kernel_pml4, 0, 0x1000);
    memset(s_kernel_pml3, 0, 0x1000);
    memset(s_kernel_pml2, 0, 0x1000);
    memset(s_kernel_pml1, 0, 0x1000);

    memset(s_idmap_pml3, 0, 0x1000);
    memset(s_idmap_pml2, 0, 0x1000);

    virtptr_t kernel_start = (virtptr_t)&_kernel_start;
    virtptr_t kernel_end = (virtptr_t)&_kernel_end;

    /* NOTE(review): kernel_size / kernel_size_pages are computed but never
     * used below; the mapped range is derived from PML1 indices instead. */
    size_t kernel_size = kernel_end - kernel_start;
    size_t kernel_size_pages = (kernel_size / 0x1000) + 1;

    /* The tables live in the kernel image, so their physical addresses come
     * from the load-offset translation, not the identity window (which does
     * not exist yet). */
    physptr_t kernel_pml4_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml4);
    physptr_t kernel_pml3_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml3);
    physptr_t kernel_pml2_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml2);
    physptr_t kernel_pml1_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml1);

    physptr_t idmap_pml3_base = vptr_tophys_koff((virtptr_t)&s_idmap_pml3);
    physptr_t idmap_pml2_base = vptr_tophys_koff((virtptr_t)&s_idmap_pml2);

    processor_t *processor = processor_current();
    processor->pdir = kernel_pml4_base;

    //Map memory identity pages.
    /* Flag bits: 1 = Present, 2 = R/W, 0x80 = PS (2MiB page). */
    s_kernel_pml4[256].value = idmap_pml3_base | 2 | 1;
    s_idmap_pml3[0].value = idmap_pml2_base | 2 | 1;
    for(int i = 0; i < 512; i++) {
        /* i * 2MiB: 512 large pages covering physical 0..1GiB. */
        s_idmap_pml2[i].value = (i * 0x1000 * 512) | 0x80 | 2 | 1;
    }

    //Map the kernel to himem.

    pmli_t kernel_pml3_i = PML_I_FOR_LAYER(kernel_start, 3);
    pmli_t kernel_pml2_i = PML_I_FOR_LAYER(kernel_start, 2);
    pmli_t kernel_pml1_ib = PML_I_FOR_LAYER(kernel_start, 1);
    pmli_t kernel_pml1_ie = PML_I_FOR_LAYER(kernel_end, 1) + 1;

    s_kernel_pml4[511].value = kernel_pml3_base | 2 | 1;
    s_kernel_pml3[kernel_pml3_i].value = kernel_pml2_base | 2 | 1;
    s_kernel_pml2[kernel_pml2_i].value = kernel_pml1_base | 2 | 1;
    for(pmli_t i = kernel_pml1_ib; i < kernel_pml1_ie; i++) {
        /* See NOTE(review) above: correct only when kernel_pml1_ib == 0. */
        s_kernel_pml1[i].value = ((i * 0x1000) + _boot_kernel_phys_base) | 3;
    }

    /* Activate the new address space (also flushes non-global TLB entries). */
    __asm__ volatile("mov %0, %%cr3":: "r"(kernel_pml4_base));

    //Add page mapping object to init directory.
    _initDirectory.entries[INIT_OBJECT_PAGEMAP] = (objdir_entry_t) {
        .type = KO_MEMORY_MAPPING,
        .data = kernel_pml4_base
    };
}
+
+void
+pml4_setup(pmle_t *pml4)
+{
+ memset(pml4, 0, 0x800);
+ memcpy(&pml4[256], &s_kernel_pml4[256], 0x800);
+}