From ace65b453151845bc361f21f3e5b651c35f9f126 Mon Sep 17 00:00:00 2001
From: Jon Santmyer
Date: Wed, 22 May 2024 13:00:41 -0400
Subject: massive refactor for mp and organization

---
 arch/x86_64/pagedirectory.c | 138 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 138 insertions(+)
 create mode 100644 arch/x86_64/pagedirectory.c

(limited to 'arch/x86_64/pagedirectory.c')

diff --git a/arch/x86_64/pagedirectory.c b/arch/x86_64/pagedirectory.c
new file mode 100644
index 0000000..cdfffeb
--- /dev/null
+++ b/arch/x86_64/pagedirectory.c
@@ -0,0 +1,138 @@
+#include "arch/page.h"
+#include "arch/processor.h"
+#include "klib/rbtree.h"
+#include "boot.h"
+#include "memory.h"
+#include "string.h"
+#include "print.h"
+
+PAGEALIGN static uint64_t s_kernel_initial_pml4[512];
+PAGEALIGN static uint64_t s_kernel_initial_pml3[2][512];
+PAGEALIGN static uint64_t s_kernel_initial_pml2[2][512];
+PAGEALIGN static uint64_t s_kernel_initial_pml1[2][512];
+static page_directory_t s_kernel_initial_pd;
+
+static rbtree_t s_page_directories;
+static intmax_t s_next_pdid = 0;
+
+page_directory_t*
+pd_new()
+{
+    page_directory_t newpd = {
+        .id = s_next_pdid++,
+        .phys = pm_alloc(1),
+    };
+    newpd.pml = (void*)pm_tovirt(newpd.phys);
+    for(size_t i = 256; i < 512; i++) {
+        pmle_t *kpe = &(s_kernel_initial_pd.pml)[i];
+        pmle_t *ppe = &(newpd.pml)[i];
+        *ppe = *kpe;
+    }
+
+    return rbtree_insert(&s_page_directories, newpd.id, &newpd);
+}
+
+static void
+s_pd_dup_pml(pmle_t *src, pmle_t *dest, size_t l, size_t i)
+{
+    pmle_t srce = src[i];
+    if(!srce.p) return;
+
+    dest[i] = srce;
+    dest[i].paddr = pm_alloc(1);
+    pmle_t dste = dest[i];
+
+    pmle_t *srct = (pmle_t*)pm_tovirt(srce.paddr << PAGE_SHIFT);
+    pmle_t *dstt = (pmle_t*)pm_tovirt(dste.paddr << PAGE_SHIFT);
+
+    if(l == 0) {
+        memcpy(dstt, srct, PAGE_SIZE);
+        return;
+    }
+
+    for(i = 0; i < 512; i++) {
+        dstt[i] = srct[i];
+        if(!srct[i].p) continue;
+        s_pd_dup_pml(srct, dstt, l - 1, i);
+    }
+}
+
+page_directory_t*
+pd_dup(page_directory_t *pd)
+{
+    page_directory_t *newpd = pd_new();
+    for(size_t i = 0; i < 256; i++) {
+        s_pd_dup_pml(pd->pml, newpd->pml, 3, i);
+    }
+    return newpd;
+}
+
+page_directory_t*
+pd_get(pdid_t pdid)
+{
+    return rbtree_find(&s_page_directories, pdid);
+}
+
+void
+pd_switch(page_directory_t *pd)
+{
+    processor_t *pc = processor_current();
+    if(pc->pd == pd) return;
+    pc->pd = pd;
+    __asm__ volatile("movq %0, %%cr3":: "r"(pd->phys));
+}
+
+void
+vm_setup_early(void)
+{
+    memset(s_kernel_initial_pml4, 0, PAGE_SIZE);
+    memset(s_kernel_initial_pml3, 0, 2 * PAGE_SIZE);
+    memset(s_kernel_initial_pml2, 0, 2 * PAGE_SIZE);
+    memset(s_kernel_initial_pml1, 0, PAGE_SIZE);
+    s_kernel_initial_pd = (page_directory_t){
+        .phys = vm_tophys_koff((uintptr_t)&s_kernel_initial_pml4),
+        .pml = (pmle_t*)&s_kernel_initial_pml4,
+        .id = s_next_pdid++
+    };
+    processor_current()->pd = &s_kernel_initial_pd;
+
+    /* Map first few GiBs */
+    s_kernel_initial_pml4[256] =
+        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml3[0])
+        | 3;
+    s_kernel_initial_pml3[0][0] =
+        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml2[0])
+        | 3;
+    for(int i = 0; i < 512; i++) {
+        s_kernel_initial_pml2[0][i] = (i * (PAGE_SIZE * 512)) | 0x80 | 3;
+    }
+
+    size_t kernel_pml3e = (_kernel_virtual_base >> (30)) % 512;
+    size_t kernel_pml2e = (_kernel_virtual_base >> (21)) % 512;
+    size_t kernel_npages = ((((uintptr_t)&_kernel_end) - _kernel_virtual_base) >> 12) + 1;
+    klogf("Kernel has %i pages\n", kernel_npages);
+
+    /* Map kernel pages */
+    s_kernel_initial_pml4[511] =
+        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml3[1]) | 3;
+    s_kernel_initial_pml3[1][kernel_pml3e] =
+        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml2[1]) | 3;
+    s_kernel_initial_pml2[1][kernel_pml2e] =
+        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml1[0]) | 3;
+    for(size_t i = 0; i < kernel_npages; i++) {
+        s_kernel_initial_pml1[0][i] = (i * PAGE_SIZE) + boot_kernel_physical_address | 3;
+    }
+
+    extern int_state_t *_pagefault_handler(int_state_t*);
+    int_handler_set(14, _pagefault_handler);
+    __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.phys));
+}
+
+void
+vm_setup(void)
+{
+    rbtree_new(&s_page_directories, page_directory_t);
+
+    processor_t *proc = processor_current();
+    pd_switch(pd_new());
+}
--
cgit v1.2.1
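
Note on reading the hunk: pmle_t comes from arch/page.h, which is not part of this diff. The code above only assumes it is a 64-bit view of an x86_64 page-map entry with at least a present bit p and a physical frame number paddr, which s_pd_dup_pml() shifts left by PAGE_SHIFT before handing it to pm_tovirt(). A minimal sketch of what such a definition could look like, offered purely as an assumption and not as the project's actual header, is:

    /* Hypothetical pmle_t layout, assuming the standard x86_64 page-map entry
     * format; the real definition lives in arch/page.h and may differ. */
    #include <stdint.h>

    typedef union pmle {
        struct {
            uint64_t p     : 1;  /* bit 0: present */
            uint64_t rw    : 1;  /* bit 1: writable */
            uint64_t us    : 1;  /* bit 2: user-accessible */
            uint64_t pwt   : 1;  /* bit 3: write-through */
            uint64_t pcd   : 1;  /* bit 4: cache disable */
            uint64_t a     : 1;  /* bit 5: accessed */
            uint64_t d     : 1;  /* bit 6: dirty */
            uint64_t ps    : 1;  /* bit 7: page size (the 0x80 bit used above) */
            uint64_t g     : 1;  /* bit 8: global */
            uint64_t avl   : 3;  /* bits 9-11: available to software */
            uint64_t paddr : 40; /* bits 12-51: physical frame number */
            uint64_t avl2  : 11; /* bits 52-62: available / reserved */
            uint64_t nx    : 1;  /* bit 63: no-execute */
        };
        uint64_t raw;
    } pmle_t;

Under an entry layout like this, the | 3 in vm_setup_early() sets p and rw, 0x80 sets ps for the 2 MiB large-page mappings, and copying a whole entry before overwriting only paddr (as s_pd_dup_pml() does) keeps the source entry's flag bits while pointing the copy at a freshly allocated table.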