#include "paging.h"
#include "interrupt.h"
#include "io/log.h"
#include "lib/jove.h"
#include "lib/string.h"
#include "lib/hashtable.h"
#include "mem/memory.h"
#include "boot/boot.h"
/* Linker-provided symbol marking the end of the kernel image. */
extern void *_kernel_end;
/* Bootstrap page tables used before a dynamic allocator exists.
 * Index [0] of each pair backs the higher-half direct map, [1] backs
 * the kernel-image mapping (see mem_paging_setup). */
PAGEALIGN static uint64_t s_kernel_initial_pml4[512];
PAGEALIGN static uint64_t s_kernel_initial_pml3[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml2[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml1[2][512];
/* Monotonic id source for page directories. */
static intmax_t s_next_pdid = 0;
static page_directory_t s_kernel_initial_pd;
page_directory_t *current_page_directory;
/* Per-level memo of the most recent page-table walk: pmli[l] is the
 * index used at level l, pml[l] the table that entry points to
 * (so pml[1] is the PML1 table, populated by s_paging_fetch_table).
 * NOTE(review): .pd is compared against in several places but never
 * assigned anywhere in this file, so the cache is effectively wiped
 * before every walk — confirm whether persisting it was intended. */
struct PageStateCache {
page_directory_t *pd;
size_t pmli[4];
pmle_t *pml[4];
} s_state_cache;
/* Translate a virtual address inside the kernel image to its physical
 * address via the fixed kernel virt->phys offset established at boot. */
physptr_t
mem_linear_tophys_koffset(uintptr_t virt)
{
    uintptr_t image_offset = virt - boot_kernel_virtual_base;
    return boot_kernel_physical_address + image_offset;
}
/* Map a physical address into the higher-half direct map window. */
uintptr_t
mem_phys_tolinear(physptr_t phys)
{
    /* Base of the direct map: PML4 slot 256 (see mem_paging_setup). */
    const uintptr_t direct_map_base = 0xFFFF800000000000ULL;
    return direct_map_base + (uintptr_t)phys;
}
/*
 * Extract the 9-bit table index for paging level `l` (0 = PML1 entry,
 * 3 = PML4 entry) from virtual address `addr`.
 */
static size_t
s_paging_pmli(size_t l, uintptr_t addr)
{
    return (addr >> (12 + 9 * l)) & 0x1FFULL;
}
/*
 * Return the next-lower table reached through entry `pmli` of `pml`
 * (level `l`), allocating and zeroing a fresh table when the entry is
 * not present. The result is memoized in s_state_cache at index l.
 *
 * Fix: when this level misses the cache, any cached lower-level tables
 * may belong to a different branch of the paging tree; they are now
 * invalidated so a later same-index lookup cannot return a stale table.
 */
static pmle_t*
s_paging_fetch_table(pmle_t *pml, size_t l, uintptr_t virt)
{
    size_t pmli = s_paging_pmli(l, virt);
    if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
        return s_state_cache.pml[l];
    pmle_t entry = pml[pmli];
    bool entry_new = false;
    if(!entry.p) {
        entry_new = true;
        /* mem_phys_alloc returns a page-aligned frame, so assigning it
         * to .value lands the frame number in the paddr field with all
         * flag bits clear — TODO confirm alignment guarantee. */
        entry.value = mem_phys_alloc(1);
        entry.p = 1;
        entry.rw = 1;
        entry.us = 1;
        pml[pmli] = entry;
    }
    pmle_t *table = (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
    if(entry_new) memset(table, 0, PAGE_SIZE);
    /* Cache miss at this level: drop every lower level before caching. */
    for(size_t k = 0; k < l; k++) {
        s_state_cache.pmli[k] = 0;
        s_state_cache.pml[k] = NULL;
    }
    s_state_cache.pmli[l] = pmli;
    s_state_cache.pml[l] = table;
    return table;
}
/*
 * Walk (creating as needed) the PML4 -> PML1 chain covering `virt`,
 * leaving each level memoized in s_state_cache; callers read the PML1
 * table from s_state_cache.pml[1] afterwards.
 *
 * Fix: the intermediate results were bound to unused locals
 * (pml3/pml2/pml1), tripping -Wunused-variable; the fetches are now
 * chained through one variable.
 * NOTE(review): s_state_cache.pd is never assigned here (or anywhere
 * visible), so the memset below runs on every call — confirm intent.
 */
static void
s_paging_cache_tables(page_directory_t *pd, uintptr_t virt)
{
    if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
    pmle_t *table = (pmle_t*)pd->virt;
    table = s_paging_fetch_table(table, 3, virt); /* PML4 -> PML3 */
    table = s_paging_fetch_table(table, 2, virt); /* PML3 -> PML2 */
    (void)s_paging_fetch_table(table, 1, virt);   /* PML2 -> PML1 */
}
/*
 * Read-only walk: return the next-lower table reached from `pt` for
 * `virt`, or NULL when `pt` is NULL or the entry is not present.
 *
 * Fix: the previous version consulted s_state_cache keyed only by the
 * per-level index, without validating that the cached entry belongs to
 * the parent table actually passed in — when upper-level indices differ
 * but a lower index matches, that returns a table from a different
 * branch of the paging tree. The walk now always reads the real tables
 * (identical behavior today, since the cache is wiped before each
 * lookup, but no longer a latent hazard).
 */
static pmle_t*
s_paging_get_table(pmle_t *pt, size_t l, uintptr_t virt)
{
    if(pt == NULL) return NULL;
    pmle_t entry = pt[s_paging_pmli(l, virt)];
    if(!entry.p) return NULL;
    return (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
}
/*
 * Look up the page mapping covering `addr` in directory `pd`.
 * Returns an all-zero mapping when no PML1 table exists for `addr`.
 */
page_mapping_t
mem_get_mapping_as(page_directory_t *pd, uintptr_t addr)
{
    page_mapping_t result = { 0 };
    spinlock_acquire(pd->lock);
    /* Reset the walk memo when it belongs to another directory. */
    if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
    pmle_t *table = (pmle_t*)pd->virt;
    table = s_paging_get_table(table, 3, addr);
    table = s_paging_get_table(table, 2, addr);
    table = s_paging_get_table(table, 1, addr);
    if(table != NULL) {
        pmle_t leaf = table[s_paging_pmli(0, addr)];
        result = (page_mapping_t) {
            .phys = (leaf.paddr << 12) & ~PAGE_MASK,
            .pf = {
                .present = leaf.p,
                .writeable = leaf.rw,
                .useraccess = leaf.us,
                .executable = !leaf.xd
            }
        };
    }
    spinlock_release(pd->lock);
    return result;
}
/* Convenience wrapper: look up `addr` in the active page directory. */
page_mapping_t
mem_get_mapping(uintptr_t addr)
{
    return mem_get_mapping_as(current_page_directory, addr);
}
bool
mem_check_ptr(const void *ptr)
{
return mem_get_mapping((uintptr_t)ptr).pf.present != 0;
}
/*
 * Install `mapping` at virtual address `virt` in directory `pd`,
 * creating any missing intermediate tables.
 *
 * Fix: s_paging_cache_tables memoizes each fetched *child* table at its
 * own level index, and the lowest fetch runs at l == 1 — so the PML1
 * table lives at s_state_cache.pml[1] (as mem_ensure_range_as already
 * uses). The previous code read pml[0], which no code ever writes, and
 * therefore dereferenced a NULL/stale pointer.
 */
void
mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt)
{
    spinlock_acquire(pd->lock);
    s_paging_cache_tables(pd, virt);
    pmle_t *pml1 = s_state_cache.pml[1];
    size_t pml1i = s_paging_pmli(0, virt);
    pml1[pml1i] = (pmle_t) {
        .p = mapping.pf.present,
        .rw = mapping.pf.writeable,
        .us = mapping.pf.useraccess,
        .xd = !mapping.pf.executable,
        .paddr = mapping.phys >> 12
    };
    spinlock_release(pd->lock);
}
/* Convenience wrapper: install `mapping` in the active page directory. */
void
mem_set_mapping(page_mapping_t mapping, uintptr_t virt)
{
    mem_set_mapping_as(current_page_directory, mapping, virt);
}
/*
 * Ensure every page touching [from, to) is mapped in `pd` with flags
 * `flg`, allocating a fresh physical frame for each page not already
 * present. `from` is aligned down to a page boundary first; at least
 * one page is always ensured.
 *
 * Fix: the page count was floored ((to - from) >> 12), so a range
 * ending inside a page left its final page unmapped; it is now rounded
 * up to cover the whole range.
 */
void
mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg)
{
    spinlock_acquire(pd->lock);
    from &= ~(PAGE_SIZE - 1);
    if(to < from) to = from;
    size_t pages = (to - from + PAGE_SIZE - 1) >> 12;
    if(pages == 0) pages = 1;
    for(size_t i = 0; i < pages; i++) {
        uintptr_t waddr = from + (i << 12);
        s_paging_cache_tables(pd, waddr);
        pmle_t *pml1 = s_state_cache.pml[1]; /* PML1 memoized at level 1 */
        size_t pml1i = s_paging_pmli(0, waddr);
        if(!pml1[pml1i].p) {
            physptr_t phys = mem_phys_alloc(1);
            pml1[pml1i] = (pmle_t) {
                .p = flg.present,
                .rw = flg.writeable,
                .us = flg.useraccess,
                .xd = !flg.executable,
                .paddr = phys >> 12
            };
        }
    }
    spinlock_release(pd->lock);
}
/* Convenience wrapper: ensure the range in the active page directory. */
void
mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg)
{
    mem_ensure_range_as(current_page_directory, from, to, flg);
}
/*
 * ISR 14 (#PF) handler: read the faulting address from CR2, decode the
 * hardware error code (bit 0 = present, bit 1 = write, bit 2 = user,
 * bit 4 = instruction fetch), log the fault and panic.
 * Never returns in practice (kpanic), but keeps the handler signature.
 */
struct Registers*
s_pagefault_handler(struct Registers *state)
{
    extern uint64_t __isr_err;
    uintptr_t fault_addr = 0;
    __asm__ volatile("movq %%cr2, %0": "=r"(fault_addr));
    bool present = __isr_err & 1;
    bool write = __isr_err & 2;
    bool user = __isr_err & 4;
    bool fetch = __isr_err & 16;
    klogf("Page fault at %016X\n", fault_addr);
    /* Fix: `fetch` was decoded but never reported. */
    klogf("%s %s from a %s address%s\n",
        user ? "user" : "kernel",
        write ? "wrote" : "read",
        present ? "present" : "non-present",
        fetch ? " (instruction fetch)" : "");
    kpanic("Unhandled page fault at %016X\n", state->ip);
    return state;
}
/*
 * Build the bootstrap page tables and load them into CR3:
 *  - PML4[256]: higher-half direct map (first 1 GiB via 2-MiB pages),
 *  - kernel image mapped 4-KiB-page-by-page at its virtual base.
 *
 * Fixes:
 *  - memsets now use sizeof, so the two-page pml1 (and pml3/pml2 pairs)
 *    are fully cleared (pml1 was only cleared for one page);
 *  - the kernel PML4 slot uses the computed, masked index instead of a
 *    hard-coded 511 alongside an unused, unmasked kernel_pml4e;
 *  - the PML1 fill parenthesizes (base + offset) | flags explicitly
 *    ('+' already binds tighter than '|', but the intent is now clear);
 *  - panic if the kernel image exceeds the single 512-entry PML1
 *    instead of silently writing past the array;
 *  - klogf no longer passes a size_t for %i.
 */
void
mem_paging_setup(void)
{
    memset(s_kernel_initial_pml4, 0, sizeof(s_kernel_initial_pml4));
    memset(s_kernel_initial_pml3, 0, sizeof(s_kernel_initial_pml3));
    memset(s_kernel_initial_pml2, 0, sizeof(s_kernel_initial_pml2));
    memset(s_kernel_initial_pml1, 0, sizeof(s_kernel_initial_pml1));
    s_kernel_initial_pd = (page_directory_t){
        .phys = mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml4),
        .virt = (union PageEntry*)&s_kernel_initial_pml4,
        .ref = 1,
        .id = s_next_pdid++
    };
    current_page_directory = &s_kernel_initial_pd;
    /* Direct map: PML4 slot 256 covers 0xFFFF800000000000; first GiB as
     * 512 2-MiB large pages (0x80 = PS, 3 = present | writable). */
    s_kernel_initial_pml4[256] =
        mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml3[0])
        | 3;
    s_kernel_initial_pml3[0][0] =
        mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml2[0])
        | 3;
    for(int i = 0; i < 512; i++) {
        s_kernel_initial_pml2[0][i] = ((uint64_t)i * (PAGE_SIZE * 512)) | 0x80 | 3;
    }
    size_t kernel_pml4e = (boot_kernel_virtual_base >> (39)) % 512;
    size_t kernel_pml3e = (boot_kernel_virtual_base >> (30)) % 512;
    size_t kernel_pml2e = (boot_kernel_virtual_base >> (21)) % 512;
    size_t kernel_npages = ((((uintptr_t)&_kernel_end) - boot_kernel_virtual_base) >> 12) + 1;
    klogf("Kernel has %i pages\n", (int)kernel_npages);
    /* One PML1 table maps at most 512 pages (2 MiB). */
    if(kernel_npages > 512)
        kpanic("Kernel image too large for bootstrap page table (%i pages)\n",
            (int)kernel_npages);
    /* Map kernel pages at the kernel virtual base. */
    s_kernel_initial_pml4[kernel_pml4e] =
        mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml3[1]) | 3;
    s_kernel_initial_pml3[1][kernel_pml3e] =
        mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml2[1]) | 3;
    s_kernel_initial_pml2[1][kernel_pml2e] =
        mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml1[0]) | 3;
    for(size_t i = 0; i < kernel_npages; i++) {
        s_kernel_initial_pml1[0][i] = ((i * PAGE_SIZE) + boot_kernel_physical_address) | 3;
    }
    int_set_handler(14, s_pagefault_handler);
    __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.phys));
}