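/*
 * arch/x86_64/page.c
 * 4-level (PML4) page-table management for x86_64: walking the paging
 * hierarchy, reading and installing mappings, and allocating paging
 * structures on demand. A small cache (s_state_cache) memoizes the most
 * recent walk so operations on nearby addresses skip re-resolving the
 * upper levels.
 */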
#include "arch/x86_64/page.h"
#include "klib/rbtree.h"
#include "arch/processor.h"
#include "string.h"
#include "jove.h"
#include "memory.h"
#include "print.h"

extern void *_kernel_end;

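/* Node in a singly linked list of page directories. */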
struct PageDirectoryListEntry {
    struct PageDirectoryListEntry *next;
    page_directory_t pd;
};

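/*
 * Cache of the most recent page-table walk. For each paging level l,
 * pmli[l] is the last index resolved at that level and pml[l] points to
 * the table that entry named. Only valid for the directory in `pd`;
 * callers serialize access through the directory's lock.
 */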
static struct PageStateCache {
    page_directory_t *pd;
    size_t pmli[4];
    pmle_t *pml[4];
} s_state_cache;

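/*
 * Address-space layout: userland occupies the lower canonical half,
 * starting above address zero; the higher half holds the physical-memory
 * map (the first GiB of kernel space) and the rest of kernel memory.
 */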
const uintptr_t USERLAND_MEMORY_BASE = KiB;
const uintptr_t USERLAND_MEMORY_LIMIT = 0x00007FFFFFFFFFFFULL;

const uintptr_t PHYSMAP_MEMORY_BASE = 0xFFFF800000000000ULL;
const uintptr_t PHYSMAP_MEMORY_LIMIT = PHYSMAP_MEMORY_BASE + (1 * GiB);

const uintptr_t KERNEL_MEMORY_BASE = 0xFFFF800000000000ULL;
const uintptr_t KERNEL_MEMORY_LIMIT = 0xFFFFFFFFFFFFFFFFULL;

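/* Extract the 9-bit index into the level-l paging structure from a
 * virtual address (l = 0 selects the PT index, l = 3 the PML4 index). */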
static size_t
s_paging_pmli(size_t l, uintptr_t addr)
{
    size_t shift = (PAGE_SHIFT + (9 * l));
    return (addr & (0x1FFULL << shift)) >> shift;
}

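/* Resolve the level-l entry for `virt` in `pml` and return the table it
 * points to, allocating and zeroing a fresh table if the entry is not
 * present. The result is memoized in s_state_cache. */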
static pmle_t*
s_paging_fetch_table(pmle_t *pml, size_t l, uintptr_t virt)
{
    size_t pmli = s_paging_pmli(l, virt);
    if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
        return s_state_cache.pml[l];

    pmle_t entry = pml[pmli];
    bool entry_new = false;
    if(!entry.p) {
        /* Allocate a frame for the missing table; the frame is
         * page-aligned, so the entry's flag bits start out clear. */
        entry_new = true;
        entry.value = pm_alloc(1);
        entry.p = 1;
        entry.rw = 1;
        entry.us = 1;
        pml[pmli] = entry;
    }
    pmle_t *table = (pmle_t*)(pm_tovirt(entry.paddr << PAGE_SHIFT));
    if(entry_new) memset(table, 0, PAGE_SIZE);

    s_state_cache.pmli[l] = pmli;
    s_state_cache.pml[l] = table;
    /* Any tables cached below this level were reached through a
     * different branch of the walk and are now stale. */
    for(size_t i = 0; i < l; i++)
        s_state_cache.pml[i] = NULL;
    return table;
}

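/* Walk `pd` from the PML4 down to the page table covering `virt`,
 * creating missing levels; the PT is left in s_state_cache.pml[1]. */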
static void
s_paging_cache_tables(page_directory_t *pd, uintptr_t virt)
{
    pmle_t *pml4 = pd->pml;
    /* Rebuild the cache when switching directories; remember the owner
     * so later calls on the same directory can reuse it. */
    if(s_state_cache.pd != pd) {
        memset(&s_state_cache, 0, sizeof(s_state_cache));
        s_state_cache.pd = pd;
    }

    pmle_t *pml3 = s_paging_fetch_table(pml4, 3, virt);
    pmle_t *pml2 = s_paging_fetch_table(pml3, 2, virt);
    s_paging_fetch_table(pml2, 1, virt);
}

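/* Read-only counterpart of s_paging_fetch_table: resolve the level-l
 * entry for `virt` in `pt`, returning NULL (instead of allocating) when
 * `pt` is absent or the entry is not present. */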
static pmle_t*
s_paging_get_table(pmle_t *pt, size_t l, uintptr_t virt)
{
    if(pt == NULL) return NULL;
    size_t pmli = s_paging_pmli(l, virt);
    if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
        return s_state_cache.pml[l];

    pmle_t entry = pt[pmli];
    if(!entry.p) return NULL;
    pmle_t *table = (pmle_t*)(pm_tovirt(entry.paddr << PAGE_SHIFT));

    s_state_cache.pmli[l] = pmli;
    s_state_cache.pml[l] = table;
    /* Invalidate the cached lower levels; they belong to the
     * previously walked branch. */
    for(size_t i = 0; i < l; i++)
        s_state_cache.pml[i] = NULL;
    return table;
}

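/* Look up the mapping of `addr` in `pd`; the returned mapping has
 * pf.present == 0 if no page table covers the address. */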
page_mapping_t
vm_pd_mapping_get(page_directory_t *pd, uintptr_t addr)
{
    spinlock_acquire(pd->lock);
    page_mapping_t mapping = { 0 };

    pmle_t *pml4 = pd->pml;
    if(s_state_cache.pd != pd) {
        memset(&s_state_cache, 0, sizeof(s_state_cache));
        s_state_cache.pd = pd;
    }

    pmle_t *pml3 = s_paging_get_table(pml4, 3, addr);
    pmle_t *pml2 = s_paging_get_table(pml3, 2, addr);
    pmle_t *pml1 = s_paging_get_table(pml2, 1, addr);
    if(pml1 == NULL) goto release_return;

    size_t pml1i = s_paging_pmli(0, addr);
    pmle_t pml1e = pml1[pml1i];

    mapping = (page_mapping_t) {
        .phys = (pml1e.paddr << PAGE_SHIFT) & ~PAGE_MASK,
        .pf = {
            .present = pml1e.p,
            .writeable = pml1e.rw,
            .useraccess = pml1e.us,
            .executable = !pml1e.xd
        }
    };
release_return:
    spinlock_release(pd->lock);
    return mapping;
}

page_mapping_t vm_mapping_get(uintptr_t addr)
{ return vm_pd_mapping_get(pd_current(), addr); }

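/* Install `mapping` for `virt` in `pd`, creating any missing paging
 * structures along the way. */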
void
mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt)
{
    spinlock_acquire(pd->lock);
    s_paging_cache_tables(pd, virt);
    pmle_t *pml1 = s_state_cache.pml[1]; /* the PT cached by the walk above */
    size_t pml1i = s_paging_pmli(0, virt);

    pml1[pml1i] = (pmle_t) {
        .p = mapping.pf.present,
        .rw = mapping.pf.writeable,
        .us = mapping.pf.useraccess,
        .xd = !mapping.pf.executable,
        .paddr = mapping.phys >> PAGE_SHIFT
    };
    spinlock_release(pd->lock);
}

void mem_set_mapping(page_mapping_t mapping, uintptr_t virt)
{ mem_set_mapping_as(pd_current(), mapping, virt); }

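/* Back every page between `from` and `to` with a physical frame: the
 * range is rounded out to page boundaries, at least one page is ensured,
 * and pages that are already present are left untouched. */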
void
vm_pd_ensure(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg)
{
    spinlock_acquire(pd->lock);
    from &= ~(PAGE_SIZE - 1);               /* round down to a page boundary */
    if(to % PAGE_SIZE > 0)
        to += PAGE_SIZE - (to % PAGE_SIZE); /* round up to a page boundary */

    if(to < from) to = from;
    size_t pages = (to - from) >> PAGE_SHIFT;
    if(pages == 0) pages = 1;               /* always ensure at least one page */
    for(size_t i = 0; i < pages; i++) {
        uintptr_t waddr = from + (i << PAGE_SHIFT);
        s_paging_cache_tables(pd, waddr);
        pmle_t *pml1 = s_state_cache.pml[1];
        size_t pml1i = s_paging_pmli(0, waddr);

        if(!pml1[pml1i].p) {
            physptr_t phys = pm_alloc(1);
            pml1[pml1i] = (pmle_t) {
                .p = flg.present,
                .rw = flg.writeable,
                .us = flg.useraccess,
                .xd = !flg.executable,
                .paddr = phys >> PAGE_SHIFT
            };
        }
    }
    spinlock_release(pd->lock);
}

void vm_ensure(uintptr_t from, uintptr_t to, page_flags_t flg)
{ vm_pd_ensure(pd_current(), from, to, flg); }