summaryrefslogtreecommitdiffstats
path: root/arch/x86_64/pagedirectory.c
blob: cdfffeba72e79707c5ae71b1b164737e0dcf2123 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
#include "arch/page.h"
#include "arch/processor.h"
#include "klib/rbtree.h"
#include "boot.h"
#include "memory.h"
#include "string.h"
#include "print.h"

/* Statically allocated boot-time page tables.  Index [0] of each lower
 * level backs the physical-memory window, index [1] backs the kernel
 * image mapping (see vm_setup_early).  PAGEALIGN keeps each table on a
 * 4 KiB boundary as the MMU requires. */
PAGEALIGN static uint64_t s_kernel_initial_pml4[512];
PAGEALIGN static uint64_t s_kernel_initial_pml3[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml2[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml1[2][512];
/* Directory descriptor for the tables above; filled in vm_setup_early. */
static page_directory_t s_kernel_initial_pd;

/* Registry of all page directories, keyed by id (see pd_get). */
static rbtree_t s_page_directories;
/* Monotonic id source; id 0 is taken by the initial kernel directory. */
static intmax_t s_next_pdid = 0;

page_directory_t*
pd_new()
{
    page_directory_t newpd = {
        .id = s_next_pdid++,
        .phys = pm_alloc(1),
    };
    newpd.pml = (void*)pm_tovirt(newpd.phys);
    for(size_t i = 256; i < 512; i++) {
        pmle_t *kpe = &(s_kernel_initial_pd.pml)[i];
        pmle_t *ppe = &(newpd.pml)[i];
        *ppe = *kpe;
    }

    return rbtree_insert(&s_page_directories, newpd.id, &newpd);
}

/*
 * Recursively deep-copy one page-table entry.
 *
 * src/dest: virtual pointers to the source and destination tables whose
 *           entry `i` is being copied.
 * l:        levels below the pointed-at object (3 = entry in a PML4,
 *           0 = entry in a PML1, i.e. it points at a data frame).
 * i:        index of the entry to copy.
 *
 * A fresh physical frame is allocated for whatever the entry points at;
 * at l == 0 the pointed-at data page is copied byte-for-byte, otherwise
 * the pointed-at table is walked entry by entry.
 *
 * NOTE(review): large pages (PS bit) are not handled -- an entry with
 * PS set at l >= 1 points at data, not a table, and would be walked as
 * a table here.  Confirm callers never dup such mappings.
 */
static void
s_pd_dup_pml(pmle_t *src, pmle_t *dest, size_t l, size_t i)
{
    pmle_t srce = src[i];
    if(!srce.p) return; /* not present: nothing to copy */

    /* Copy the entry, then retarget it at a newly allocated frame. */
    dest[i] = srce;
    dest[i].paddr = pm_alloc(1);
    pmle_t dste = dest[i];

    /* Virtual views of the old and new target frames. */
    pmle_t *srct = (pmle_t*)pm_tovirt(srce.paddr << PAGE_SHIFT);
    pmle_t *dstt = (pmle_t*)pm_tovirt(dste.paddr << PAGE_SHIFT);

    if(l == 0) {
        /* Leaf: the frames hold data, not entries; copy the bytes. */
        memcpy(dstt, srct, PAGE_SIZE);
        return;
    }

    /* Interior: copy every child entry, recursing into present ones.
     * Note `i` is deliberately reused as the child loop index. */
    for(i = 0; i < 512; i++) {
        dstt[i] = srct[i];
        if(!srct[i].p) continue;
        s_pd_dup_pml(srct, dstt, l - 1, i);
    }
}

/*
 * Deep-copy the user-space half (PML4 entries 0-255) of an existing
 * page directory into a brand-new one.  Kernel-space mappings are
 * already shared by pd_new().
 */
page_directory_t*
pd_dup(page_directory_t *pd)
{
    page_directory_t *copy = pd_new();

    size_t entry = 0;
    while(entry < 256) {
        s_pd_dup_pml(pd->pml, copy->pml, 3, entry);
        entry++;
    }

    return copy;
}

/*
 * Look up a page directory by id in the global registry.
 * Returns whatever rbtree_find yields (presumably NULL when the id is
 * unknown -- confirm against the rbtree API).
 */
page_directory_t*
pd_get(pdid_t pdid)
{
    page_directory_t *found = rbtree_find(&s_page_directories, pdid);
    return found;
}

/*
 * Make `pd` the active page directory on the current processor.
 * The CR3 reload (and its implied TLB flush) is skipped when `pd` is
 * already active.
 */
void
pd_switch(page_directory_t *pd)
{
    processor_t *pc = processor_current();
    if(pc->pd != pd) {
        pc->pd = pd;
        __asm__ volatile("movq %0, %%cr3":: "r"(pd->phys));
    }
}

void
vm_setup_early(void)
{
    memset(s_kernel_initial_pml4, 0, PAGE_SIZE);
    memset(s_kernel_initial_pml3, 0, 2 * PAGE_SIZE);
    memset(s_kernel_initial_pml2, 0, 2 * PAGE_SIZE);
    memset(s_kernel_initial_pml1, 0, PAGE_SIZE);
    s_kernel_initial_pd = (page_directory_t){
        .phys = vm_tophys_koff((uintptr_t)&s_kernel_initial_pml4),
        .pml = (pmle_t*)&s_kernel_initial_pml4,
        .id = s_next_pdid++
    };
    processor_current()->pd = &s_kernel_initial_pd;

    /* Map first few GiBs */
    s_kernel_initial_pml4[256] =
        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml3[0])
        | 3;
    s_kernel_initial_pml3[0][0] =
        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml2[0])
        | 3;
    for(int i = 0; i < 512; i++) {
        s_kernel_initial_pml2[0][i] = (i * (PAGE_SIZE * 512)) | 0x80 | 3;
    }

    size_t kernel_pml3e = (_kernel_virtual_base >> (30)) % 512;
    size_t kernel_pml2e = (_kernel_virtual_base >> (21)) % 512;
    size_t kernel_npages = ((((uintptr_t)&_kernel_end) - _kernel_virtual_base) >> 12) + 1;
    klogf("Kernel has %i pages\n", kernel_npages);

    /* Map kernel pages */
    s_kernel_initial_pml4[511] =
        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml3[1]) | 3;
    s_kernel_initial_pml3[1][kernel_pml3e] =
        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml2[1]) | 3;
    s_kernel_initial_pml2[1][kernel_pml2e] =
        vm_tophys_koff((uintptr_t)&s_kernel_initial_pml1[0]) | 3;
    for(size_t i = 0; i < kernel_npages; i++) {
        s_kernel_initial_pml1[0][i] = (i * PAGE_SIZE) + boot_kernel_physical_address | 3;
    }
 
    extern int_state_t *_pagefault_handler(int_state_t*);
    int_handler_set(14, _pagefault_handler);
    __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.phys));
}

/*
 * Finish VM initialization once the allocator and rbtree runtime are
 * up: create the page-directory registry and switch the current
 * processor onto a fresh directory (which shares kernel mappings via
 * pd_new()).
 */
void
vm_setup(void)
{
    rbtree_new(&s_page_directories, page_directory_t);

    /* pd_switch() records the new pd on the current processor itself,
     * so the previous processor_current() lookup here was dead code. */
    pd_switch(pd_new());
}