summaryrefslogtreecommitdiffstats
path: root/arch/x86_64/memory/pml4.c
blob: b22bcfb1b6f868b87aaa4df49c68ab92ee9be6b8 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
#include "arch/x86_64/page.h"
#include "arch/x86_64/processor.h"
#include "device/processor.h"
#include "memory.h"
#include "boot.h"
#include "object.h"
#include "init.h"
#include "string.h"
#include "jove.h"
#include "error.h"
#include <stdint.h>

#define IDENTITY_BASE 0xFFFF800000000000
/* Translate a pointer inside the higher-half identity window back to the
 * physical address it maps. Inverse of pptr_tovirt_ident(). */
uintptr_t vptr_tophys(void *vptr)
{
    uintptr_t vaddr = (uintptr_t)vptr;
    return vaddr - IDENTITY_BASE;
}

/* Translate a virtual address inside the kernel image to its physical
 * address, using the image's link base (&_kernel_start) and the physical
 * load address recorded at boot (_boot_kernel_phys_base). */
physptr_t
vptr_tophys_koff(virtptr_t v)
{
    physptr_t image_offset = v - (physptr_t)&_kernel_start;
    return _boot_kernel_phys_base + image_offset;
}

/* Map a physical address into the higher-half identity window.
 * Inverse of vptr_tophys(). */
void*
pptr_tovirt_ident(physptr_t p)
{
    uintptr_t vaddr = IDENTITY_BASE + p;
    return (void*)vaddr;
}

/* Return the identity-window virtual address of the page frame referenced
 * by a page-map entry.
 *
 * Fix: `entry.paddr` is a bit-field and may promote to (signed) int before
 * the shift; widening to uintptr_t first keeps the `<< 12` well defined for
 * frame numbers with high bits set. */
void*
pmle_get_page(pmle_t entry)
{
    uintptr_t pptr = (uintptr_t)entry.paddr << 12;
    return pptr_tovirt_ident(pptr);
}

/* Decompose a virtual address into its per-level page-table indices.
 * path[0] receives the PML4 (level 4) index; each following slot descends
 * one paging level. `path` must have room for `depth` entries (depth <= 4). */
void
pml4_get_path(uintptr_t vptr, uint8_t depth, uint16_t *path)
{
    uint8_t level = 4;
    for(uint8_t i = 0; i < depth; i++, level--) {
        path[i] = PML_I_FOR_LAYER(vptr, level);
    }
}

/* Walk `depth` levels down the paging hierarchy along the index path and
 * return the entry reached, or NULL if a non-present entry blocks the walk.
 * With depth == 0 the addressed PML4 entry itself is returned, present or not.
 *
 * Fix: the original computed the next-level table pointer from the entry
 * *before* checking the depth base case and the present bit — dead work, and
 * it fabricated a pointer from a non-present (garbage) entry. The checks now
 * precede the translation. The tail recursion is also flattened to a loop. */
pmle_t*
pml4_traverse(pmle_t *pml4, uint8_t depth, uint16_t *path)
{
    pmle_t *table = pml4;
    for(;;) {
        pmle_t *pmle = &table[path[0]];
        if(depth == 0) return pmle;
        if(!pmle->p) return NULL;
        /* Descend: the entry's frame number addresses the next-level table. */
        table = pptr_tovirt_ident((uintptr_t)pmle->paddr << 12);
        depth--;
        path++;
    }
}

/* Look up the page-map entry for `vptr` at the given depth (1..4).
 * Returns the entry, or NULL if an intermediate level is not present.
 *
 * Fix: the original aliased a uint64_t through a uint16_t*, which violates
 * C's effective-type (strict aliasing) rules; a plain index array is
 * equivalent and well defined. */
pmle_t*
pml4_get_mapping(pmle_t *pml4, uint8_t depth, uintptr_t vptr)
{
    uint16_t path[4] = {0}; /* one index per paging level; depth <= 4 */

    pml4_get_path(vptr, depth, path);
    return pml4_traverse(pml4, depth - 1, path);
}

/* Map the 4 KiB physical page `pptr` at virtual address `vptr` in `pml4`.
 *
 * Returns KE_OK on success, KE_DNE if an intermediate paging level is not
 * present (caller must allocate it first), or KE_FULL if `vptr` is already
 * mapped. `pptr` is assumed page-aligned: it is OR'd directly with the flag
 * bits. The mapping is created present, writable, and user-accessible.
 *
 * Fix: the original aliased a uint64_t through a uint16_t* (strict-aliasing
 * violation); a plain index array is equivalent and well defined. */
int
pml4_try_map(pmle_t *pml4, uintptr_t pptr, uintptr_t vptr)
{
    uint16_t path[4] = {0}; /* one index per paging level */

    pml4_get_path(vptr, 4, path);
    /* Depth 3 walk stops at the page-table (PML1) entry for vptr. */
    pmle_t *mapping = pml4_traverse(pml4, 3, path);

    if(mapping == NULL) return KE_DNE;
    if(mapping->p) return KE_FULL;

    mapping->value = pptr | PAGE_PRESENT | PAGE_RW | PAGE_US;
    return KE_OK;
}

/* Statically allocated boot page tables. x86_64 requires 4 KiB alignment for
 * paging structures; each table is 512 eight-byte entries (one 4 KiB page).
 * The s_kernel_* chain maps the kernel image in the higher half; the
 * s_idmap_* pair backs the physical-memory identity window at IDENTITY_BASE. */
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml4[512]; // Page L4
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml3[512]; // Page L3
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml2[512]; // Page directory
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml1[512]; // Page table

__attribute__((aligned(0x1000))) pmle_t s_idmap_pml3[512];
__attribute__((aligned(0x1000))) pmle_t s_idmap_pml2[512];

/* Build the kernel's initial address space and switch to it.
 *
 * Creates two regions in s_kernel_pml4:
 *   - entry 256 (start of the higher half): a 1 GiB identity window over
 *     physical memory, using 2 MiB large pages;
 *   - entry 511 (top of the address space): the kernel image itself, mapped
 *     with 4 KiB pages at its linked virtual address.
 * Then loads the new PML4 into CR3 and registers it in the init directory.
 *
 * Must run while the boot mapping still covers the kernel, since the tables
 * are written through their current virtual addresses.
 */
void
pml4_setup_init(void)
{
    /* Each table is 512 * 8 bytes = 0x1000; clear all of them. */
    memset(s_kernel_pml4, 0, 0x1000);
    memset(s_kernel_pml3, 0, 0x1000);
    memset(s_kernel_pml2, 0, 0x1000);
    memset(s_kernel_pml1, 0, 0x1000);

    memset(s_idmap_pml3, 0, 0x1000);
    memset(s_idmap_pml2, 0, 0x1000);

    /* Linker-provided bounds of the kernel image (virtual, link address). */
    virtptr_t kernel_start = (virtptr_t)&_kernel_start;
    virtptr_t kernel_end = (virtptr_t)&_kernel_end;

    size_t kernel_size = kernel_end - kernel_start;
    size_t kernel_size_pages = (kernel_size / 0x1000) + 1; /* round up */

    /* Page tables must be referenced by *physical* address in entries and
     * in CR3; translate through the kernel's load offset. */
    physptr_t kernel_pml4_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml4);
    physptr_t kernel_pml3_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml3);
    physptr_t kernel_pml2_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml2);
    physptr_t kernel_pml1_base = vptr_tophys_koff((virtptr_t)&s_kernel_pml1);

    physptr_t idmap_pml3_base = vptr_tophys_koff((virtptr_t)&s_idmap_pml3);
    physptr_t idmap_pml2_base = vptr_tophys_koff((virtptr_t)&s_idmap_pml2);

    /* Record the new root table as this processor's page directory. */
    processor_t *processor = processor_current();
    processor->pdir = kernel_pml4_base;

    //Map memory identity pages.
    /* Flag bits: 1 = present, 2 = writable, 0x80 = PS (2 MiB page).
     * PML4 slot 256 is the first higher-half entry, i.e. IDENTITY_BASE.
     * 512 PD entries * 2 MiB = 1 GiB of physical memory identity-mapped. */
    s_kernel_pml4[256].value = idmap_pml3_base | 2 | 1;
    s_idmap_pml3[0].value = idmap_pml2_base | 2 | 1;
    for(int i = 0; i < 512; i++) {
        s_idmap_pml2[i].value = (i * 0x1000 * 512) | 0x80 | 2 | 1;
    }
    
    //Map the kernel to himem.

    /* Table indices of the kernel image at each paging level. The image is
     * assumed to fit within one PML1 (2 MiB span, single PML2 entry).
     * NOTE(review): kernel_size_pages is computed but unused; the 4 KiB loop
     * below is bounded by PML1 indices instead. */
    pmli_t kernel_pml3_i = PML_I_FOR_LAYER(kernel_start, 3);
    pmli_t kernel_pml2_i = PML_I_FOR_LAYER(kernel_start, 2);
    pmli_t kernel_pml1_ib = PML_I_FOR_LAYER(kernel_start, 1);
    pmli_t kernel_pml1_ie = PML_I_FOR_LAYER(kernel_end, 1) + 1;

    s_kernel_pml4[511].value = kernel_pml3_base | 2 | 1;
    s_kernel_pml3[kernel_pml3_i].value = kernel_pml2_base | 2 | 1;
    s_kernel_pml2[kernel_pml2_i].value = kernel_pml1_base | 2 | 1;
    /* Flags 3 = present | writable. NOTE(review): phys = i*0x1000 + load
     * base is only consistent with vptr_tophys_koff() if kernel_pml1_ib is 0,
     * i.e. the kernel's virtual base is 2 MiB-aligned — confirm against the
     * linker script. */
    for(pmli_t i = kernel_pml1_ib; i < kernel_pml1_ie; i++) {
        s_kernel_pml1[i].value = ((i * 0x1000) + _boot_kernel_phys_base) | 3;
    }

    /* Switch to the new address space. */
    __asm__ volatile("mov %0, %%cr3":: "r"(kernel_pml4_base));

    //Add page mapping object to init directory.
    _initDirectory.entries[INIT_OBJECT_PAGEMAP] = (objdir_entry_t) {
        .type = KO_MEMORY_MAPPING,
        .data = kernel_pml4_base
    };
}

/* Initialize a fresh PML4: the lower half (entries 0-255, user space) starts
 * empty, while the upper half (entries 256-511, 0x800 bytes) is shared with
 * the kernel's PML4 so every address space sees the same kernel mappings. */
void
pml4_setup(pmle_t *pml4)
{
    memcpy(&pml4[256], &s_kernel_pml4[256], 0x800);
    memset(pml4, 0, 0x800);
}