path: root/arch/x86_64/page_directory.c
#include "arch/x86_64/page.h"
#include "arch/x86_64/object.h"
#include "device/processor.h"
#include "print.h"
#include "memory.h"
#include "object.h"
#include "string.h"
#include "jove.h"
#include <stdint.h>

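/* _kernel_start/_kernel_end are linker-script symbols marking the bounds
 * of the kernel image; they are assumed to be declared in one of the
 * headers above. */

/* Sanity check (assumption): pmle_t is the 8-byte x86_64 page-table
 * entry, matching the 512-entry, 0x1000-byte tables below. */
_Static_assert(sizeof(pmle_t) == 8, "pmle_t must be an 8-byte page-table entry");

/* Physical address at which the bootloader loaded the kernel image. */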
physptr_t s_kpbase(void);

#if defined(__limine__)

#include "boot/limine/limine.h"
static volatile struct limine_kernel_address_request s_kaddr_req = {
    .id = LIMINE_KERNEL_ADDRESS_REQUEST,
    .revision = 0
};

/* Assumes the bootloader answered the request; .response is NULL when it
 * did not. */
physptr_t s_kpbase(void) { return s_kaddr_req.response->physical_base; }

#endif

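/* Translate a virtual address inside the kernel image to its physical
 * address via the constant link-base to load-base offset. */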
physptr_t
vmem_tophys_koff(virtptr_t v)
{
    return v - (virtptr_t)&_kernel_start + s_kpbase();
}

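/* Base of the higher-half direct map: every physical address P is also
 * reachable at virtual address P + IDENTITY_BASE (set up in vmem_setup). */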
#define IDENTITY_BASE 0xFFFF800000000000
void*
vmem_phys_tovirt(physptr_t p)
{
    return (void*)(p + IDENTITY_BASE);
}

uintptr_t
vmem_ident_tophys(void *vptr)
{
    return ((uintptr_t)vptr) - IDENTITY_BASE;
}

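/* Return the direct-map virtual address of the page frame an entry
 * references. */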
void*
pmle_get_page(pmle_t entry)
{
    /* Widen before shifting in case paddr is declared as a narrow bitfield. */
    uintptr_t pptr = (uintptr_t)entry.paddr << 12;
    return vmem_phys_tovirt(pptr);
}

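/* The table level that owns this entry is stashed in its OS-available bits. */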
uint8_t
pmle_level(pmle_t entry)
{
    return entry.osflg;
}

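/* Statically allocated, page-aligned tables for the boot address space. */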
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml4[512]; // Page map level 4
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml3[512]; // Page directory pointer table
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml2[512]; // Page directory
__attribute__((aligned(0x1000))) pmle_t s_kernel_pml1[512]; // Page table

__attribute__((aligned(0x1000))) pmle_t s_idmap_pml3[512];
__attribute__((aligned(0x1000))) pmle_t s_idmap_pml2[512];

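/* Build the initial kernel address space: a 1 GiB identity window at
 * IDENTITY_BASE plus a higher-half mapping of the kernel image, then
 * switch CR3 over to the new top-level table. */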
void
vmem_setup(void)
{
    memset(s_kernel_pml4, 0, sizeof(s_kernel_pml4));
    memset(s_kernel_pml3, 0, sizeof(s_kernel_pml3));
    memset(s_kernel_pml2, 0, sizeof(s_kernel_pml2));
    memset(s_kernel_pml1, 0, sizeof(s_kernel_pml1));

    memset(s_idmap_pml3, 0, sizeof(s_idmap_pml3));
    memset(s_idmap_pml2, 0, sizeof(s_idmap_pml2));

    virtptr_t kernel_start = (virtptr_t)&_kernel_start;
    virtptr_t kernel_end = (virtptr_t)&_kernel_end;

    size_t kernel_size = kernel_end - kernel_start;
    size_t kernel_size_pages = (kernel_size + 0xFFF) / 0x1000; // round up to whole pages

    physptr_t kernel_pml4_base = vmem_tophys_koff((virtptr_t)&s_kernel_pml4);
    physptr_t kernel_pml3_base = vmem_tophys_koff((virtptr_t)&s_kernel_pml3);
    physptr_t kernel_pml2_base = vmem_tophys_koff((virtptr_t)&s_kernel_pml2);
    physptr_t kernel_pml1_base = vmem_tophys_koff((virtptr_t)&s_kernel_pml1);

    physptr_t idmap_pml3_base = vmem_tophys_koff((virtptr_t)&s_idmap_pml3);
    physptr_t idmap_pml2_base = vmem_tophys_koff((virtptr_t)&s_idmap_pml2);

    processor_t *processor = processor_current();
    processor->pdir = kernel_pml4_base;

    // Identity-map the first 1 GiB of physical memory at IDENTITY_BASE
    // using 2 MiB pages (0x80 = page-size bit, 2 = writable, 1 = present).
    s_kernel_pml4[256].value = idmap_pml3_base | 2 | 1;
    s_idmap_pml3[0].value = idmap_pml2_base | 2 | 1;
    for(int i = 0; i < 512; i++) {
        s_idmap_pml2[i].value = ((uintptr_t)i * 0x1000 * 512) | 0x80 | 2 | 1;
    }

    // Map the kernel image into the higher half.

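    /* PML_I_FOR_LAYER(v, n) is assumed to yield the 9-bit index into the
     * level-n table for virtual address v (bits 12 + 9*(n-1) and up). */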
    pmli_t kernel_pml3_i = PML_I_FOR_LAYER(kernel_start, 3);
    pmli_t kernel_pml2_i = PML_I_FOR_LAYER(kernel_start, 2);
    pmli_t kernel_pml1_ib = PML_I_FOR_LAYER(kernel_start, 1);
    pmli_t kernel_pml1_ie = PML_I_FOR_LAYER(kernel_end, 1) + 1;

    s_kernel_pml4[511].value = kernel_pml3_base | 2 | 1;
    s_kernel_pml3[kernel_pml3_i].value = kernel_pml2_base | 2 | 1;
    s_kernel_pml2[kernel_pml2_i].value = kernel_pml1_base | 2 | 1;
    // The image is assumed to fit in the single page table above; offset
    // from the first slot so the arithmetic also holds if the kernel's
    // link address is not 2 MiB aligned.
    for(pmli_t i = kernel_pml1_ib; i < kernel_pml1_ie; i++) {
        s_kernel_pml1[i].value = (((i - kernel_pml1_ib) * 0x1000) + s_kpbase()) | 3;
    }

    // Install the new top-level table; the "memory" clobber keeps the
    // compiler from reordering the table writes past the CR3 switch.
    __asm__ volatile("mov %0, %%cr3" :: "r"(kernel_pml4_base) : "memory");

    // Add the page-mapping object to the init directory.
    _initDirectory.entries[INIT_OBJECT_PAGEMAP] = (objdir_entry_t) {
        .type = KO_MEMORY_MAPPING,
        .data = kernel_pml4_base | 3
    };
}