Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/elf.c            |  10
-rw-r--r--  arch/x86_64/paging.c         | 234
-rw-r--r--  arch/x86_64/paging.h         |  20
-rw-r--r--  arch/x86_64/syscall_setup.S  |   3
-rw-r--r--  arch/x86_64/tasking.c        |  13
5 files changed, 144 insertions(+), 136 deletions(-)
diff --git a/arch/x86_64/elf.c b/arch/x86_64/elf.c
index 60b8f13..969cbf0 100644
--- a/arch/x86_64/elf.c
+++ b/arch/x86_64/elf.c
@@ -37,7 +37,15 @@ elf_load(const void *data, size_t len)
struct ELF_phdr *phdr = &phdrs[phdri];
void *pdata = (void*)phdr->p_vaddr;
- mem_ensure_range(phdr->p_vaddr, phdr->p_vaddr + phdr->p_memsz, true, true);
+ mem_ensure_range(
+ phdr->p_vaddr,
+ phdr->p_vaddr + phdr->p_memsz,
+ (page_flags_t) {
+ .present = true,
+ .writeable = true,
+ .useraccess = true,
+ .executable = true
+ });
if(phdr->p_type == PT_LOAD)
{
memcpy(pdata, (void*)((uintptr_t)data + phdr->p_offset), phdr->p_filesz);
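
Note: the new mem_ensure_range() call above takes a page_flags_t by value. That type is declared in mem/memory.h, which is not part of this diff; the following is only a sketch inferred from the designated initializer above.

    #include <stdbool.h>

    /* Sketch only -- the real definition lives in mem/memory.h. */
    typedef struct page_flags {
        bool present;     /* P: map the page */
        bool writeable;   /* RW: allow writes */
        bool useraccess;  /* US: allow ring-3 access */
        bool executable;  /* cleared -> the mapping gets the XD/NX bit */
    } page_flags_t;
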
diff --git a/arch/x86_64/paging.c b/arch/x86_64/paging.c
index 9e8a5ed..dc27ca2 100644
--- a/arch/x86_64/paging.c
+++ b/arch/x86_64/paging.c
@@ -1,9 +1,10 @@
#include "paging.h"
#include "interrupt.h"
-#include <stddef.h>
-#include "lib/jove.h"
#include "io/log.h"
+#include "lib/jove.h"
#include "lib/string.h"
+#include "lib/hashtable.h"
+#include "mem/memory.h"
#include "boot/boot.h"
extern void *_kernel_end;
@@ -12,9 +13,16 @@ PAGEALIGN static uint64_t s_kernel_initial_pml4[512];
PAGEALIGN static uint64_t s_kernel_initial_pml3[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml2[2][512];
PAGEALIGN static uint64_t s_kernel_initial_pml1[2][512];
-static struct PageDirectory s_kernel_initial_pd;
+static intmax_t s_next_pdid = 0;
+static page_directory_t s_kernel_initial_pd;
+
+page_directory_t *current_page_directory;
-struct PageDirectory *mem_current_pd;
+struct PageStateCache {
+ page_directory_t *pd;
+ size_t pmli[4];
+ pmle_t *pml[4];
+} s_state_cache;
physptr_t
mem_linear_tophys_koffset(uintptr_t virt)
@@ -29,17 +37,20 @@ mem_phys_tolinear(physptr_t phys)
}
static size_t
-s_paging_pmle(size_t l, uintptr_t addr)
+s_paging_pmli(size_t l, uintptr_t addr)
{
size_t shift = (12 + (9 * l));
return (addr & (0x1FFULL << shift)) >> shift;
}
-static union PageEntry*
-s_paging_fetch_table(union PageEntry *pt, size_t l, uintptr_t virt)
+static pmle_t*
+s_paging_fetch_table(pmle_t *pml, size_t l, uintptr_t virt)
{
- size_t pmle = s_paging_pmle(l, virt);
- union PageEntry entry = pt[pmle];
+ size_t pmli = s_paging_pmli(l, virt);
+ if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
+ return s_state_cache.pml[l];
+
+ pmle_t entry = pml[pmli];
bool entry_new = false;
if(!entry.p) {
entry_new = true;
@@ -47,122 +58,134 @@ s_paging_fetch_table(union PageEntry *pt, size_t l, uintptr_t virt)
entry.p = 1;
entry.rw = 1;
entry.us = 1;
- pt[pmle] = entry;
+ pml[pmli] = entry;
}
- union PageEntry *table = (union PageEntry*)(mem_phys_tolinear(entry.paddr << 12));
- if(entry_new) memset(table, 0, PAGESIZE);
+ pmle_t *table = (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
+ if(entry_new) memset(table, 0, PAGE_SIZE);
+
+ s_state_cache.pmli[l] = pmli;
+ s_state_cache.pml[l] = table;
return table;
}
-static union PageEntry*
-s_paging_get_table(union PageEntry *pt, size_t l, uintptr_t virt)
+static void
+s_paging_cache_tables(page_directory_t *pd, uintptr_t virt)
{
- if(pt == NULL) return NULL;
- size_t pmle = s_paging_pmle(l, virt);
- union PageEntry entry = pt[pmle];
- if(!entry.p) return NULL;
- return (union PageEntry*)(mem_phys_tolinear(entry.paddr << 12));
+ pmle_t *pml4 = (pmle_t*)pd->virt;
+ if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
+
+ pmle_t *pml3 = s_paging_fetch_table(pml4, 3, virt);
+ pmle_t *pml2 = s_paging_fetch_table(pml3, 2, virt);
+ pmle_t *pml1 = s_paging_fetch_table(pml2, 1, virt);
}
-physptr_t
-mem_linear_tophys(uintptr_t virt)
+static pmle_t*
+s_paging_get_table(pmle_t *pt, size_t l, uintptr_t virt)
{
- struct PageDirectory *pd = mem_current_pd;
- union PageEntry *pml3 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml1 = s_paging_get_table(pd->pml4_vaddr, 3, virt);
- if(pml1 == NULL) return 0;
-
- size_t pml1i = s_paging_pmle(0, virt);
+ if(pt == NULL) return NULL;
+ size_t pmli = s_paging_pmli(l, virt);
+ if(s_state_cache.pmli[l] == pmli && s_state_cache.pml[l] != NULL)
+ return s_state_cache.pml[l];
- if(!pml1[pml1i].p) return 0;
- return pml1[pml1i].paddr << 12;
+ pmle_t entry = pt[pmli];
+ if(!entry.p) return NULL;
+ return (pmle_t*)(mem_phys_tolinear(entry.paddr << 12));
}
-bool
-mem_check_ptr(const void *ptr)
+page_mapping_t
+mem_get_mapping_as(page_directory_t *pd, uintptr_t addr)
{
- if(ptr == NULL) return false;
- return mem_linear_tophys((uintptr_t)ptr) != 0;
-}
+ spinlock_acquire(pd->lock);
+ page_mapping_t mapping = { 0 };
+
+ pmle_t *pml4 = (pmle_t*)pd->virt;
+ if(s_state_cache.pd != pd) memset(&s_state_cache, 0, sizeof(s_state_cache));
-void
-mem_paging_map4k(struct PageDirectory *pd, physptr_t phys, uintptr_t virt, uint8_t flg)
-{
- union PageEntry *pml3 = s_paging_fetch_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_fetch_table(pml3, 2, virt);
- union PageEntry *pml1 = s_paging_fetch_table(pml2, 1, virt);
- size_t pml1e = s_paging_pmle(0, virt);
-
- pml1[pml1e] = (union PageEntry) {
- .p = (flg & 1) > 0,
- .rw = (flg & 2) > 0,
- .us = (flg & 4) > 0,
- .paddr = phys >> 12
+ pmle_t *pml3 = s_paging_get_table(pml4, 3, addr);
+ pmle_t *pml2 = s_paging_get_table(pml3, 2, addr);
+ pmle_t *pml1 = s_paging_get_table(pml2, 1, addr);
+ if(pml1 == NULL) goto release_return;
+
+ size_t pml1i = s_paging_pmli(0, addr);
+ pmle_t pml1e = pml1[pml1i];
+
+ mapping = (page_mapping_t) {
+ .phys = (pml1e.paddr << 12) & ~PAGE_MASK,
+ .pf = {
+ .present = pml1e.p,
+ .writeable = pml1e.rw,
+ .useraccess = pml1e.us,
+ .executable = !pml1e.xd
+ }
};
+release_return:
+ spinlock_release(pd->lock);
+ return mapping;
}
-union PageEntry
-mem_paging_fetch4k(struct PageDirectory *pd, uintptr_t virt)
-{
- union PageEntry *pml3 = s_paging_fetch_table(pd->pml4_vaddr, 3, virt);
- union PageEntry *pml2 = s_paging_fetch_table(pml3, 2, virt);
- union PageEntry *pml1 = s_paging_fetch_table(pml2, 1, virt);
- return pml1[s_paging_pmle(0, virt)];
-}
+page_mapping_t mem_get_mapping(uintptr_t addr)
+{ return mem_get_mapping_as(current_page_directory, addr); }
-void
-mem_pd_ensure_4k(struct PageDirectory *pd, uintptr_t virt, uint8_t flg)
-{
- union PageEntry pml1e = mem_paging_fetch4k(pd, virt);
- if(!pml1e.p) {
- uintptr_t phys = mem_phys_alloc(1);
- mem_paging_map4k(pd, phys, virt, flg);
- }
-}
-void
-mem_pd_ensure_range(struct PageDirectory *pd, uintptr_t from, uintptr_t to, uint8_t flg)
+bool
+mem_check_ptr(const void *ptr)
{
- from &= ~0xFFF;
- for(; from < to; from += PAGESIZE)
- mem_pd_ensure_4k(pd, from, flg);
+ return mem_get_mapping((uintptr_t)ptr).pf.present != 0;
}
void
-mem_ensure_range_for(void *pd, uintptr_t from, uintptr_t to, bool rw, bool user)
+mem_set_mapping_as(page_directory_t *pd, page_mapping_t mapping, uintptr_t virt)
{
- mem_pd_ensure_range((struct PageDirectory*)pd, from, to, 1 | (rw << 1) | (user << 2));
+ spinlock_acquire(pd->lock);
+ s_paging_cache_tables(pd, virt);
+ pmle_t *pml1 = s_state_cache.pml[1]; /* leaf table cached by s_paging_cache_tables */
+ size_t pml1i = s_paging_pmli(0, virt);
+
+ pml1[pml1i] = (pmle_t) {
+ .p = mapping.pf.present,
+ .rw = mapping.pf.writeable,
+ .us = mapping.pf.useraccess,
+ .xd = !mapping.pf.executable,
+ .paddr = mapping.phys >> 12
+ };
+ spinlock_release(pd->lock);
}
+void mem_set_mapping(page_mapping_t mapping, uintptr_t virt)
+{ mem_set_mapping_as(current_page_directory, mapping, virt); }
+
void
-mem_ensure_range(uintptr_t from, uintptr_t to, bool rw, bool user)
+mem_ensure_range_as(page_directory_t *pd, uintptr_t from, uintptr_t to, page_flags_t flg)
{
- mem_ensure_range_for(mem_current_pd, from, to, rw, user);
-}
+ spinlock_acquire(pd->lock);
+ from &= ~(PAGE_SIZE - 1);
-void mem_pd_new(struct PageDirectory *pd)
-{
- physptr_t pml4p = mem_phys_alloc(1);
- union PageEntry *pml4 = (union PageEntry*)mem_phys_tolinear(pml4p);
- memset(pml4, 0, PAGESIZE);
- memcpy(&pml4[256], &pd->pml4_vaddr[256], PAGESIZE / 2);
-
- *pd = (struct PageDirectory){
- .pml4_vaddr = pml4,
- .pml4_paddr = pml4p,
- .references = 1
- };
-}
+ if(to < from) to = from;
+ size_t pages = (to - from + PAGE_SIZE - 1) >> 12; /* round up to cover a partial tail page */
+ if(pages == 0) pages = 1;
+ for(size_t i = 0; i < pages; i++) {
+ uintptr_t waddr = from + (i << 12);
+ s_paging_cache_tables(pd, waddr);
+ pmle_t *pml1 = s_state_cache.pml[1];
+ size_t pml1i = s_paging_pmli(0, waddr);
-void mem_pd_clone(struct PageDirectory *pd, struct PageDirectory *parent)
-{
- mem_pd_new(pd);
- for(size_t i = 0; i < 256; i++) {
- //TODO: Impl pd cloning
+ if(!pml1[pml1i].p) {
+ physptr_t phys = mem_phys_alloc(1);
+ pml1[pml1i] = (pmle_t) {
+ .p = flg.present,
+ .rw = flg.writeable,
+ .us = flg.useraccess,
+ .xd = !flg.executable,
+ .paddr = phys >> 12
+ };
+ }
}
+ spinlock_release(pd->lock);
}
+void mem_ensure_range(uintptr_t from, uintptr_t to, page_flags_t flg)
+{ mem_ensure_range_as(current_page_directory, from, to, flg); }
+
struct Registers*
s_pagefault_handler(struct Registers *state)
{
@@ -189,16 +212,17 @@ s_pagefault_handler(struct Registers *state)
void
mem_paging_setup(void)
{
- memset(s_kernel_initial_pml4, 0, PAGESIZE);
- memset(s_kernel_initial_pml3, 0, 2 * PAGESIZE);
- memset(s_kernel_initial_pml2, 0, 2 * PAGESIZE);
- memset(s_kernel_initial_pml1, 0, PAGESIZE);
- s_kernel_initial_pd = (struct PageDirectory){
- .pml4_vaddr = (union PageEntry*)&s_kernel_initial_pml4,
- .pml4_paddr = mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml4),
- .references = 1
+ memset(s_kernel_initial_pml4, 0, PAGE_SIZE);
+ memset(s_kernel_initial_pml3, 0, 2 * PAGE_SIZE);
+ memset(s_kernel_initial_pml2, 0, 2 * PAGE_SIZE);
+ memset(s_kernel_initial_pml1, 0, PAGE_SIZE);
+ s_kernel_initial_pd = (page_directory_t){
+ .phys = mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml4),
+ .virt = (pmle_t*)&s_kernel_initial_pml4,
+ .ref = 1,
+ .id = s_next_pdid++
};
- mem_current_pd = &s_kernel_initial_pd;
+ current_page_directory = &s_kernel_initial_pd;
/* Map first few GiBs */
s_kernel_initial_pml4[256] =
@@ -208,7 +232,7 @@ mem_paging_setup(void)
mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml2[0])
| 3;
for(int i = 0; i < 512; i++) {
- s_kernel_initial_pml2[0][i] = (i * (PAGESIZE * 512)) | 0x80 | 3;
+ s_kernel_initial_pml2[0][i] = (i * (PAGE_SIZE * 512)) | 0x80 | 3;
}
size_t kernel_pml4e = (boot_kernel_virtual_base >> (39));
@@ -225,9 +249,9 @@ mem_paging_setup(void)
s_kernel_initial_pml2[1][kernel_pml2e] =
mem_linear_tophys_koffset((uintptr_t)&s_kernel_initial_pml1[0]) | 3;
for(int i = 0; i < kernel_npages; i++) {
- s_kernel_initial_pml1[0][i] = (i * PAGESIZE) + boot_kernel_physical_address | 3;
+ s_kernel_initial_pml1[0][i] = (i * PAGE_SIZE) + boot_kernel_physical_address | 3;
}
int_set_handler(14, s_pagefault_handler);
- __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.pml4_paddr));
+ __asm__ volatile("mov %0, %%cr3":: "r"(s_kernel_initial_pd.phys));
}
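
The rewritten paging.c relies on page_directory_t and page_mapping_t from mem/memory.h, which is outside this diff. A rough sketch of the fields the code above actually touches (names inferred from the call sites; the lock type is an assumption):

    typedef struct PageMapping {
        physptr_t    phys;  /* physical frame address of the mapping */
        page_flags_t pf;    /* present / writeable / useraccess / executable */
    } page_mapping_t;

    typedef struct PageDirectory {
        spinlock_t lock;    /* assumed; held across every table walk or update */
        physptr_t  phys;    /* physical address of the PML4, loaded into CR3 */
        pmle_t    *virt;    /* kernel-linear pointer to the same PML4 */
        size_t     ref;     /* reference count (dropped in task_free below) */
        intmax_t   id;      /* handed out from s_next_pdid */
    } page_directory_t;
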
diff --git a/arch/x86_64/paging.h b/arch/x86_64/paging.h
index 1e88a0b..28dfad2 100644
--- a/arch/x86_64/paging.h
+++ b/arch/x86_64/paging.h
@@ -2,9 +2,8 @@
#define JOVE_ARCH_x86_64_PAGING_H 1
#include <stdint.h>
-#include "mem/memory.h"
-union PageEntry
+typedef union PageMappingLevelEntry
{
struct {
uint8_t p : 1; /* Present */
@@ -24,21 +23,6 @@ union PageEntry
uint8_t xd : 1;
}__attribute__((packed));
uint64_t value;
-}__attribute__((packed));
-
-struct PageDirectory
-{
- union PageEntry *pml4_vaddr;
- physptr_t pml4_paddr;
- size_t references;
-};
-
-extern struct PageDirectory *mem_current_pd;
-
-void mem_pd_new(struct PageDirectory *pd);
-void mem_pd_clone(struct PageDirectory *pd, struct PageDirectory *parent);
-
-void mem_pd_ensure_4k(struct PageDirectory *pd, uintptr_t virt, uint8_t flg);
-void mem_pd_ensure_range(struct PageDirectory *pd, uintptr_t from, uintptr_t to, uint8_t flg);
+} __attribute__((packed)) pmle_t;
#endif
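
For illustration only, a hypothetical helper (not in the patch) showing how the renamed pmle_t is meant to be filled in and read back as a raw 64-bit table entry:

    #include <stdint.h>
    #include "paging.h"

    /* Hypothetical: present, writable, user-accessible, no-execute entry
     * for the 4 KiB frame at physical address phys. */
    static inline uint64_t pmle_pack(uint64_t phys)
    {
        pmle_t e = {
            .p = 1, .rw = 1, .us = 1, .xd = 1,
            .paddr = phys >> 12   /* entries store the frame number */
        };
        return e.value;           /* the packed value written to the table */
    }
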
diff --git a/arch/x86_64/syscall_setup.S b/arch/x86_64/syscall_setup.S
index 972e345..4f5c6f0 100644
--- a/arch/x86_64/syscall_setup.S
+++ b/arch/x86_64/syscall_setup.S
@@ -6,12 +6,11 @@
syscall_entry:
swapgs
movq %rsp, %rax
- movq (_kernel_task_bp), %rsp
+ movq _kernel_task_bp, %rsp
pushq %rax
pushq %rbp
pushq %rcx
pushq %r11
- movq %rsp, %rbp
call syscall_handler
popq %r11
popq %rcx
diff --git a/arch/x86_64/tasking.c b/arch/x86_64/tasking.c
index fe6ecdb..9b29330 100644
--- a/arch/x86_64/tasking.c
+++ b/arch/x86_64/tasking.c
@@ -1,4 +1,4 @@
-#include "tsk/tasking.h"
+#include "usr/tasking.h"
#include "mem/memory.h"
#include "io/log.h"
#include "lib/hashtable.h"
@@ -9,7 +9,6 @@
struct TaskBody {
struct Task base;
- struct PageDirectory *pd;
struct Registers state;
};
@@ -44,7 +43,7 @@ tasking_setup(void)
.base.id = s_task_id_next++,
.base.kbp = ((uintptr_t)mem_slab_alloc(&s_kbp_cache)) + 0xFF0,
.base.perm = (size_t)-1,
- .pd = mem_current_pd
+ .base.pd = current_page_directory
};
hashtable_insert(&s_tasks, 0, ktask);
@@ -80,14 +79,8 @@ void
task_free(struct Task *task)
{
struct TaskBody *body = (struct TaskBody*)task;
- body->pd->references--;
+ task->pd->ref--;
task->kbp -= 0xFFF;
mem_slab_free(&s_kbp_cache, (void*)(task->kbp));
klogf("Need impl for task_free\n");
}
-
-void*
-task_get_pd(struct Task *task)
-{
- return ((struct TaskBody*)task)->pd;
-}
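
With TaskBody.pd and task_get_pd() gone, the code above assumes the generic struct Task in usr/tasking.h now carries the page directory itself. A speculative sketch of only the fields this diff touches (the actual layout is not shown here):

    struct Task {
        size_t            id;    /* from s_task_id_next */
        uintptr_t         kbp;   /* kernel stack top used on syscall entry */
        size_t            perm;  /* permission mask */
        page_directory_t *pd;    /* replaces TaskBody.pd / task_get_pd() */
    };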